• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
#include "berberis/tiny_loader/tiny_loader.h"

#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdarg.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/user.h>
#include <unistd.h>

#include <string>

#include "berberis/base/bit_util.h"
#include "berberis/base/checks.h"
#include "berberis/base/mapped_file_fragment.h"
#include "berberis/base/prctl_helpers.h"
#include "berberis/base/stringprintf.h"
32 
33 #define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
34 #define PFLAGS_TO_PROT(x)                                                        \
35   (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
36    MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
37 
38 namespace {
39 
set_error_msg(std::string * error_msg,const char * format,...)40 void set_error_msg(std::string* error_msg, const char* format, ...) {
41   if (error_msg == nullptr) {
42     return;
43   }
44 
45   va_list ap;
46   va_start(ap, format);
47   berberis::StringAppendV(error_msg, format, ap);
48   va_end(ap);
49 }
50 
51 template <typename T>
page_align_down(T addr)52 constexpr T page_align_down(T addr) {
53   return berberis::AlignDown(addr, PAGE_SIZE);
54 }
55 
56 template <typename T>
page_align_up(T addr)57 constexpr T page_align_up(T addr) {
58   return berberis::AlignUp(addr, PAGE_SIZE);
59 }
60 
// Returns the offset of |address| within its page.
template <typename T>
constexpr T page_offset(T address) {
  return address - page_align_down(address);
}
65 
// Maps an EI_CLASS byte value to its symbolic name for error messages.
const char* EiClassString(int elf_class) {
  if (elf_class == ELFCLASSNONE) {
    return "ELFCLASSNONE";
  }
  if (elf_class == ELFCLASS32) {
    return "ELFCLASS32";
  }
  if (elf_class == ELFCLASS64) {
    return "ELFCLASS64";
  }
  return "(unknown)";
}
78 
79 // Returns the size of the extent of all the possibly non-contiguous
80 // loadable segments in an ELF program header table. This corresponds
81 // to the page-aligned size in bytes that needs to be reserved in the
82 // process' address space. If there are no loadable segments, 0 is
83 // returned.
84 //
85 // If out_min_vaddr or out_max_vaddr are not null, they will be
86 // set to the minimum and maximum addresses of pages to be reserved,
87 // or 0 if there is nothing to load.
phdr_table_get_load_size(const ElfPhdr * phdr_table,size_t phdr_count,ElfAddr * out_min_vaddr)88 size_t phdr_table_get_load_size(const ElfPhdr* phdr_table, size_t phdr_count,
89                                 ElfAddr* out_min_vaddr) {
90   ElfAddr min_vaddr = UINTPTR_MAX;
91   ElfAddr max_vaddr = 0;
92 
93   bool found_pt_load = false;
94   for (size_t i = 0; i < phdr_count; ++i) {
95     const ElfPhdr* phdr = &phdr_table[i];
96 
97     if (phdr->p_type != PT_LOAD) {
98       continue;
99     }
100     found_pt_load = true;
101 
102     if (phdr->p_vaddr < min_vaddr) {
103       min_vaddr = phdr->p_vaddr;
104     }
105 
106     if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
107       max_vaddr = phdr->p_vaddr + phdr->p_memsz;
108     }
109   }
110   if (!found_pt_load) {
111     min_vaddr = 0;
112   }
113 
114   min_vaddr = page_align_down(min_vaddr);
115   max_vaddr = page_align_up(max_vaddr);
116 
117   if (out_min_vaddr != nullptr) {
118     *out_min_vaddr = min_vaddr;
119   }
120   return max_vaddr - min_vaddr;
121 }
122 
// One-shot loader for a single ELF file.  Can either map the file's PT_LOAD
// segments into freshly reserved address space (LoadFromFile) or parse an
// image that is already present in memory (LoadFromMemory), exposing the
// result through a LoadedElfFile.
class TinyElfLoader {
 public:
  explicit TinyElfLoader(const char* name);

  // Maps and parses the ELF file referred to by |fd|.  |mmap64_fn| and
  // |munmap_fn| let the caller control how address space is obtained.
  bool LoadFromFile(int fd, off64_t file_size, size_t align, TinyLoader::mmap64_fn_t mmap64_fn,
                    TinyLoader::munmap_fn_t munmap_fn, LoadedElfFile* loaded_elf_file);

  // Parses an ELF image already mapped at |load_addr|.
  bool LoadFromMemory(void* load_addr, size_t load_size, LoadedElfFile* loaded_elf_file);

  // Human-readable description of the most recent failure.
  const std::string& error_msg() const { return error_msg_; }

 private:
  bool CheckElfHeader(const ElfEhdr* header);
  bool ReadElfHeader(int fd, ElfEhdr* header);
  bool ReadProgramHeadersFromFile(const ElfEhdr* header, int fd, off64_t file_size,
                                  const ElfPhdr** phdr_table, size_t* phdr_num);

  bool ReadProgramHeadersFromMemory(const ElfEhdr* header, uintptr_t load_addr, size_t load_size,
                                    const ElfPhdr** phdr_table, size_t* phdr_num);

  bool ReserveAddressSpace(ElfHalf e_type, const ElfPhdr* phdr_table, size_t phdr_num, size_t align,
                           TinyLoader::mmap64_fn_t mmap64_fn, TinyLoader::munmap_fn_t munmap_fn,
                           void** load_start, size_t* load_size, uintptr_t* load_bias);

  bool LoadSegments(int fd, size_t file_size, ElfHalf e_type, const ElfPhdr* phdr_table,
                    size_t phdr_num, size_t align, TinyLoader::mmap64_fn_t mmap64_fn,
                    TinyLoader::munmap_fn_t munmap_fn, void** load_start, size_t* load_size);

  bool FindDynamicSegment(const ElfEhdr* header);
  bool InitializeFields(const ElfEhdr* header);

  bool Parse(void* load_ptr, size_t load_size, LoadedElfFile* loaded_elf_file);

  static bool CheckFileRange(off64_t file_size, ElfAddr offset, size_t size, size_t alignment);
  static bool CheckMemoryRange(uintptr_t load_addr, size_t load_size, ElfAddr offset, size_t size,
                               size_t alignment);
  uint8_t* Reserve(void* hint, size_t size, TinyLoader::mmap64_fn_t mmap64_fn);

  // Set once a Load* call has succeeded; a loader instance is single-use.
  bool did_load_;

  // Name used in error messages; not owned.
  const char* name_;

  // Keeps the file-backed program header table mapped (file loads only).
  MappedFileFragment phdr_fragment_;

  // Loaded phdr
  const ElfPhdr* loaded_phdr_;
  size_t loaded_phdr_num_;

  // Delta between the run-time load address and the link-time vaddrs.
  ElfAddr load_bias_;

  // Run-time entry point, or nullptr if the header has e_entry == 0.
  void* entry_point_;

  // Loaded dynamic section
  const ElfDyn* dynamic_;

  // Fields needed for symbol lookup
  bool has_gnu_hash_;
  size_t gnu_nbucket_;
  uint32_t* gnu_bucket_;
  uint32_t* gnu_chain_;
  uint32_t gnu_maskwords_;
  uint32_t gnu_shift2_;
  ElfAddr* gnu_bloom_filter_;

  uint32_t sysv_nbucket_;
  uint32_t sysv_nchain_;
  uint32_t* sysv_bucket_;
  uint32_t* sysv_chain_;

  ElfSym* symtab_;

  const char* strtab_;
  size_t strtab_size_;

  std::string error_msg_;
};
199 
// Construction only records the (externally owned) name used for error
// messages; all parsing state starts out empty until a Load* call runs.
TinyElfLoader::TinyElfLoader(const char* name)
    : did_load_(false),
      name_(name),
      loaded_phdr_(nullptr),
      loaded_phdr_num_(0),
      load_bias_(0),
      entry_point_(nullptr),
      dynamic_(nullptr),
      has_gnu_hash_(false),
      gnu_nbucket_(0),
      gnu_bucket_(nullptr),
      gnu_chain_(nullptr),
      gnu_maskwords_(0),
      gnu_shift2_(0),
      gnu_bloom_filter_(nullptr),
      sysv_nbucket_(0),
      sysv_nchain_(0),
      sysv_bucket_(nullptr),
      sysv_chain_(nullptr),
      symtab_(nullptr),
      strtab_(nullptr),
      strtab_size_(0) {}
222 
CheckElfHeader(const ElfEhdr * header)223 bool TinyElfLoader::CheckElfHeader(const ElfEhdr* header) {
224   if (memcmp(header->e_ident, ELFMAG, SELFMAG) != 0) {
225     set_error_msg(&error_msg_, "\"%s\" has bad ELF magic", name_);
226     return false;
227   }
228 
229   int elf_class = header->e_ident[EI_CLASS];
230   if (elf_class != kSupportedElfClass) {
231     set_error_msg(&error_msg_, "\"%s\" %s is not supported, expected %s.", name_,
232                   EiClassString(elf_class), EiClassString(kSupportedElfClass));
233     return false;
234   }
235 
236   if (header->e_ident[EI_DATA] != ELFDATA2LSB) {
237     set_error_msg(&error_msg_, "\"%s\" not little-endian: %d", name_, header->e_ident[EI_DATA]);
238     return false;
239   }
240 
241   if (header->e_version != EV_CURRENT) {
242     set_error_msg(&error_msg_, "\"%s\" has unexpected e_version: %d", name_, header->e_version);
243     return false;
244   }
245 
246   if (header->e_shentsize != sizeof(ElfShdr)) {
247     set_error_msg(&error_msg_, "\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)", name_,
248                   header->e_shentsize, sizeof(ElfShdr));
249     return false;
250   }
251 
252   if (header->e_shstrndx == 0) {
253     set_error_msg(&error_msg_, "\"%s\" has invalid e_shstrndx", name_);
254     return false;
255   }
256 
257   // Like the kernel, we only accept program header tables that
258   // are smaller than 64KiB.
259   if (header->e_phnum < 1 || header->e_phnum > 65536 / sizeof(ElfPhdr)) {
260     set_error_msg(&error_msg_, "\"%s\" has invalid e_phnum: %zd", name_, header->e_phnum);
261     return false;
262   }
263 
264   return true;
265 }
266 
ReadElfHeader(int fd,ElfEhdr * header)267 bool TinyElfLoader::ReadElfHeader(int fd, ElfEhdr* header) {
268   ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd, header, sizeof(*header), 0));
269   if (rc < 0) {
270     set_error_msg(&error_msg_, "can't read file \"%s\": %s", name_, strerror(errno));
271     return false;
272   }
273 
274   if (rc != sizeof(*header)) {
275     set_error_msg(&error_msg_, "\"%s\" is too small to be an ELF executable: only found %zd bytes",
276                   name_, static_cast<size_t>(rc));
277     return false;
278   }
279 
280   return CheckElfHeader(header);
281 }
282 
CheckFileRange(off64_t file_size,ElfAddr offset,size_t size,size_t alignment)283 bool TinyElfLoader::CheckFileRange(off64_t file_size, ElfAddr offset, size_t size,
284                                    size_t alignment) {
285   off64_t range_start = offset;
286   off64_t range_end;
287 
288   return offset > 0 && !__builtin_add_overflow(range_start, size, &range_end) &&
289          (range_start < file_size) && (range_end <= file_size) && ((offset % alignment) == 0);
290 }
291 
CheckMemoryRange(uintptr_t load_addr,size_t load_size,ElfAddr offset,size_t size,size_t alignment)292 bool TinyElfLoader::CheckMemoryRange(uintptr_t load_addr, size_t load_size, ElfAddr offset,
293                                      size_t size, size_t alignment) {
294   uintptr_t dummy;
295   uintptr_t offset_end;
296 
297   return offset < load_size && !__builtin_add_overflow(load_addr, load_size, &dummy) &&
298          !__builtin_add_overflow(offset, size, &offset_end) && offset_end <= load_size &&
299          ((offset % alignment) == 0);
300 }
301 
ReadProgramHeadersFromFile(const ElfEhdr * header,int fd,off64_t file_size,const ElfPhdr ** phdr_table,size_t * phdr_num)302 bool TinyElfLoader::ReadProgramHeadersFromFile(const ElfEhdr* header, int fd, off64_t file_size,
303                                                const ElfPhdr** phdr_table, size_t* phdr_num) {
304   size_t phnum = header->e_phnum;
305   size_t size = phnum * sizeof(ElfPhdr);
306 
307   if (!CheckFileRange(file_size, header->e_phoff, size, alignof(ElfPhdr))) {
308     set_error_msg(&error_msg_, "\"%s\" has invalid phdr offset/size: %zu/%zu", name_,
309                   static_cast<size_t>(header->e_phoff), size);
310     return false;
311   }
312 
313   if (!phdr_fragment_.Map(fd, 0, header->e_phoff, size)) {
314     set_error_msg(&error_msg_, "\"%s\" phdr mmap failed: %s", name_, strerror(errno));
315     return false;
316   }
317 
318   *phdr_table = static_cast<ElfPhdr*>(phdr_fragment_.data());
319   *phdr_num = phnum;
320   return true;
321 }
322 
ReadProgramHeadersFromMemory(const ElfEhdr * header,uintptr_t load_addr,size_t load_size,const ElfPhdr ** phdr_table,size_t * phdr_num)323 bool TinyElfLoader::ReadProgramHeadersFromMemory(const ElfEhdr* header, uintptr_t load_addr,
324                                                  size_t load_size, const ElfPhdr** phdr_table,
325                                                  size_t* phdr_num) {
326   size_t phnum = header->e_phnum;
327   size_t size = phnum * sizeof(ElfPhdr);
328 
329   if (!CheckMemoryRange(load_addr, load_size, header->e_phoff, size, alignof(ElfPhdr))) {
330     set_error_msg(&error_msg_, "\"%s\" has invalid phdr offset/size: %zu/%zu", name_,
331                   static_cast<size_t>(header->e_phoff), size);
332     return false;
333   }
334 
335   *phdr_table = reinterpret_cast<const ElfPhdr*>(load_addr + header->e_phoff);
336   *phdr_num = phnum;
337   return true;
338 }
339 
Reserve(void * hint,size_t size,TinyLoader::mmap64_fn_t mmap64_fn)340 uint8_t* TinyElfLoader::Reserve(void* hint, size_t size, TinyLoader::mmap64_fn_t mmap64_fn) {
341   int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
342 
343   void* mmap_ptr = mmap64_fn(hint, size, PROT_NONE, mmap_flags, -1, 0);
344   if (mmap_ptr == MAP_FAILED) {
345     return nullptr;
346   }
347 
348   return reinterpret_cast<uint8_t*>(mmap_ptr);
349 }
350 
// Reserves one contiguous PROT_NONE region large enough for all PT_LOAD
// segments.  ET_EXEC files must land exactly at their linked address; other
// file types are placed anywhere, with an over-reserve-and-trim dance when
// the requested alignment exceeds the page size.  On success fills in
// *load_start, *load_size and *load_bias.
bool TinyElfLoader::ReserveAddressSpace(ElfHalf e_type, const ElfPhdr* phdr_table, size_t phdr_num,
                                        size_t size_t_align_unused_placeholder, TinyLoader::mmap64_fn_t mmap64_fn,
                                        TinyLoader::munmap_fn_t munmap_fn, void** load_start,
                                        size_t* load_size, uintptr_t* load_bias) {
  ElfAddr min_vaddr;
  size_t size = phdr_table_get_load_size(phdr_table, phdr_num, &min_vaddr);
  if (size == 0) {
    set_error_msg(&error_msg_, "\"%s\" has no loadable segments", name_);
    return false;
  }

  // Link-time address of the first loadable page.
  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  uint8_t* start;

  if (e_type == ET_EXEC) {
    // Reserve with hint.  The kernel may ignore the hint; if the mapping
    // ended up elsewhere, release it and fail, since ET_EXEC segments must
    // load at their linked address.
    start = Reserve(addr, size, mmap64_fn);
    if (start != addr) {
      if (start != nullptr) {
        munmap_fn(start, size);
      }
      set_error_msg(&error_msg_, "couldn't reserve %zd bytes of address space at %p for \"%s\"",
                    size, addr, name_);

      return false;
    }
  } else if (align <= PAGE_SIZE) {
    // Reserve.
    start = Reserve(nullptr, size, mmap64_fn);
    if (start == nullptr) {
      set_error_msg(&error_msg_, "couldn't reserve %zd bytes of address space for \"%s\"", size,
                    name_);
      return false;
    }
  } else {
    // Reserve overaligned: map align + size bytes, then unmap the unaligned
    // head and the leftover tail so exactly [start, start + size) remains.
    CHECK(berberis::IsPowerOf2(align));
    uint8_t* unaligned_start = Reserve(nullptr, align + size, mmap64_fn);
    if (unaligned_start == nullptr) {
      set_error_msg(&error_msg_,
                    "couldn't reserve %zd bytes of address space aligned on %zd for \"%s\"", size,
                    align, name_);
      return false;
    }
    start = berberis::AlignUp(unaligned_start, align);
    munmap_fn(unaligned_start, start - unaligned_start);
    munmap_fn(start + size, unaligned_start + align - start);
  }

  *load_start = start;
  *load_size = size;
  // Bias between run-time and link-time addresses; always 0 for ET_EXEC
  // because start == addr is enforced above.
  *load_bias = start - addr;
  return true;
}
405 
// Reserves the address range for the whole load image, then maps every
// PT_LOAD segment from the file into place: the file-backed portion via a
// private file mapping, the trailing zero-initialized portion (.bss) via
// anonymous pages, zero-filling the partial last page of writable segments.
bool TinyElfLoader::LoadSegments(int fd, size_t file_size, ElfHalf e_type,
                                 const ElfPhdr* phdr_table, size_t phdr_num, size_t align,
                                 TinyLoader::mmap64_fn_t mmap64_fn,
                                 TinyLoader::munmap_fn_t munmap_fn, void** load_start,
                                 size_t* load_size) {
  uintptr_t load_bias = 0;
  if (!ReserveAddressSpace(e_type, phdr_table, phdr_num, align, mmap64_fn, munmap_fn, load_start,
                           load_size, &load_bias)) {
    return false;
  }

  for (size_t i = 0; i < phdr_num; ++i) {
    const ElfPhdr* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfAddr seg_start = phdr->p_vaddr + load_bias;
    ElfAddr seg_end = seg_start + phdr->p_memsz;

    ElfAddr seg_page_start = page_align_down(seg_start);
    ElfAddr seg_page_end = page_align_up(seg_end);

    // End of the file-backed portion of the segment in memory.
    ElfAddr seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfAddr file_start = phdr->p_offset;
    ElfAddr file_end = file_start + phdr->p_filesz;

    ElfAddr file_page_start = page_align_down(file_start);
    ElfAddr file_length = file_end - file_page_start;

    // NOTE(review): file_size is declared size_t here, so this comparison
    // only rejects 0, and %PRId64 expects a signed 64-bit argument — on
    // LP64 the widths match but the signedness does not; confirm intent.
    if (file_size <= 0) {
      set_error_msg(&error_msg_, "\"%s\" invalid file size: %" PRId64, name_, file_size);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size)) {
      set_error_msg(&error_msg_,
                    "invalid ELF file \"%s\" load segment[%zd]:"
                    " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
                    name_, i, reinterpret_cast<void*>(phdr->p_offset),
                    reinterpret_cast<void*>(phdr->p_filesz), reinterpret_cast<void*>(file_end),
                    file_size);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      // Refuse segments that are simultaneously writable and executable.
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        set_error_msg(&error_msg_, "\"%s\": W + E load segments are not allowed", name_);
        return false;
      }

      // MAP_FIXED into the reservation made by ReserveAddressSpace.
      void* seg_addr = mmap64_fn(reinterpret_cast<void*>(seg_page_start), file_length, prot,
                                 MAP_FIXED | MAP_PRIVATE, fd, file_page_start);
      if (seg_addr == MAP_FAILED) {
        set_error_msg(&error_msg_, "couldn't map \"%s\" segment %zd: %s", name_, i,
                      strerror(errno));
        return false;
      }
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - page_offset(seg_file_end));
    }

    seg_file_end = page_align_up(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap =
          mmap64_fn(reinterpret_cast<void*>(seg_file_end), zeromap_size,
                    PFLAGS_TO_PROT(phdr->p_flags), MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      if (zeromap == MAP_FAILED) {
        set_error_msg(&error_msg_, "couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }

      // Label the anonymous pages so they show up as ".bss" in /proc maps.
      berberis::SetVmaAnonName(zeromap, zeromap_size, ".bss");
    }
  }

  return true;
}
499 
FindDynamicSegment(const ElfEhdr * header)500 bool TinyElfLoader::FindDynamicSegment(const ElfEhdr* header) {
501   // Static executables do not have PT_DYNAMIC
502   if (header->e_type == ET_EXEC) {
503     return true;
504   }
505 
506   for (size_t i = 0; i < loaded_phdr_num_; ++i) {
507     const ElfPhdr& phdr = loaded_phdr_[i];
508     if (phdr.p_type == PT_DYNAMIC) {
509       // TODO(dimitry): Check all addresses and sizes referencing loaded segments.
510       dynamic_ = reinterpret_cast<ElfDyn*>(load_bias_ + phdr.p_vaddr);
511       return true;
512     }
513   }
514 
515   set_error_msg(&error_msg_, "dynamic segment was not found in \"%s\"", name_);
516   return false;
517 }
518 
// Caches the entry point and, for dynamic files, the symbol-lookup tables
// (GNU hash and/or SysV hash, symtab, strtab) found in the dynamic section.
// DT_SYMTAB, DT_STRTAB and DT_STRSZ are mandatory for dynamic files.
bool TinyElfLoader::InitializeFields(const ElfEhdr* header) {
  if (header->e_entry != 0) {
    entry_point_ = reinterpret_cast<void*>(load_bias_ + header->e_entry);
  }

  // There is nothing else to do for a static executable.
  if (header->e_type == ET_EXEC) {
    return true;
  }

  for (const ElfDyn* d = dynamic_; d->d_tag != DT_NULL; ++d) {
    if (d->d_tag == DT_GNU_HASH) {
      has_gnu_hash_ = true;
      // DT_GNU_HASH header is four 32-bit words — [0] nbucket, [1] first
      // symbol index covered by the chains (symoffset), [2] maskwords,
      // [3] shift2 — followed at byte offset 16 by the bloom filter, then
      // the buckets, then the chains.
      gnu_nbucket_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[0];
      gnu_maskwords_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[2];
      gnu_shift2_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[3];
      gnu_bloom_filter_ = reinterpret_cast<ElfAddr*>(load_bias_ + d->d_un.d_ptr + 16);
      gnu_bucket_ = reinterpret_cast<uint32_t*>(gnu_bloom_filter_ + gnu_maskwords_);
      // Bias the chain pointer by symoffset so it can be indexed directly
      // with symbol-table indices.
      gnu_chain_ =
          gnu_bucket_ + gnu_nbucket_ - reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[1];

      if (!powerof2(gnu_maskwords_)) {
        set_error_msg(&error_msg_,
                      "invalid maskwords for gnu_hash = 0x%x, in \"%s\" expecting power of two",
                      gnu_maskwords_, name_);

        return false;
      }

      // Turn the power-of-two word count into a bitmask for indexing.
      --gnu_maskwords_;
    } else if (d->d_tag == DT_HASH) {
      // SysV hash: [0] nbucket, [1] nchain, then buckets, then chains
      // (all 32-bit words).
      sysv_nbucket_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[0];
      sysv_nchain_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[1];
      sysv_bucket_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr + 8);
      sysv_chain_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr + 8 + sysv_nbucket_ * 4);
    } else if (d->d_tag == DT_SYMTAB) {
      symtab_ = reinterpret_cast<ElfSym*>(load_bias_ + d->d_un.d_ptr);
    } else if (d->d_tag == DT_STRTAB) {
      strtab_ = reinterpret_cast<const char*>(load_bias_ + d->d_un.d_ptr);
    } else if (d->d_tag == DT_STRSZ) {
      strtab_size_ = d->d_un.d_val;
    }
  }

  if (symtab_ == nullptr) {
    set_error_msg(&error_msg_, "missing DT_SYMTAB in \"%s\"", name_);
    return false;
  }

  if (strtab_ == nullptr) {
    set_error_msg(&error_msg_, "missing DT_STRTAB in \"%s\"", name_);
    return false;
  }

  if (strtab_size_ == 0) {
    set_error_msg(&error_msg_, "missing or invalid (0) DT_STRSZ in \"%s\"", name_);
    return false;
  }

  return true;
}
580 
// Validates the in-memory ELF image at |load_ptr|, locates its program
// headers, dynamic section and symbol tables, and fills in |loaded_elf_file|.
bool TinyElfLoader::Parse(void* load_ptr, size_t load_size, LoadedElfFile* loaded_elf_file) {
  uintptr_t load_addr = reinterpret_cast<uintptr_t>(load_ptr);
  const ElfEhdr* header = reinterpret_cast<const ElfEhdr*>(load_addr);
  if (!CheckElfHeader(header)) {
    return false;
  }

  if (!ReadProgramHeadersFromMemory(header, load_addr, load_size, &loaded_phdr_,
                                    &loaded_phdr_num_)) {
    return false;
  }

  // The image is mapped at its lowest loadable page, so the bias is the
  // distance between the mapping address and the minimal link-time vaddr.
  ElfAddr min_vaddr;
  phdr_table_get_load_size(loaded_phdr_, loaded_phdr_num_, &min_vaddr);
  load_bias_ = load_addr - min_vaddr;

  if (!FindDynamicSegment(header) || !InitializeFields(header)) {
    return false;
  }

  // Publish the parsed file using whichever hash-table flavor was found;
  // GNU hash is preferred when both are present.
  if (has_gnu_hash_) {
    *loaded_elf_file = LoadedElfFile(header->e_type, load_ptr, load_bias_, entry_point_,
                                     loaded_phdr_, loaded_phdr_num_, dynamic_, gnu_nbucket_,
                                     gnu_bucket_, gnu_chain_, gnu_maskwords_, gnu_shift2_,
                                     gnu_bloom_filter_, symtab_, strtab_, strtab_size_);
  } else {
    *loaded_elf_file =
        LoadedElfFile(header->e_type, load_ptr, load_bias_, entry_point_, loaded_phdr_,
                      loaded_phdr_num_, dynamic_, sysv_nbucket_, sysv_nchain_, sysv_bucket_,
                      sysv_chain_, symtab_, strtab_, strtab_size_);
  }
  return true;
}
614 
LoadFromFile(int fd,off64_t file_size,size_t align,TinyLoader::mmap64_fn_t mmap64_fn,TinyLoader::munmap_fn_t munmap_fn,LoadedElfFile * loaded_elf_file)615 bool TinyElfLoader::LoadFromFile(int fd, off64_t file_size, size_t align,
616                                  TinyLoader::mmap64_fn_t mmap64_fn,
617                                  TinyLoader::munmap_fn_t munmap_fn,
618                                  LoadedElfFile* loaded_elf_file) {
619   CHECK(!did_load_);
620   void* load_addr = nullptr;
621   size_t load_size = 0;
622   ElfEhdr header;
623   const ElfPhdr* phdr_table = nullptr;
624   size_t phdr_num = 0;
625 
626   did_load_ = ReadElfHeader(fd, &header) &&
627               ReadProgramHeadersFromFile(&header, fd, file_size, &phdr_table, &phdr_num) &&
628               LoadSegments(fd, file_size, header.e_type, phdr_table, phdr_num, align, mmap64_fn,
629                            munmap_fn, &load_addr, &load_size) &&
630               Parse(load_addr, load_size, loaded_elf_file);
631 
632   return did_load_;
633 }
634 
LoadFromMemory(void * load_addr,size_t load_size,LoadedElfFile * loaded_elf_file)635 bool TinyElfLoader::LoadFromMemory(void* load_addr, size_t load_size,
636                                    LoadedElfFile* loaded_elf_file) {
637   CHECK(!did_load_);
638   did_load_ = Parse(load_addr, load_size, loaded_elf_file);
639   return did_load_;
640 }
641 
642 }  // namespace
643 
LoadFromFile(const char * path,size_t align,TinyLoader::mmap64_fn_t mmap64_fn,TinyLoader::munmap_fn_t munmap_fn,LoadedElfFile * loaded_elf_file,std::string * error_msg)644 bool TinyLoader::LoadFromFile(const char* path, size_t align, TinyLoader::mmap64_fn_t mmap64_fn,
645                               TinyLoader::munmap_fn_t munmap_fn, LoadedElfFile* loaded_elf_file,
646                               std::string* error_msg) {
647   int fd = TEMP_FAILURE_RETRY(open(path, O_RDONLY | O_CLOEXEC));
648   if (fd == -1) {
649     set_error_msg(error_msg, "unable to open the file \"%s\": %s", path, strerror(errno));
650     return false;
651   }
652 
653   struct stat file_stat;
654   if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
655     set_error_msg(error_msg, "unable to stat file for the library \"%s\": %s", path,
656                   strerror(errno));
657     close(fd);
658     return false;
659   }
660 
661   TinyElfLoader loader(path);
662 
663   if (!loader.LoadFromFile(fd, file_stat.st_size, align, mmap64_fn, munmap_fn, loaded_elf_file)) {
664     if (error_msg != nullptr) {
665       *error_msg = loader.error_msg();
666     }
667 
668     close(fd);
669     return false;
670   }
671 
672   close(fd);
673   return true;
674 }
675 
LoadFromMemory(const char * path,void * address,size_t size,LoadedElfFile * loaded_elf_file,std::string * error_msg)676 bool TinyLoader::LoadFromMemory(const char* path, void* address, size_t size,
677                                 LoadedElfFile* loaded_elf_file, std::string* error_msg) {
678   TinyElfLoader loader(path);
679   if (!loader.LoadFromMemory(address, size, loaded_elf_file)) {
680     if (error_msg != nullptr) {
681       *error_msg = loader.error_msg();
682     }
683 
684     return false;
685   }
686 
687   return true;
688 }
689