/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/CFIShadow.h" // For kLibraryAlignment

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
         possible wraparound past UINT32_MAX for large p_vaddr values.)

  Note that phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Also note that ELF requires the following condition to make the mmap()-ing
  work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/

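// Worked example of the bias formula above (a sketch, assuming 4 KiB pages):
// if the first segment from the note (p_vaddr == 0x30000) ends up mapped at
// 0xa0030000, then
//
//     load_bias = 0xa0030000 - PAGE_START(0x30000)
//               = 0xa0030000 - 0x30000
//               = 0xa0000000
//
// and adding 0xa0000000 to any p_vaddr read from the file yields its address
// in memory.
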
#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))

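// For example, PFLAGS_TO_PROT(PF_R | PF_X) evaluates to PROT_READ | PROT_EXEC,
// and PFLAGS_TO_PROT(0) evaluates to 0 (i.e. PROT_NONE).
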
ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(address_space_params* address_space) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  if (ReserveAddressSpace(address_space) && LoadSegments() && FindPhdr()) {
    did_load_ = true;
  }

  return did_load_;
}

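// Typical usage (a sketch; error reporting omitted): callers parse the headers
// with Read() and then map the segments with Load(). Load() CHECKs that Read()
// succeeded before doing any work.
//
//   ElfReader reader;
//   if (reader.Read(name, fd, file_offset, file_size) &&
//       reader.Load(&address_space)) {
//     // the library's segments are now mapped and load_bias_ is set
//   }
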
const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_MIPS) return "EM_MIPS";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
           name_.c_str(),
           EM_to_string(header_.e_machine), header_.e_machine,
           EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above.
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above.
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called to
  // check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none of
  // them can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks.
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find the .dynamic section (in the section headers).
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr's offset and size match the PT_DYNAMIC phdr.
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}

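// Worked example (assuming 4 KiB pages), using the two segments from the
// technical note at the top of this file:
//   min_vaddr = PAGE_START(0x30000)        = 0x30000
//   max_vaddr = PAGE_END(0x40000 + 0x8000) = 0x48000
//   returned size = 0x48000 - 0x30000      = 0x18000 (24 pages)
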
// Reserve a virtual address range such that if its limits were extended to the
// next 2**align boundary, it would not overlap with any existing mapping.
static void* ReserveAligned(size_t size, size_t align) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (align == PAGE_SIZE) {
    void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, align) + align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }

  uint8_t* first = align_up(mmap_ptr, align);
  uint8_t* last = align_down(mmap_ptr + mmap_size, align) - size;

  // arc4random* is not available in first stage init because /dev/urandom hasn't yet been
  // created. Don't randomize then.
  size_t n = is_first_stage_init() ? 0 : arc4random_uniform((last - first) / PAGE_SIZE + 1);
  uint8_t* start = first + n * PAGE_SIZE;
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, mmap_ptr + mmap_size - (start + size));
  return start;
}

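// Worked example (a sketch, assuming PAGE_SIZE == 0x1000): reserving
// size == 0x5000 with align == 0x10000 over-allocates
//   mmap_size = align_up(0x5000, 0x10000) + 0x10000 - 0x1000 = 0x1f000
// bytes. Any 0x10000-aligned start chosen inside that mapping leaves room for
// the full 0x5000 bytes, and the unused head and tail are munmap()ed away.
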
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;

  if (load_size_ > address_space->reserved_size) {
    if (address_space->must_use_address) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             load_size_ - address_space->reserved_size, load_size_, name_.c_str());
      return false;
    }
    start = ReserveAligned(load_size_, kLibraryAlignment);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
  } else {
    start = address_space->start_addr;
    mapped_by_caller_ = true;

    // Update the reserved address space to subtract the space used by this library.
    address_space->start_addr = reinterpret_cast<uint8_t*>(address_space->start_addr) + load_size_;
    address_space->reserved_size -= load_size_;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end   = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end   = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end   = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end   = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
          " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
          name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
          reinterpret_cast<void*>(phdr->p_filesz),
          reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(__ANDROID_API_O__,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              prot,
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable and does not end on a page boundary,
    // zero-fill it up to the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}

/* Used internally. Sets the protection bits of all loaded segments,
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags);
    if ((extra_prot_flags & PROT_WRITE) != 0) {
      // Make sure we're never simultaneously writable / executable.
      prot &= ~PROT_EXEC;
    }

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}

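// Typical relocation sequence (a sketch; error handling omitted):
//   phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias);
//   /* ... apply relocations ... */
//   phdr_table_protect_segments(phdr_table, phdr_count, load_bias);
//   phdr_table_protect_gnu_relro(phdr_table, phdr_count, load_bias);
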
/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and mark every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    //    Extract:
    //       Note that the current dynamic linker code will only work
    //       correctly if the PT_GNU_RELRO segment starts on a page
    //       boundary. This is because the dynamic linker rounds the
    //       p_vaddr field down to the previous page boundary. If
    //       there is anything on the page which should not be read-only,
    //       the program is likely to fail at runtime. So in effect the
    //       linker must only emit a PT_GNU_RELRO segment if it ensures
    //       that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd,
                                   size_t* file_offset) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, *file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    *file_offset += size;
  }
  return 0;
}

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd,
                             size_t* file_offset) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - *file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, *file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    *file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}

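// Typical cross-process usage (a sketch): one process serializes its relocated
// RELRO pages with phdr_table_serialize_gnu_relro(); a second process that
// loaded the same library at the same address then calls
// phdr_table_map_gnu_relro() on that file, replacing identical dirty pages
// with clean file-backed mappings that the kernel can share.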

#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX    0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and flags of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}