/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/CFIShadow.h" // For kLibraryAlignment

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}
/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead the
  loader decides where to load the first segment, then will load all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
         possible wrap around UINT32_MAX for possible large p_vaddr values.)

  Note that phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
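
// A concrete instance of the formulas above (illustrative numbers only,
// assuming 4KiB pages): if phdr0->p_vaddr == 0x30100 and the loader picks
// phdr0_load_address == 0xa0030000, then
//
//     load_bias = 0xa0030000 - PAGE_START(0x30100)
//               = 0xa0030000 - 0x30000
//               = 0xa0000000
//
// and the segment's first byte ends up at
// phdr0_load_address + PAGE_OFFSET(0x30100) == 0xa0030100,
// which equals p_vaddr + load_bias, as expected.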

#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
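
// A minimal compile-time sanity check of the macros above (illustrative
// only, and behavior-neutral; it assumes the PF_* and PROT_* constants are
// visible via the includes above):
static_assert(PFLAGS_TO_PROT(PF_R | PF_X) == (PROT_READ | PROT_EXEC),
              "PFLAGS_TO_PROT should map PF_R|PF_X to PROT_READ|PROT_EXEC");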

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(address_space_params* address_space) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  if (ReserveAddressSpace(address_space) && LoadSegments() && FindPhdr()) {
    did_load_ = true;
  }

  return did_load_;
}
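
// Typical call sequence (a sketch only; error handling and the surrounding
// linker plumbing are elided, and the names reflect the members set above
// rather than a public API):
//
//     ElfReader reader;
//     if (reader.Read(name, fd, file_offset, file_size) &&
//         reader.Load(address_space)) {
//       // load_start_, load_size_ and load_bias_ now describe the mapping.
//     }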

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
           name_.c_str(),
           EM_to_string(header_.e_machine), header_.e_machine,
           EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called
  // to check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none
  // of them can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}
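
// Example (illustrative values): with file_offset_ == 0 and file_size_ ==
// 0x1000, CheckFileRange(0x100, 0x200, 4) passes, since [0x100, 0x300) lies
// inside the file and 0x100 is 4-byte aligned; CheckFileRange(0xf00, 0x200, 4)
// fails because range_end == 0x1100 exceeds file_size_.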

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // Find the .dynamic section (in the section headers).
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size match the PT_DYNAMIC phdr.
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}
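
// Worked example (illustrative, assuming 4KiB pages), using the two segments
// from the technical note above:
//
//     [ vaddr:0x30000, memsz:0x4000 ]  ->  covers 0x30000...0x34000
//     [ vaddr:0x40000, memsz:0x8000 ]  ->  covers 0x40000...0x48000
//
// min_vaddr == PAGE_START(0x30000) == 0x30000 and max_vaddr ==
// PAGE_END(0x48000) == 0x48000, so the reservation size returned is 0x18000
// bytes, including the unused gap 0x34000...0x40000.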

// Reserve a virtual address range such that if its limits were extended to
// the next 2**align boundary, it would not overlap with any existing mapping.
static void* ReserveAligned(size_t size, size_t align) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (align == PAGE_SIZE) {
    void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, align) + align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }

  uint8_t* first = align_up(mmap_ptr, align);
  uint8_t* last = align_down(mmap_ptr + mmap_size, align) - size;

  // arc4random* is not available in first stage init because /dev/urandom hasn't yet been
  // created. Don't randomize then.
  size_t n = is_first_stage_init() ? 0 : arc4random_uniform((last - first) / PAGE_SIZE + 1);
  uint8_t* start = first + n * PAGE_SIZE;
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, mmap_ptr + mmap_size - (start + size));
  return start;
}
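
// For example (illustrative, assuming 4KiB pages): ReserveAligned(0x5000,
// 0x10000) maps mmap_size == align_up(0x5000, 0x10000) + 0x10000 - 0x1000
// == 0x1f000 bytes of PROT_NONE space, picks a random page-aligned start
// between 'first' and 'last', and unmaps the slop on both sides. Extending
// [start, start + 0x5000) out to the surrounding 0x10000 boundaries stays
// inside the original mapping, so it cannot collide with anything else.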

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;

  if (load_size_ > address_space->reserved_size) {
    if (address_space->must_use_address) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             load_size_ - address_space->reserved_size, load_size_, name_.c_str());
      return false;
    }
    start = ReserveAligned(load_size_, kLibraryAlignment);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
  } else {
    start = address_space->start_addr;
    mapped_by_caller_ = true;

    // Update the reserved address space to subtract the space used by this library.
    address_space->start_addr = reinterpret_cast<uint8_t*>(address_space->start_addr) + load_size_;
    address_space->reserved_size -= load_size_;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end   = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end   = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end   = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end   = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
          " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
          name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
          reinterpret_cast<void*>(phdr->p_filesz),
          reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= 26) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(26,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                            file_length,
                            prot,
                            MAP_FIXED|MAP_PRIVATE,
                            fd_,
                            file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}
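
// Mapping arithmetic for one segment (illustrative numbers, 4KiB pages,
// load_bias_ == 0xa0000000): p_vaddr == 0x40100, p_offset == 0x4100,
// p_filesz == 0x2000, p_memsz == 0x6000 gives
//
//     seg_start       == 0xa0040100     seg_page_start == 0xa0040000
//     seg_file_end    == 0xa0042100     seg_page_end   == 0xa0047000
//     file_page_start == 0x4000         file_length    == 0x2100
//
// so the file is mapped at 0xa0040000...0xa0042100; if the segment is
// writable, the tail of that last page is zeroed up to 0xa0043000, and
// 0xa0043000...0xa0047000 is backed by an anonymous map named ".bss".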

/* Used internally to set the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags);
    if ((extra_prot_flags & PROT_WRITE) != 0) {
      // Make sure we're never simultaneously writable / executable.
      prot &= ~PROT_EXEC;
    }

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    //    Extract:
    //       Note that the current dynamic linker code will only work
    //       correctly if the PT_GNU_RELRO segment starts on a page
    //       boundary. This is because the dynamic linker rounds the
    //       p_vaddr field down to the previous page boundary. If
    //       there is anything on the page which should not be read-only,
    //       the program is likely to fail at runtime. So in effect the
    //       linker must only emit a PT_GNU_RELRO segment if it ensures
    //       that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd,
                                   size_t* file_offset) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, *file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    *file_offset += size;
  }
  return 0;
}

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd,
                             size_t* file_offset) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - *file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, *file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    *file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}
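
// Compare/remap sketch (illustrative): for a five-page relro segment whose
// pages 0 and 3 differ from the serialized file, the loop above issues two
// mmap() calls, one over pages 1-2 and one over page 4; the differing pages
// keep their private dirty copies.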

#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX    0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}