1 /*
2  * Copyright (C) 2012 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include "linker_phdr.h"
30 
31 #include <errno.h>
32 #include <string.h>
33 #include <sys/mman.h>
34 #include <sys/prctl.h>
35 #include <sys/types.h>
36 #include <sys/stat.h>
37 #include <unistd.h>
38 
39 #include "linker.h"
40 #include "linker_debug.h"
41 #include "linker_dlwarning.h"
42 #include "linker_globals.h"
43 #include "linker_logger.h"
44 #include "linker_main.h"
45 #include "linker_soinfo.h"
46 #include "linker_utils.h"
47 
48 #include "private/bionic_asm_note.h"
49 #include "private/CFIShadow.h" // For kLibraryAlignment
50 #include "private/elf_note.h"
51 
52 #include <android-base/file.h>
53 #include <android-base/properties.h>
54 
55 static int GetTargetElfMachine() {
56 #if defined(__arm__)
57   return EM_ARM;
58 #elif defined(__aarch64__)
59   return EM_AARCH64;
60 #elif defined(__i386__)
61   return EM_386;
62 #elif defined(__riscv)
63   return EM_RISCV;
64 #elif defined(__x86_64__)
65   return EM_X86_64;
66 #endif
67 }
68 
69 /**
70   TECHNICAL NOTE ON ELF LOADING.
71 
72   An ELF file's program header table contains one or more PT_LOAD
73   segments, which correspond to portions of the file that need to
74   be mapped into the process' address space.
75 
76   Each loadable segment has the following important properties:
77 
78     p_offset  -> segment file offset
79     p_filesz  -> segment file size
80     p_memsz   -> segment memory size (always >= p_filesz)
81     p_vaddr   -> segment's virtual address
82     p_flags   -> segment flags (e.g. readable, writable, executable)
83     p_align   -> segment's in-memory and in-file alignment
84 
85   We will ignore the p_paddr field of ElfW(Phdr) for now.
86 
87   The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
88   ranges of virtual addresses. A few rules apply:
89 
90   - the virtual address ranges should not overlap.
91 
92   - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
93     between them should always be initialized to 0.
94 
95   - ranges do not necessarily start or end at page boundaries. Two distinct
96     segments can have their start and end on the same page. In this case, the
97     page inherits the mapping flags of the latter segment.
98 
99   Finally, the real load address of each segment is not p_vaddr. Instead, the
100   loader decides where to load the first segment, then will load all others
101   relative to the first one to respect the initial range layout.
102 
103   For example, consider the following list:
104 
105     [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
106     [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],
107 
108   This corresponds to two segments that cover these virtual address ranges:
109 
110        0x30000...0x34000
111        0x40000...0x48000
112 
113   If the loader decides to load the first segment at address 0xa0000000
114   then the segments' load address ranges will be:
115 
116        0xa0030000...0xa0034000
117        0xa0040000...0xa0048000
118 
119   In other words, all segments must be loaded at an address that has the same
120   constant offset from their p_vaddr value. This offset is computed as the
121   difference between the first segment's load address, and its p_vaddr value.
122 
123   However, in practice, segments do _not_ start at page boundaries. Since we
124   can only memory-map at page boundaries, this means that the bias is
125   computed as:
126 
127        load_bias = phdr0_load_address - page_start(phdr0->p_vaddr)
128 
129   (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
130           possible wraparound past UINT32_MAX for large p_vaddr values).
131 
132   Also, phdr0_load_address must start at a page boundary, with
133   the segment's real content starting at:
134 
135        phdr0_load_address + page_offset(phdr0->p_vaddr)
136 
137   Note that ELF requires the following condition to make the mmap()-ing work:
138 
139       page_offset(phdr0->p_vaddr) == page_offset(phdr0->p_offset)
140 
141   The load_bias must be added to any p_vaddr value read from the ELF file to
142   determine the corresponding memory address.
143 
144  **/
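/* Editor's note: a minimal worked sketch (not part of the original source) of the
 * load-bias arithmetic described above, assuming 4KiB pages and hypothetical values:
 *
 *     ElfW(Addr) p_vaddr = 0x30400;                  // first PT_LOAD's p_vaddr
 *     ElfW(Addr) phdr0_load_address = 0xa0030000;    // page-aligned mapping chosen by the loader
 *     ElfW(Addr) load_bias = phdr0_load_address - page_start(p_vaddr);
 *     // load_bias == 0xa0030000 - 0x30000 == 0xa0000000
 *     // the segment's content starts at phdr0_load_address + page_offset(p_vaddr)
 *     //                              == 0xa0030000 + 0x400,
 *     // and any p_vaddr read from the file maps to load_bias + p_vaddr in memory.
 */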
145 
146 static const size_t kPageSize = page_size();
147 
148 /*
149  * Generic PMD size calculation:
150  *    - Each page table (PT) is of size 1 page.
151  *    - Each page table entry (PTE) is of size 64 bits.
152  *    - Each PTE locates one physical page frame (PFN) of size 1 page.
153  *    - A PMD entry locates 1 page table (PT)
154  *
155  *   PMD size = Num entries in a PT * page_size
156  */
157 static const size_t kPmdSize = (kPageSize / sizeof(uint64_t)) * kPageSize;
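// Editor's note (illustrative, not in the original source): with 4KiB pages this
// evaluates to (4096 / 8) * 4096 = 2MiB, the usual PMD size on arm64 and x86-64;
// with 16KiB pages it evaluates to (16384 / 8) * 16384 = 32MiB.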
158 
159 ElfReader::ElfReader()
160     : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
161       phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
162       strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), max_align_(0), min_align_(0),
163       loaded_phdr_(nullptr), mapped_by_caller_(false) {
164 }
165 
166 bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
167   if (did_read_) {
168     return true;
169   }
170   name_ = name;
171   fd_ = fd;
172   file_offset_ = file_offset;
173   file_size_ = file_size;
174 
175   if (ReadElfHeader() &&
176       VerifyElfHeader() &&
177       ReadProgramHeaders() &&
178       CheckProgramHeaderAlignment() &&
179       ReadSectionHeaders() &&
180       ReadDynamicSection() &&
181       ReadPadSegmentNote()) {
182     did_read_ = true;
183   }
184 
185   if (kPageSize == 16*1024 && min_align_ == 4096) {
186     // This prop needs to be read on 16KiB devices for each ELF where the minimum p_align is 4KiB.
187     // It cannot be cached since the developer may toggle app compat on/off.
188     // This check will be removed once app compat is made the default on 16KiB devices.
189     should_use_16kib_app_compat_ =
190         ::android::base::GetBoolProperty("bionic.linker.16kb.app_compat.enabled", false) ||
191         get_16kb_appcompat_mode();
192   }
193 
194   return did_read_;
195 }
196 
197 bool ElfReader::Load(address_space_params* address_space) {
198   CHECK(did_read_);
199   if (did_load_) {
200     return true;
201   }
202   bool reserveSuccess = ReserveAddressSpace(address_space);
203   if (reserveSuccess && LoadSegments() && FindPhdr() &&
204       FindGnuPropertySection()) {
205     did_load_ = true;
206 #if defined(__aarch64__)
207     // For Armv8.5-A, loaded executable segments may require PROT_BTI.
208     if (note_gnu_property_.IsBTICompatible()) {
209       did_load_ =
210           (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_, should_pad_segments_,
211                                        should_use_16kib_app_compat_, &note_gnu_property_) == 0);
212     }
213 #endif
214   }
215   if (reserveSuccess && !did_load_) {
216     if (load_start_ != nullptr && load_size_ != 0) {
217       if (!mapped_by_caller_) {
218         munmap(load_start_, load_size_);
219       }
220     }
221   }
222 
223   return did_load_;
224 }
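/* Editor's note: a hedged usage sketch, not part of the original source. The
 * caller-side variable names are hypothetical and the accessors are assumed
 * from linker_phdr.h:
 *
 *     ElfReader reader;
 *     if (reader.Read("libfoo.so", fd, 0, file_size) && reader.Load(&params)) {
 *       void* base = reader.load_start();      // start of the reserved mapping
 *       ElfW(Addr) bias = reader.load_bias();  // add to p_vaddr values from the file
 *     }
 */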
225 
226 const char* ElfReader::get_string(ElfW(Word) index) const {
227   CHECK(strtab_ != nullptr);
228   CHECK(index < strtab_size_);
229 
230   return strtab_ + index;
231 }
232 
233 bool ElfReader::ReadElfHeader() {
234   ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
235   if (rc < 0) {
236     DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
237     return false;
238   }
239 
240   if (rc != sizeof(header_)) {
241     DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
242            static_cast<size_t>(rc));
243     return false;
244   }
245   return true;
246 }
247 
248 static const char* EM_to_string(int em) {
249   if (em == EM_386) return "EM_386";
250   if (em == EM_AARCH64) return "EM_AARCH64";
251   if (em == EM_ARM) return "EM_ARM";
252   if (em == EM_RISCV) return "EM_RISCV";
253   if (em == EM_X86_64) return "EM_X86_64";
254   return "EM_???";
255 }
256 
257 bool ElfReader::VerifyElfHeader() {
258   if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
259     DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
260            header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
261     return false;
262   }
263 
264   // Try to give a clear diagnostic for ELF class mismatches, since they're
265   // an easy mistake to make during the 32-bit/64-bit transition period.
266   int elf_class = header_.e_ident[EI_CLASS];
267 #if defined(__LP64__)
268   if (elf_class != ELFCLASS64) {
269     if (elf_class == ELFCLASS32) {
270       DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
271     } else {
272       DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
273     }
274     return false;
275   }
276 #else
277   if (elf_class != ELFCLASS32) {
278     if (elf_class == ELFCLASS64) {
279       DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
280     } else {
281       DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
282     }
283     return false;
284   }
285 #endif
286 
287   if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
288     DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
289     return false;
290   }
291 
292   if (header_.e_type != ET_DYN) {
293     DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
294     return false;
295   }
296 
297   if (header_.e_version != EV_CURRENT) {
298     DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
299     return false;
300   }
301 
302   if (header_.e_machine != GetTargetElfMachine()) {
303     DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
304            name_.c_str(),
305            EM_to_string(header_.e_machine), header_.e_machine,
306            EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
307     return false;
308   }
309 
310   if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
311     if (DL_ERROR_AFTER(26, "\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
312                        name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)))) {
313       return false;
314     }
315     add_dlwarning(name_.c_str(), "has invalid ELF header");
316   }
317 
318   if (header_.e_shstrndx == 0) {
319     if (DL_ERROR_AFTER(26, "\"%s\" has invalid e_shstrndx", name_.c_str())) {
320       return false;
321     }
322     add_dlwarning(name_.c_str(), "has invalid ELF header");
323   }
324 
325   return true;
326 }
327 
328 bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
329   off64_t range_start;
330   off64_t range_end;
331 
332   // Only the ELF header can be located at offset 0. This function is called to
333   // check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none of
334   // them can be at offset 0.
335 
336   return offset > 0 &&
337          safe_add(&range_start, file_offset_, offset) &&
338          safe_add(&range_end, range_start, size) &&
339          (range_start < file_size_) &&
340          (range_end <= file_size_) &&
341          ((offset % alignment) == 0);
342 }
343 
344 // Loads the program header table from an ELF file into a read-only private
345 // anonymous mmap-ed block.
346 bool ElfReader::ReadProgramHeaders() {
347   phdr_num_ = header_.e_phnum;
348 
349   // Like the kernel, we only accept program header tables that
350   // are smaller than 64KiB.
351   if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
352     DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
353     return false;
354   }
355 
356   // Boundary checks
357   size_t size = phdr_num_ * sizeof(ElfW(Phdr));
358   if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
359     DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
360                    name_.c_str(),
361                    static_cast<size_t>(header_.e_phoff),
362                    size);
363     return false;
364   }
365 
366   if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
367     DL_ERR("\"%s\" phdr mmap failed: %m", name_.c_str());
368     return false;
369   }
370 
371   phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
372   return true;
373 }
374 
375 bool ElfReader::ReadSectionHeaders() {
376   shdr_num_ = header_.e_shnum;
377 
378   if (shdr_num_ == 0) {
379     DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
380     return false;
381   }
382 
383   size_t size = shdr_num_ * sizeof(ElfW(Shdr));
384   if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
385     DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
386                    name_.c_str(),
387                    static_cast<size_t>(header_.e_shoff),
388                    size);
389     return false;
390   }
391 
392   if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
393     DL_ERR("\"%s\" shdr mmap failed: %m", name_.c_str());
394     return false;
395   }
396 
397   shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
398   return true;
399 }
400 
401 bool ElfReader::ReadDynamicSection() {
402   // 1. Find .dynamic section (in section headers)
403   const ElfW(Shdr)* dynamic_shdr = nullptr;
404   for (size_t i = 0; i < shdr_num_; ++i) {
405     if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
406       dynamic_shdr = &shdr_table_[i];
407       break;
408     }
409   }
410 
411   if (dynamic_shdr == nullptr) {
412     DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
413     return false;
414   }
415 
416   // Make sure dynamic_shdr offset and size matches PT_DYNAMIC phdr
417   size_t pt_dynamic_offset = 0;
418   size_t pt_dynamic_filesz = 0;
419   for (size_t i = 0; i < phdr_num_; ++i) {
420     const ElfW(Phdr)* phdr = &phdr_table_[i];
421     if (phdr->p_type == PT_DYNAMIC) {
422       pt_dynamic_offset = phdr->p_offset;
423       pt_dynamic_filesz = phdr->p_filesz;
424     }
425   }
426 
427   if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
428     if (DL_ERROR_AFTER(26, "\"%s\" .dynamic section has invalid offset: 0x%zx, "
429                        "expected to match PT_DYNAMIC offset: 0x%zx",
430                        name_.c_str(),
431                        static_cast<size_t>(dynamic_shdr->sh_offset),
432                        pt_dynamic_offset)) {
433       return false;
434     }
435     add_dlwarning(name_.c_str(), "invalid .dynamic section");
436   }
437 
438   if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
439     if (DL_ERROR_AFTER(26, "\"%s\" .dynamic section has invalid size: 0x%zx "
440                        "(expected to match PT_DYNAMIC filesz 0x%zx)",
441                        name_.c_str(),
442                        static_cast<size_t>(dynamic_shdr->sh_size),
443                        pt_dynamic_filesz)) {
444       return false;
445     }
446     add_dlwarning(name_.c_str(), "invalid .dynamic section");
447   }
448 
449   if (dynamic_shdr->sh_link >= shdr_num_) {
450     DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
451                    name_.c_str(),
452                    dynamic_shdr->sh_link);
453     return false;
454   }
455 
456   const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];
457 
458   if (strtab_shdr->sh_type != SHT_STRTAB) {
459     DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
460                    name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
461     return false;
462   }
463 
464   if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
465     DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
466     return false;
467   }
468 
469   if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
470     DL_ERR("\"%s\" dynamic section mmap failed: %m", name_.c_str());
471     return false;
472   }
473 
474   dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());
475 
476   if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
477     DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
478                    name_.c_str());
479     return false;
480   }
481 
482   if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
483     DL_ERR("\"%s\" strtab section mmap failed: %m", name_.c_str());
484     return false;
485   }
486 
487   strtab_ = static_cast<const char*>(strtab_fragment_.data());
488   strtab_size_ = strtab_fragment_.size();
489   return true;
490 }
491 
492 /* Returns the size of the extent of all the possibly non-contiguous
493  * loadable segments in an ELF program header table. This corresponds
494  * to the page-aligned size in bytes that needs to be reserved in the
495  * process' address space. If there are no loadable segments, 0 is
496  * returned.
497  *
498  * If out_min_vaddr or out_max_vaddr are not null, they will be
499  * set to the minimum and maximum addresses of pages to be reserved,
500  * or 0 if there is nothing to load.
501  */
502 size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
503                                 ElfW(Addr)* out_min_vaddr,
504                                 ElfW(Addr)* out_max_vaddr) {
505   ElfW(Addr) min_vaddr = UINTPTR_MAX;
506   ElfW(Addr) max_vaddr = 0;
507 
508   bool found_pt_load = false;
509   for (size_t i = 0; i < phdr_count; ++i) {
510     const ElfW(Phdr)* phdr = &phdr_table[i];
511 
512     if (phdr->p_type != PT_LOAD) {
513       continue;
514     }
515     found_pt_load = true;
516 
517     if (phdr->p_vaddr < min_vaddr) {
518       min_vaddr = phdr->p_vaddr;
519     }
520 
521     if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
522       max_vaddr = phdr->p_vaddr + phdr->p_memsz;
523     }
524   }
525   if (!found_pt_load) {
526     min_vaddr = 0;
527   }
528 
529   min_vaddr = page_start(min_vaddr);
530   max_vaddr = page_end(max_vaddr);
531 
532   if (out_min_vaddr != nullptr) {
533     *out_min_vaddr = min_vaddr;
534   }
535   if (out_max_vaddr != nullptr) {
536     *out_max_vaddr = max_vaddr;
537   }
538   return max_vaddr - min_vaddr;
539 }
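/* Editor's note: a small worked example (not from the original source), reusing the
 * two segments from the technical note above and assuming 4KiB pages:
 *
 *     [ vaddr:0x30000, memsz:0x4000 ]  and  [ vaddr:0x40000, memsz:0x8000 ]
 *
 *     min_vaddr = page_start(0x30000) = 0x30000
 *     max_vaddr = page_end(0x48000)   = 0x48000
 *     returned size = 0x48000 - 0x30000 = 0x18000 (96KiB),
 *
 * even though only 0xC000 bytes of segment memory are described, because the
 * reservation must also cover the hole between the two segments.
 */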
540 
541 bool ElfReader::CheckProgramHeaderAlignment() {
542   max_align_ = min_align_ = page_size();
543 
544   for (size_t i = 0; i < phdr_num_; ++i) {
545     const ElfW(Phdr)* phdr = &phdr_table_[i];
546 
547     if (phdr->p_type != PT_LOAD) {
548       continue;
549     }
550 
551     // For loadable segments, p_align must be 0, 1,
552     // or a positive, integral power of two.
553     // The kernel ignores loadable segments with other values,
554     // so we just warn rather than reject them.
555     if ((phdr->p_align & (phdr->p_align - 1)) != 0) {
556       DL_WARN("\"%s\" has invalid p_align %zx in phdr %zu", name_.c_str(),
557                      static_cast<size_t>(phdr->p_align), i);
558       continue;
559     }
560 
561     max_align_ = std::max(max_align_, static_cast<size_t>(phdr->p_align));
562 
563     if (phdr->p_align > 1) {
564       min_align_ = std::min(min_align_, static_cast<size_t>(phdr->p_align));
565     }
566   }
567 
568   return true;
569 }
570 
571 // Reserve a virtual address range such that if its limits were extended to the next 2**align
572 // boundary, it would not overlap with any existing mappings.
573 static void* ReserveWithAlignmentPadding(size_t size, size_t mapping_align, size_t start_align,
574                                          void** out_gap_start, size_t* out_gap_size) {
575   int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
576   // Reserve enough space to properly align the library's start address.
577   mapping_align = std::max(mapping_align, start_align);
578   if (mapping_align == page_size()) {
579     void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
580     if (mmap_ptr == MAP_FAILED) {
581       return nullptr;
582     }
583     return mmap_ptr;
584   }
585 
586   // Minimum alignment of shared library gap. For efficiency, this should match the second level
587   // page size of the platform.
588 #if defined(__LP64__)
589   constexpr size_t kGapAlignment = 2 * 1024 * 1024;
590 #endif
591   // Maximum gap size, in the units of kGapAlignment.
592   constexpr size_t kMaxGapUnits = 32;
593   // Allocate enough space so that the end of the desired region aligned up is still inside the
594   // mapping.
595   size_t mmap_size = __builtin_align_up(size, mapping_align) + mapping_align - page_size();
596   uint8_t* mmap_ptr =
597       reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
598   if (mmap_ptr == MAP_FAILED) {
599     return nullptr;
600   }
601   size_t gap_size = 0;
602   size_t first_byte = reinterpret_cast<size_t>(__builtin_align_up(mmap_ptr, mapping_align));
603   size_t last_byte = reinterpret_cast<size_t>(__builtin_align_down(mmap_ptr + mmap_size, mapping_align) - 1);
604 #if defined(__LP64__)
605   if (first_byte / kGapAlignment != last_byte / kGapAlignment) {
606     // This library crosses a 2MB boundary and will fragment a new huge page.
607     // Let's take advantage of that and insert a random number of inaccessible huge pages before that
608     // to improve address randomization and make it harder to locate this library code by probing.
609     munmap(mmap_ptr, mmap_size);
610     mapping_align = std::max(mapping_align, kGapAlignment);
611     gap_size =
612         kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
613     mmap_size = __builtin_align_up(size + gap_size, mapping_align) + mapping_align - page_size();
614     mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
615     if (mmap_ptr == MAP_FAILED) {
616       return nullptr;
617     }
618   }
619 #endif
620 
621   uint8_t* gap_end = mmap_ptr + mmap_size;
622 #if defined(__LP64__)
623   if (gap_size) {
624     gap_end = __builtin_align_down(gap_end, kGapAlignment);
625   }
626 #endif
627   uint8_t* gap_start = gap_end - gap_size;
628 
629   uint8_t* first = __builtin_align_up(mmap_ptr, mapping_align);
630   uint8_t* last = __builtin_align_down(gap_start, mapping_align) - size;
631 
632   // arc4random* is not available in first stage init because /dev/urandom hasn't yet been
633   // created. Don't randomize then.
634   size_t n = is_first_stage_init() ? 0 : arc4random_uniform((last - first) / start_align + 1);
635   uint8_t* start = first + n * start_align;
636   // Unmap the extra space around the allocation.
637   // Keep it mapped PROT_NONE on 64-bit targets where address space is plentiful to make it harder
638   // to defeat ASLR by probing for readable memory mappings.
639   munmap(mmap_ptr, start - mmap_ptr);
640   munmap(start + size, gap_start - (start + size));
641   if (gap_end != mmap_ptr + mmap_size) {
642     munmap(gap_end, mmap_ptr + mmap_size - gap_end);
643   }
644   *out_gap_start = gap_start;
645   *out_gap_size = gap_size;
646   return start;
647 }
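/* Editor's note: an illustrative walk-through of the padding math above, with
 * hypothetical numbers (4KiB pages, size = 0x5000, mapping_align = 0x10000):
 *
 *     mmap_size = align_up(0x5000, 0x10000) + 0x10000 - 0x1000 = 0x1f000
 *
 * Wherever the kernel places the 0x1f000-byte PROT_NONE reservation, it always
 * contains at least one 0x10000-aligned start address with room for the full
 * 0x5000 bytes after it; the unused slack is then unmapped (or, on 64-bit, partly
 * kept as a PROT_NONE gap for the randomized huge-page padding).
 */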
648 
649 // Reserve a virtual address range big enough to hold all loadable
650 // segments of a program header table. This is done by creating a
651 // private anonymous mmap() with PROT_NONE.
652 bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
653   ElfW(Addr) min_vaddr;
654   load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
655   if (load_size_ == 0) {
656     DL_ERR("\"%s\" has no loadable segments", name_.c_str());
657     return false;
658   }
659 
660   if (should_use_16kib_app_compat_) {
661     // Reserve additional space for aligning the permission boundary in compat loading.
662     // Up to kPageSize-kCompatPageSize additional space is needed, but the reservation
663     // is done with mmap, which gives kPageSize multiple-sized reservations.
664     load_size_ += kPageSize;
665   }
666 
667   uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
668   void* start;
669 
670   if (load_size_ > address_space->reserved_size) {
671     if (address_space->must_use_address) {
672       DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
673              load_size_ - address_space->reserved_size, load_size_, name_.c_str());
674       return false;
675     }
676     size_t start_alignment = page_size();
677     if (get_transparent_hugepages_supported() && get_application_target_sdk_version() >= 31) {
678       // Limit alignment to PMD size as other alignments reduce the number of
679       // bits available for ASLR for no benefit.
680       start_alignment = max_align_ == kPmdSize ? kPmdSize : page_size();
681     }
682     start = ReserveWithAlignmentPadding(load_size_, kLibraryAlignment, start_alignment, &gap_start_,
683                                         &gap_size_);
684     if (start == nullptr) {
685       DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
686       return false;
687     }
688   } else {
689     start = address_space->start_addr;
690     gap_start_ = nullptr;
691     gap_size_ = 0;
692     mapped_by_caller_ = true;
693 
694     // Update the reserved address space to subtract the space used by this library.
695     address_space->start_addr = reinterpret_cast<uint8_t*>(address_space->start_addr) + load_size_;
696     address_space->reserved_size -= load_size_;
697   }
698 
699   load_start_ = start;
700   load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
701 
702   if (should_use_16kib_app_compat_) {
703     // In compat mode, make the initial mapping RW since the ELF contents will be read
704     // into it instead of mapped over it.
705     mprotect(reinterpret_cast<void*>(start), load_size_, PROT_READ | PROT_WRITE);
706   }
707 
708   return true;
709 }
710 
711 /*
712  * Returns true if the kernel supports page size migration for this process.
713  */
714 bool page_size_migration_supported() {
715 #if defined(__LP64__)
716   static bool pgsize_migration_enabled = []() {
717     std::string enabled;
718     if (!android::base::ReadFileToString("/sys/kernel/mm/pgsize_migration/enabled", &enabled)) {
719       return false;
720     }
721     return enabled.find("1") != std::string::npos;
722   }();
723   return pgsize_migration_enabled;
724 #else
725   return false;
726 #endif
727 }
728 
729 // Find the ELF note of type NT_ANDROID_TYPE_PAD_SEGMENT and check that the desc value is 1.
730 bool ElfReader::ReadPadSegmentNote() {
731   if (!page_size_migration_supported()) {
732     // Don't attempt to read the note, since segment extension isn't
733     // supported, but return true so that loading can continue normally.
734     return true;
735   }
736 
737   // The ELF can have multiple PT_NOTEs; check them all.
738   for (size_t i = 0; i < phdr_num_; ++i) {
739     const ElfW(Phdr)* phdr = &phdr_table_[i];
740 
741     if (phdr->p_type != PT_NOTE) {
742       continue;
743     }
744 
745     // Some obfuscated ELFs may contain "empty" PT_NOTE program headers that don't
746     // point to any part of the ELF (p_memsz == 0). Skip these since there is
747     // nothing to decode. See: b/324468126
748     if (phdr->p_memsz == 0) {
749       continue;
750     }
751 
752     // Reject notes that claim to extend past the end of the file.
753     off64_t note_end_off = file_offset_;
754     if (__builtin_add_overflow(note_end_off, phdr->p_offset, &note_end_off) ||
755         __builtin_add_overflow(note_end_off, phdr->p_filesz, &note_end_off) ||
756         phdr->p_filesz != phdr->p_memsz ||
757         note_end_off > file_size_) {
758 
759       if (get_application_target_sdk_version() < 37) {
760         // Some in-market apps have invalid ELF notes (http://b/390328213),
761         // so ignore them until/unless they bump their target sdk version.
762         continue;
763       }
764 
765       DL_ERR_AND_LOG("\"%s\": ELF note (phdr %zu) runs off end of file", name_.c_str(), i);
766       return false;
767     }
768 
769     // We scope note_fragment to within the loop so that there is
770     // at most one PT_NOTE mapped at any time.
771     MappedFileFragment note_fragment;
772     if (!note_fragment.Map(fd_, file_offset_, phdr->p_offset, phdr->p_filesz)) {
773       DL_ERR("\"%s\": PT_NOTE mmap(nullptr, %p, PROT_READ, MAP_PRIVATE, %d, %p) failed: %m",
774              name_.c_str(), reinterpret_cast<void*>(phdr->p_filesz), fd_,
775              reinterpret_cast<void*>(page_start(file_offset_ + phdr->p_offset)));
776       return false;
777     }
778 
779     const ElfW(Nhdr)* note_hdr = nullptr;
780     const char* note_desc = nullptr;
781     if (!__get_elf_note(NT_ANDROID_TYPE_PAD_SEGMENT, "Android",
782                         reinterpret_cast<ElfW(Addr)>(note_fragment.data()),
783                         phdr, &note_hdr, &note_desc)) {
784       continue;
785     }
786 
787     if (note_hdr->n_descsz != sizeof(ElfW(Word))) {
788       DL_ERR("\"%s\": NT_ANDROID_TYPE_PAD_SEGMENT note has unexpected n_descsz: %u",
789              name_.c_str(), reinterpret_cast<unsigned int>(note_hdr->n_descsz));
790       return false;
791     }
792 
793     // 1 == enabled, 0 == disabled
794     should_pad_segments_ = *reinterpret_cast<const ElfW(Word)*>(note_desc) == 1;
795     return true;
796   }
797 
798   return true;
799 }
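/* Editor's note: for reference, a hedged sketch (values assumed) of what a matching
 * PT_NOTE payload parsed by ReadPadSegmentNote() looks like, in standard ELF note
 * layout and little-endian byte order:
 *
 *     ElfW(Nhdr) { n_namesz = 8, n_descsz = 4, n_type = NT_ANDROID_TYPE_PAD_SEGMENT }
 *     name: "Android\0"        // padded to a 4-byte boundary
 *     desc: ElfW(Word) 1       // 1 == pad (extend) segments, 0 == don't
 */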
800 
801 static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
802                                             size_t phdr_idx, ElfW(Addr)* p_memsz,
803                                             ElfW(Addr)* p_filesz, bool should_pad_segments,
804                                             bool should_use_16kib_app_compat) {
805   // NOTE: Segment extension is only applicable where the ELF's max-page-size > runtime page
806   // size, in order to save kernel VMA slab memory. 16KiB compat mode is the exact opposite scenario.
807   if (should_use_16kib_app_compat) {
808     return;
809   }
810 
811   const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
812   const ElfW(Phdr)* next = nullptr;
813   size_t next_idx = phdr_idx + 1;
814 
815   // Don't do segment extension for p_align > 64KiB; such ELFs already exist in the
816   // field (e.g. 2MiB p_align for THPs) and are relatively small in number.
817   //
818   // The kernel can only represent padding for p_align up to 64KiB. This is because
819   // the kernel uses 4 available bits in the vm_area_struct to represent padding
820   // extent; and so cannot enable mitigations to avoid breaking app compatibility for
821   // p_aligns > 64KiB.
822   //
823   // Don't perform segment extension on these to avoid app compatibility issues.
824   if (phdr->p_align <= kPageSize || phdr->p_align > 64*1024 || !should_pad_segments) {
825     return;
826   }
827 
828   if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
829     next = &phdr_table[next_idx];
830   }
831 
832   // If this is the last LOAD segment, or it has a BSS tail (p_memsz != p_filesz), no extension is needed.
833   if (!next || *p_memsz != *p_filesz) {
834     return;
835   }
836 
837   ElfW(Addr) next_start = page_start(next->p_vaddr);
838   ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);
839 
840   // If adjacent segment mappings overlap, no extension is needed.
841   if (curr_end >= next_start) {
842     return;
843   }
844 
845   // Extend the LOAD segment mapping to be contiguous with that of
846   // the next LOAD segment.
847   ElfW(Addr) extend = next_start - curr_end;
848   *p_memsz += extend;
849   *p_filesz += extend;
850 }
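/* Editor's note: a worked example of the extension above (values assumed). On a
 * 4KiB-page kernel, loading an ELF built with 16KiB max-page-size:
 *
 *     curr_end   = page_end(phdr->p_vaddr + p_memsz)  = 0x34000
 *     next_start = page_start(next->p_vaddr)          = 0x38000
 *     extend     = 0x38000 - 0x34000                  = 0x4000
 *
 * p_memsz and p_filesz are both grown by 0x4000, so the two LOAD mappings become
 * contiguous and the kernel does not have to track a separate VMA for the gap.
 */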
851 
852 bool ElfReader::MapSegment(size_t seg_idx, size_t len) {
853   const ElfW(Phdr)* phdr = &phdr_table_[seg_idx];
854 
855   void* start = reinterpret_cast<void*>(page_start(phdr->p_vaddr + load_bias_));
856 
857   // The ELF may be loaded directly from a zipped APK; in that case
858   // the zip offset must be added to find the segment offset.
859   const ElfW(Addr) offset = file_offset_ + page_start(phdr->p_offset);
860 
861   int prot = PFLAGS_TO_PROT(phdr->p_flags);
862 
863   void* seg_addr = mmap64(start, len, prot, MAP_FIXED | MAP_PRIVATE, fd_, offset);
864 
865   if (seg_addr == MAP_FAILED) {
866     DL_ERR("couldn't map \"%s\" segment %zd: %m", name_.c_str(), seg_idx);
867     return false;
868   }
869 
870   // Mark segments as huge page eligible if they meet the requirements
871   if ((phdr->p_flags & PF_X) && phdr->p_align == kPmdSize &&
872       get_transparent_hugepages_supported()) {
873     madvise(seg_addr, len, MADV_HUGEPAGE);
874   }
875 
876   return true;
877 }
878 
879 void ElfReader::ZeroFillSegment(const ElfW(Phdr)* phdr) {
880   // NOTE: In 16KiB app compat mode, the ELF mapping is anonymous, meaning that
881   // RW segments are COW-ed from the kernel's zero page. So there is no need to
882   // explicitly zero-fill until the last page's limit.
883   if (should_use_16kib_app_compat_) {
884     return;
885   }
886 
887   ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
888   uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;
889 
890   // If the segment is writable, and does not end on a page boundary,
891   // zero-fill it until the page limit.
892   //
893   // Do not attempt to zero the extended region past the first partial page,
894   // since doing so may:
895   //   1) Result in a SIGBUS, as the region is not backed by the underlying
896   //      file.
897   //   2) Break the COW backing, faulting in new anon pages for a region
898   //      that will not be used.
899   if ((phdr->p_flags & PF_W) != 0 && page_offset(unextended_seg_file_end) > 0) {
900     memset(reinterpret_cast<void*>(unextended_seg_file_end), 0,
901            kPageSize - page_offset(unextended_seg_file_end));
902   }
903 }
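/* Editor's note: the memset() length above is just the tail of the last file-backed
 * page; e.g. (hypothetically) with kPageSize == 4096 and a writable segment whose
 * file content ends at address 0x...7234:
 *
 *     page_offset(0x7234)  = 0x234
 *     bytes zero-filled    = 4096 - 0x234 = 0xdcc
 */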
904 
905 void ElfReader::DropPaddingPages(const ElfW(Phdr)* phdr, uint64_t seg_file_end) {
906   // NOTE: Padding pages are only applicable where the ELF's max-page-size > runtime page size;
907   // 16KiB compat mode is the exact opposite scenario.
908   if (should_use_16kib_app_compat_) {
909     return;
910   }
911 
912   ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
913   uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;
914 
915   uint64_t pad_start = page_end(unextended_seg_file_end);
916   uint64_t pad_end = page_end(seg_file_end);
917   CHECK(pad_start <= pad_end);
918 
919   uint64_t pad_len = pad_end - pad_start;
920   if (pad_len == 0 || !page_size_migration_supported()) {
921     return;
922   }
923 
924   // Pages may be brought in due to readahead.
925   // Drop the padding (zero) pages, to avoid reclaim work later.
926   //
927   // NOTE: The madvise() here is special, as it also serves to hint to the
928   // kernel the portion of the LOAD segment that is padding.
929   //
930   // See: [1] https://android-review.googlesource.com/c/kernel/common/+/3032411
931   //      [2] https://android-review.googlesource.com/c/kernel/common/+/3048835
932   if (madvise(reinterpret_cast<void*>(pad_start), pad_len, MADV_DONTNEED)) {
933     DL_WARN("\"%s\": madvise(0x%" PRIx64 ", 0x%" PRIx64 ", MADV_DONTNEED) failed: %m",
934             name_.c_str(), pad_start, pad_len);
935   }
936 }
937 
938 bool ElfReader::MapBssSection(const ElfW(Phdr)* phdr, ElfW(Addr) seg_page_end,
939                               ElfW(Addr) seg_file_end) {
940   // NOTE: We do not need to handle .bss in 16KiB compat mode since the mapping
941   // reservation is anonymous and RW to begin with.
942   if (should_use_16kib_app_compat_) {
943     return true;
944   }
945 
946   // seg_file_end is now the first page address after the file content.
947   seg_file_end = page_end(seg_file_end);
948 
949   if (seg_page_end <= seg_file_end) {
950     return true;
951   }
952 
953   // If seg_page_end is larger than seg_file_end, we need to zero
954   // anything between them. This is done by using a private anonymous
955   // map for all extra pages.
956   size_t zeromap_size = seg_page_end - seg_file_end;
957   void* zeromap =
958       mmap(reinterpret_cast<void*>(seg_file_end), zeromap_size, PFLAGS_TO_PROT(phdr->p_flags),
959            MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
960   if (zeromap == MAP_FAILED) {
961     DL_ERR("couldn't map .bss section for \"%s\": %m", name_.c_str());
962     return false;
963   }
964 
965   // Set the VMA name using prctl
966   prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
967 
968   return true;
969 }
970 
971 bool ElfReader::LoadSegments() {
972   // NOTE: The compat(legacy) page size (4096) must be used when aligning
973   // the 4KiB segments for loading in compat mode. The larger 16KiB page size
974   // will lead to overwriting adjacent segments since the ELF's segment(s)
975   // are not 16KiB aligned.
976   size_t seg_align = should_use_16kib_app_compat_ ? kCompatPageSize : kPageSize;
977 
978   // Only enforce this on 16 KB systems with app compat disabled.
979   // Apps may rely on undefined behavior here on 4 KB systems,
980   // which was the norm before this change was introduced.
981   if (kPageSize >= 16384 && min_align_ < kPageSize && !should_use_16kib_app_compat_) {
982     DL_ERR_AND_LOG("\"%s\" program alignment (%zu) cannot be smaller than system page size (%zu)",
983                    name_.c_str(), min_align_, kPageSize);
984     return false;
985   }
986 
987   if (!Setup16KiBAppCompat()) {
988     DL_ERR("\"%s\" failed to setup 16KiB App Compat", name_.c_str());
989     return false;
990   }
991 
992   for (size_t i = 0; i < phdr_num_; ++i) {
993     const ElfW(Phdr)* phdr = &phdr_table_[i];
994 
995     if (phdr->p_type != PT_LOAD) {
996       continue;
997     }
998 
999     ElfW(Addr) p_memsz = phdr->p_memsz;
1000     ElfW(Addr) p_filesz = phdr->p_filesz;
1001     _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_,
1002                              should_use_16kib_app_compat_);
1003 
1004     // Segment addresses in memory.
1005     ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
1006     ElfW(Addr) seg_end = seg_start + p_memsz;
1007 
1008     ElfW(Addr) seg_page_end = __builtin_align_up(seg_end, seg_align);
1009 
1010     ElfW(Addr) seg_file_end = seg_start + p_filesz;
1011 
1012     // File offsets.
1013     ElfW(Addr) file_start = phdr->p_offset;
1014     ElfW(Addr) file_end = file_start + p_filesz;
1015 
1016     ElfW(Addr) file_page_start = __builtin_align_down(file_start, seg_align);
1017     ElfW(Addr) file_length = file_end - file_page_start;
1018 
1019     if (file_size_ <= 0) {
1020       DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
1021       return false;
1022     }
1023 
1024     if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
1025       DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
1026           " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
1027           name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
1028           reinterpret_cast<void*>(phdr->p_filesz),
1029           reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
1030       return false;
1031     }
1032 
1033     if (file_length != 0) {
1034       int prot = PFLAGS_TO_PROT(phdr->p_flags);
1035       if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
1036         if (DL_ERROR_AFTER(26, "\"%s\" has load segments that are both writable and executable",
1037                            name_.c_str())) {
1038           return false;
1039         }
1040         add_dlwarning(name_.c_str(), "W+E load segments");
1041       }
1042 
1043       // Pass the file_length, since it may have been extended by _extend_load_segment_vma().
1044       if (should_use_16kib_app_compat_) {
1045         if (!CompatMapSegment(i, file_length)) {
1046           return false;
1047         }
1048       } else {
1049         if (!MapSegment(i, file_length)) {
1050           return false;
1051         }
1052       }
1053     }
1054 
1055     ZeroFillSegment(phdr);
1056 
1057     DropPaddingPages(phdr, seg_file_end);
1058 
1059     if (!MapBssSection(phdr, seg_page_end, seg_file_end)) {
1060       return false;
1061     }
1062   }
1063   return true;
1064 }
1065 
1066 /* Used internally. Sets the protection bits of all loaded segments,
1067  * with optional extra flags (i.e. really PROT_WRITE). Used by
1068  * phdr_table_protect_segments and phdr_table_unprotect_segments.
1069  */
1070 static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1071                                      ElfW(Addr) load_bias, int extra_prot_flags,
1072                                      bool should_pad_segments, bool should_use_16kib_app_compat) {
1073   for (size_t i = 0; i < phdr_count; ++i) {
1074     const ElfW(Phdr)* phdr = &phdr_table[i];
1075 
1076     if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
1077       continue;
1078     }
1079 
1080     ElfW(Addr) p_memsz = phdr->p_memsz;
1081     ElfW(Addr) p_filesz = phdr->p_filesz;
1082     _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments,
1083                              should_use_16kib_app_compat);
1084 
1085     ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
1086     ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
1087 
1088     int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
1089     if ((prot & PROT_WRITE) != 0) {
1090       // make sure we're never simultaneously writable / executable
1091       prot &= ~PROT_EXEC;
1092     }
1093 #if defined(__aarch64__)
1094     if ((prot & PROT_EXEC) == 0) {
1095     // Though it is not specified, don't add PROT_BTI if the segment is not
1096       // executable.
1097       prot &= ~PROT_BTI;
1098     }
1099 #endif
1100 
1101     int ret =
1102         mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, prot);
1103     if (ret < 0) {
1104       return -1;
1105     }
1106   }
1107   return 0;
1108 }
1109 
1110 /* Restore the original protection modes for all loadable segments.
1111  * You should only call this after phdr_table_unprotect_segments and
1112  * applying all relocations.
1113  *
1114  * AArch64: also called from linker_main and ElfReader::Load to apply
1115  *     PROT_BTI to the loaded main .so and other .so files.
1116  *
1117  * Input:
1118  *   phdr_table  -> program header table
1119  *   phdr_count  -> number of entries in tables
1120  *   load_bias   -> load bias
1121  *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
1122  *   should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode.
1123  *   prop        -> GnuPropertySection or nullptr
1124  * Return:
1125  *   0 on success, -1 on failure (error code in errno).
1126  */
1127 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1128                                 ElfW(Addr) load_bias, bool should_pad_segments,
1129                                 bool should_use_16kib_app_compat,
1130                                 const GnuPropertySection* prop __unused) {
1131   int prot = 0;
1132 #if defined(__aarch64__)
1133   if ((prop != nullptr) && prop->IsBTICompatible()) {
1134     prot |= PROT_BTI;
1135   }
1136 #endif
1137   return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments,
1138                                    should_use_16kib_app_compat);
1139 }
1140 
1141 static bool segment_needs_memtag_globals_remapping(const ElfW(Phdr) * phdr) {
1142   // For now, MTE globals are only supported on writable data segments.
1143   return phdr->p_type == PT_LOAD && !(phdr->p_flags & PF_X) && (phdr->p_flags & PF_W);
1144 }
1145 
1146 /* When MTE globals are requested by the binary, and when the hardware supports
1147  * it, remap the executable's PT_LOAD data pages to have PROT_MTE.
1148  *
1149  * Returns 0 on success, -1 on failure (error code in errno).
1150  */
1151 int remap_memtag_globals_segments(const ElfW(Phdr) * phdr_table __unused,
1152                                   size_t phdr_count __unused, ElfW(Addr) load_bias __unused) {
1153 #if defined(__aarch64__)
1154   for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_table + phdr_count; phdr++) {
1155     if (!segment_needs_memtag_globals_remapping(phdr)) {
1156       continue;
1157     }
1158 
1159     uintptr_t seg_page_start = page_start(phdr->p_vaddr) + load_bias;
1160     uintptr_t seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
1161     size_t seg_page_aligned_size = seg_page_end - seg_page_start;
1162 
1163     int prot = PFLAGS_TO_PROT(phdr->p_flags);
1164     // For anonymous private mappings, it may be possible to simply mprotect()
1165     // the PROT_MTE flag over the top. For file-based mappings, this will fail,
1166     // and we'll need to fall back. We also allow PROT_WRITE here to allow
1167     // writing memory tags (in `soinfo::tag_globals()`), and set these sections
1168     // back to read-only after tags are applied (similar to RELRO).
1169     prot |= PROT_MTE;
1170     if (mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_aligned_size,
1171                  prot | PROT_WRITE) == 0) {
1172       continue;
1173     }
1174 
1175     void* mapping_copy = mmap(nullptr, seg_page_aligned_size, PROT_READ | PROT_WRITE,
1176                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1177     linker_memcpy(mapping_copy, reinterpret_cast<void*>(seg_page_start), seg_page_aligned_size);
1178 
1179     void* seg_addr = mmap(reinterpret_cast<void*>(seg_page_start), seg_page_aligned_size,
1180                           prot | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1181     if (seg_addr == MAP_FAILED) return -1;
1182 
1183     linker_memcpy(seg_addr, mapping_copy, seg_page_aligned_size);
1184     munmap(mapping_copy, seg_page_aligned_size);
1185   }
1186 #endif  // defined(__aarch64__)
1187   return 0;
1188 }
1189 
1190 void protect_memtag_globals_ro_segments(const ElfW(Phdr) * phdr_table __unused,
1191                                         size_t phdr_count __unused, ElfW(Addr) load_bias __unused) {
1192 #if defined(__aarch64__)
1193   for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_table + phdr_count; phdr++) {
1194     int prot = PFLAGS_TO_PROT(phdr->p_flags);
1195     if (!segment_needs_memtag_globals_remapping(phdr) || (prot & PROT_WRITE)) {
1196       continue;
1197     }
1198 
1199     prot |= PROT_MTE;
1200 
1201     uintptr_t seg_page_start = page_start(phdr->p_vaddr) + load_bias;
1202     uintptr_t seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
1203     size_t seg_page_aligned_size = seg_page_end - seg_page_start;
1204     mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_aligned_size, prot);
1205   }
1206 #endif  // defined(__aarch64__)
1207 }
1208 
1209 void name_memtag_globals_segments(const ElfW(Phdr) * phdr_table, size_t phdr_count,
1210                                   ElfW(Addr) load_bias, const char* soname,
1211                                   std::list<std::string>* vma_names) {
1212   for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_table + phdr_count; phdr++) {
1213     if (!segment_needs_memtag_globals_remapping(phdr)) {
1214       continue;
1215     }
1216 
1217     uintptr_t seg_page_start = page_start(phdr->p_vaddr) + load_bias;
1218     uintptr_t seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
1219     size_t seg_page_aligned_size = seg_page_end - seg_page_start;
1220 
1221     // For file-based mappings that we're now forcing to be anonymous mappings, set the VMA name to
1222     // make debugging easier.
1223     // Once we are targeting only devices that run kernel 5.10 or newer (and thus include
1224     // https://android-review.git.corp.google.com/c/kernel/common/+/1934723 which causes the
1225     // VMA_ANON_NAME to be copied into the kernel), we can get rid of the storage here.
1226     // For now, that is not the case:
1227     // https://source.android.com/docs/core/architecture/kernel/android-common#compatibility-matrix
1228     constexpr int kVmaNameLimit = 80;
1229     std::string& vma_name = vma_names->emplace_back(kVmaNameLimit, '\0');
1230     int full_vma_length =
1231         async_safe_format_buffer(vma_name.data(), kVmaNameLimit, "mt:%s+%" PRIxPTR, soname,
1232                                  page_start(phdr->p_vaddr)) +
1233         /* include the null terminator */ 1;
1234     // There's an upper limit of 80 characters, including the null terminator, in the anonymous VMA
1235     // name. If we run over that limit, we end up truncating the segment offset and parts of the
1236     // DSO's name, starting on the right hand side of the basename. Because the basename is the most
1237     // important thing, chop off the soname from the left hand side first.
1238     //
1239     // Example (with '#' as the null terminator):
1240     //   - "mt:/data/nativetest64/bionic-unit-tests/bionic-loader-test-libs/libdlext_test.so+e000#"
1241     //     is a `full_vma_length` == 86.
1242     //
1243     // We need to left-truncate (86 - 80) 6 characters from the soname, plus the
1244     // `vma_truncation_prefix`, so 9 characters total.
1245     if (full_vma_length > kVmaNameLimit) {
1246       const char vma_truncation_prefix[] = "...";
1247       int soname_truncated_bytes =
1248           full_vma_length - kVmaNameLimit + sizeof(vma_truncation_prefix) - 1;
1249       async_safe_format_buffer(vma_name.data(), kVmaNameLimit, "mt:%s%s+%" PRIxPTR,
1250                                vma_truncation_prefix, soname + soname_truncated_bytes,
1251                                page_start(phdr->p_vaddr));
1252     }
1253     if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<void*>(seg_page_start),
1254               seg_page_aligned_size, vma_name.data()) != 0) {
1255       DL_WARN("Failed to rename memtag global segment: %m");
1256     }
1257   }
1258 }
1259 
1260 /* Change the protection of all loaded segments in memory to writable.
1261  * This is useful before performing relocations. Once completed, you
1262  * will have to call phdr_table_protect_segments to restore the original
1263  * protection flags on all segments.
1264  *
1265  * Note that some writable segments can also have their content turned
1266  * to read-only by calling phdr_table_protect_gnu_relro. This is not
1267  * performed here.
1268  *
1269  * Input:
1270  *   phdr_table  -> program header table
1271  *   phdr_count  -> number of entries in tables
1272  *   load_bias   -> load bias
1273  *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
1274  *   should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode.
1275  * Return:
1276  *   0 on success, -1 on failure (error code in errno).
1277  */
1278 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1279                                   ElfW(Addr) load_bias, bool should_pad_segments,
1280                                   bool should_use_16kib_app_compat) {
1281   return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
1282                                    should_pad_segments, should_use_16kib_app_compat);
1283 }
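/* Editor's note: a hedged sketch (not from the original source) of the sequence these
 * helpers are designed for, per the comments above; argument names are abbreviated:
 *
 *     phdr_table_unprotect_segments(phdr, count, bias, pad, compat);        // make RW
 *     // ... apply relocations ...
 *     phdr_table_protect_segments(phdr, count, bias, pad, compat, prop);    // restore
 *     phdr_table_protect_gnu_relro(phdr, count, bias, pad, compat);         // seal RELRO
 */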
1284 
1285 static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
1286                                               const ElfW(Phdr)* phdr_table, size_t phdr_count,
1287                                               ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
1288                                               bool should_pad_segments,
1289                                               bool should_use_16kib_app_compat) {
1290   // Find the index and phdr of the LOAD containing the GNU_RELRO segment
1291   for (size_t index = 0; index < phdr_count; ++index) {
1292     const ElfW(Phdr)* phdr = &phdr_table[index];
1293 
1294     if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
1295       // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
1296       // LOAD segment mem size, we need to protect only a partial region of the
1297       // LOAD segment and therefore cannot avoid a VMA split.
1298       //
1299       // Note: Don't check the page-aligned mem sizes since the extended protection
1300       // may incorrectly write protect non-relocation data.
1301       //
1302       // Example:
1303       //
1304       //               |---- 3K ----|-- 1K --|---- 3K ---- |-- 1K --|
1305       //       ----------------------------------------------------------------
1306       //               |            |        |             |        |
1307       //        SEG X  |     RO     |   RO   |     RW      |        |   SEG Y
1308       //               |            |        |             |        |
1309       //       ----------------------------------------------------------------
1310       //                            |        |             |
1311       //                            |        |             |
1312       //                            |        |             |
1313       //                    relro_vaddr   relro_vaddr   relro_vaddr
1314       //                    (load_vaddr)       +            +
1315       //                                  relro_memsz   load_memsz
1316       //
1317       //       ----------------------------------------------------------------
1318       //               |         PAGE        |         PAGE         |
1319       //       ----------------------------------------------------------------
1320       //                                     |       Potential      |
1321       //                                     |----- Extended RO ----|
1322       //                                     |      Protection      |
1323       //
1324       // If the check below used page-aligned mem sizes, it would cause incorrect write
1325       // protection of the 3K RW part of the LOAD segment containing the GNU_RELRO.
1326       if (relro_phdr->p_memsz < phdr->p_memsz) {
1327         return;
1328       }
1329 
1330       ElfW(Addr) p_memsz = phdr->p_memsz;
1331       ElfW(Addr) p_filesz = phdr->p_filesz;
1332 
1333       // Attempt extending the VMA (mprotect range). Without extending the range,
1334       // mprotect will only RO protect a part of the extended RW LOAD segment, which
1335       // will leave an extra split RW VMA (the gap).
1336       _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
1337                                should_pad_segments, should_use_16kib_app_compat);
1338 
1339       *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
1340       return;
1341     }
1342   }
1343 }
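// Worked example of the p_memsz check above, assuming 4KiB pages: if the
// GNU_RELRO segment covers only the first 4096 bytes of an 8192-byte LOAD
// segment, then 4096 < 8192 and we return without touching *seg_page_end,
// because extending the mprotect range would also write-protect the writable
// tail of that LOAD segment. Only when the RELRO segment spans the whole LOAD
// segment is the range extended out to the (possibly padded) segment end.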
1344 
1345 /* Used internally by phdr_table_protect_gnu_relro and
1346  * phdr_table_unprotect_gnu_relro.
1347  */
1348 static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1349                                           ElfW(Addr) load_bias, int prot_flags,
1350                                           bool should_pad_segments,
1351                                           bool should_use_16kib_app_compat) {
1352   const ElfW(Phdr)* phdr = phdr_table;
1353   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
1354 
1355   for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1356     if (phdr->p_type != PT_GNU_RELRO) {
1357       continue;
1358     }
1359 
1360     // Tricky: what happens when the relro segment does not start
1361     // or end at page boundaries? We're going to be over-protective
1362     // here and mark every page touched by the segment as read-only.
1363 
1364     // This seems to match Ian Lance Taylor's description of the
1365     // feature at http://www.airs.com/blog/archives/189.
1366 
1367     //    Extract:
1368     //       Note that the current dynamic linker code will only work
1369     //       correctly if the PT_GNU_RELRO segment starts on a page
1370     //       boundary. This is because the dynamic linker rounds the
1371     //       p_vaddr field down to the previous page boundary. If
1372     //       there is anything on the page which should not be read-only,
1373     //       the program is likely to fail at runtime. So in effect the
1374     //       linker must only emit a PT_GNU_RELRO segment if it ensures
1375     //       that it starts on a page boundary.
1376     ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
1377     ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
1378     _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
1379                                should_pad_segments, should_use_16kib_app_compat);
1380 
1381     int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
1382                        seg_page_end - seg_page_start,
1383                        prot_flags);
1384     if (ret < 0) {
1385       return -1;
1386     }
1387   }
1388   return 0;
1389 }
1390 
1391 /* Apply GNU relro protection if specified by the program header. This will
1392  * turn some of the pages of a writable PT_LOAD segment to read-only, as
1393  * specified by one or more PT_GNU_RELRO segments. This must always be
1394  * performed after relocations.
1395  *
1396  * The areas typically covered are .got and .data.rel.ro; these are
1397  * read-only from the program's POV, but contain absolute addresses
1398  * that need to be relocated before use.
1399  *
1400  * Input:
1401  *   phdr_table  -> program header table
1402  *   phdr_count  -> number of entries in the table
1403  *   load_bias   -> load bias
1404  *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
1405  *   should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode.
1406  * Return:
1407  *   0 on success, -1 on failure (error code in errno).
1408  */
1409 int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1410                                  ElfW(Addr) load_bias, bool should_pad_segments,
1411                                  bool should_use_16kib_app_compat) {
1412   return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
1413                                         should_pad_segments, should_use_16kib_app_compat);
1414 }
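// A minimal call sketch: after the relocation pass has finished writing into
// .got/.data.rel.ro, a caller is expected to do something like the following
// (the error message is illustrative only):
//
//   if (phdr_table_protect_gnu_relro(phdr_table, phdr_count, load_bias,
//                                    should_pad_segments,
//                                    should_use_16kib_app_compat) == -1) {
//     DL_ERR("can't protect GNU_RELRO segment: %m");
//   }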
1415 
1416 /*
1417  * Apply RX protection to the compat relro region of the ELF being loaded in
1418  * 16KiB compat mode.
1419  *
1420  * Input:
1421  *   start  -> start address of the compat relro region.
1422  *   size   -> size of the compat relro region in bytes.
1423  * Return:
1424  *   0 on success, -1 on failure (error code in errno).
1425  */
1426 int phdr_table_protect_gnu_relro_16kib_compat(ElfW(Addr) start, ElfW(Addr) size) {
1427   return mprotect(reinterpret_cast<void*>(start), size, PROT_READ | PROT_EXEC);
1428 }
1429 
1430 /* Serialize the GNU relro segments to the given file descriptor. This can be
1431  * performed after relocations to allow another process to later share the
1432  * relocated segment, if it was loaded at the same address.
1433  *
1434  * Input:
1435  *   phdr_table  -> program header table
1436  *   phdr_count  -> number of entries in the table
1437  *   load_bias   -> load bias
1438  *   fd          -> writable file descriptor to use
1439  *   file_offset -> pointer to offset into file descriptor to use/update
1440  * Return:
1441  *   0 on success, -1 on failure (error code in errno).
1442  */
1443 int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
1444                                    size_t phdr_count,
1445                                    ElfW(Addr) load_bias,
1446                                    int fd,
1447                                    size_t* file_offset) {
1448   const ElfW(Phdr)* phdr = phdr_table;
1449   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
1450 
1451   for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1452     if (phdr->p_type != PT_GNU_RELRO) {
1453       continue;
1454     }
1455 
1456     ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
1457     ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
1458     ssize_t size = seg_page_end - seg_page_start;
1459 
1460     ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
1461     if (written != size) {
1462       return -1;
1463     }
1464     void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
1465                      MAP_PRIVATE|MAP_FIXED, fd, *file_offset);
1466     if (map == MAP_FAILED) {
1467       return -1;
1468     }
1469     *file_offset += size;
1470   }
1471   return 0;
1472 }
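// Sketch of the writing side, assuming relro_fd is a writable descriptor such
// as the one a caller can pass in via android_dlopen_ext() with
// ANDROID_DLEXT_WRITE_RELRO (the fd plumbing lives outside this file):
//
//   size_t relro_fd_offset = 0;
//   if (phdr_table_serialize_gnu_relro(phdr_table, phdr_count, load_bias,
//                                      relro_fd, &relro_fd_offset) == -1) {
//     DL_ERR("can't serialize GNU_RELRO segment: %m");
//   }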
1473 
1474 /* Where possible, replace the GNU relro segments with mappings of the given
1475  * file descriptor. This can be performed after relocations to allow a file
1476  * previously created by phdr_table_serialize_gnu_relro in another process to
1477  * replace the dirty relocated pages, saving memory, if it was loaded at the
1478  * same address. We have to compare the data before we map over it, since some
1479  * parts of the relro segment may not be identical due to other libraries in
1480  * the process being loaded at different addresses.
1481  *
1482  * Input:
1483  *   phdr_table  -> program header table
1484  *   phdr_count  -> number of entries in the table
1485  *   load_bias   -> load bias
1486  *   fd          -> readable file descriptor to use
1487  *   file_offset -> pointer to offset into file descriptor to use/update
1488  * Return:
1489  *   0 on success, -1 on failure (error code in errno).
1490  */
1491 int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
1492                              size_t phdr_count,
1493                              ElfW(Addr) load_bias,
1494                              int fd,
1495                              size_t* file_offset) {
1496   // Map the file at a temporary location so we can compare its contents.
1497   struct stat file_stat;
1498   if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
1499     return -1;
1500   }
1501   off_t file_size = file_stat.st_size;
1502   void* temp_mapping = nullptr;
1503   if (file_size > 0) {
1504     temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
1505     if (temp_mapping == MAP_FAILED) {
1506       return -1;
1507     }
1508   }
1509 
1510   // Iterate over the relro segments and compare/remap the pages.
1511   const ElfW(Phdr)* phdr = phdr_table;
1512   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
1513 
1514   for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1515     if (phdr->p_type != PT_GNU_RELRO) {
1516       continue;
1517     }
1518 
1519     ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
1520     ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
1521 
1522     char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
1523     char* mem_base = reinterpret_cast<char*>(seg_page_start);
1524     size_t match_offset = 0;
1525     size_t size = seg_page_end - seg_page_start;
1526 
1527     if (file_size - *file_offset < size) {
1528       // File is too short to compare to this segment. The contents are likely
1529       // different as well (it's probably for a different library version) so
1530       // just don't bother checking.
1531       break;
1532     }
1533 
1534     while (match_offset < size) {
1535       // Skip over dissimilar pages.
1536       while (match_offset < size &&
1537              memcmp(mem_base + match_offset, file_base + match_offset, page_size()) != 0) {
1538         match_offset += page_size();
1539       }
1540 
1541       // Count similar pages.
1542       size_t mismatch_offset = match_offset;
1543       while (mismatch_offset < size &&
1544              memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, page_size()) == 0) {
1545         mismatch_offset += page_size();
1546       }
1547 
1548       // Map over similar pages.
1549       if (mismatch_offset > match_offset) {
1550         void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
1551                          PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, *file_offset + match_offset);
1552         if (map == MAP_FAILED) {
1553           munmap(temp_mapping, file_size);
1554           return -1;
1555         }
1556       }
1557 
1558       match_offset = mismatch_offset;
1559     }
1560 
1561     // Add to the base file offset in case there are multiple relro segments.
1562     *file_offset += size;
1563   }
1564   munmap(temp_mapping, file_size);
1565   return 0;
1566 }
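// Sketch of the reading side, mirroring the serialization example above and
// assuming the library was loaded at the same address as when relro_fd was
// written (otherwise the page-by-page memcmp finds little or nothing to map
// over). ANDROID_DLEXT_USE_RELRO is the corresponding android_dlopen_ext()
// flag; the fd plumbing again lives outside this file:
//
//   size_t relro_fd_offset = 0;
//   if (phdr_table_map_gnu_relro(phdr_table, phdr_count, load_bias,
//                                relro_fd, &relro_fd_offset) == -1) {
//     DL_WARN("couldn't reuse serialized GNU_RELRO segment: %m");
//   }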
1567 
1568 
1569 #if defined(__arm__)
1570 /* Return the address and size of the .ARM.exidx section in memory,
1571  * if present.
1572  *
1573  * Input:
1574  *   phdr_table  -> program header table
1575  *   phdr_count  -> number of entries in the table
1576  *   load_bias   -> load bias
1577  * Output:
1578  *   arm_exidx       -> address of table in memory (null on failure).
1579  *   arm_exidx_count -> number of items in table (0 on failure).
1580  * Return:
1581  *   0 on success, -1 on failure (_no_ error code in errno)
1582  */
1583 int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1584                              ElfW(Addr) load_bias,
1585                              ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
1586   const ElfW(Phdr)* phdr = phdr_table;
1587   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
1588 
1589   for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
1590     if (phdr->p_type != PT_ARM_EXIDX) {
1591       continue;
1592     }
1593 
1594     *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
1595     *arm_exidx_count = phdr->p_memsz / 8;
1596     return 0;
1597   }
1598   *arm_exidx = nullptr;
1599   *arm_exidx_count = 0;
1600   return -1;
1601 }
1602 #endif
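// Note on the p_memsz / 8 above: each .ARM.exidx entry is two 32-bit words
// (8 bytes). A rough caller sketch, with the handoff to the unwinder left
// abstract:
//
//   ElfW(Addr)* exidx = nullptr;
//   size_t exidx_count = 0;
//   if (phdr_table_get_arm_exidx(phdr_table, phdr_count, load_bias,
//                                &exidx, &exidx_count) == 0) {
//     // hand (exidx, exidx_count) to the EH ABI unwinder
//   }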
1603 
1604 /* Return the address and size of the ELF file's .dynamic section in memory,
1605  * or null if missing.
1606  *
1607  * Input:
1608  *   phdr_table  -> program header table
1609  *   phdr_count  -> number of entries in the table
1610  *   load_bias   -> load bias
1611  * Output:
1612  *   dynamic       -> address of table in memory (null on failure).
1613  *   dynamic_flags -> protection flags for section (unset on failure)
1614  * Return:
1615  *   void
1616  */
1617 void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1618                                     ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
1619                                     ElfW(Word)* dynamic_flags) {
1620   *dynamic = nullptr;
1621   for (size_t i = 0; i<phdr_count; ++i) {
1622     const ElfW(Phdr)& phdr = phdr_table[i];
1623     if (phdr.p_type == PT_DYNAMIC) {
1624       *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
1625       if (dynamic_flags) {
1626         *dynamic_flags = phdr.p_flags;
1627       }
1628       return;
1629     }
1630   }
1631 }
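// A minimal caller sketch: the returned pointer is the in-memory PT_DYNAMIC
// array, which per the ELF spec is terminated by a DT_NULL entry, so it can
// be walked like this (DT_SONAME is just an illustrative tag):
//
//   ElfW(Dyn)* dynamic = nullptr;
//   ElfW(Word) dynamic_flags = 0;
//   phdr_table_get_dynamic_section(phdr_table, phdr_count, load_bias,
//                                  &dynamic, &dynamic_flags);
//   if (dynamic != nullptr) {
//     for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
//       if (d->d_tag == DT_SONAME) { /* d->d_un.d_val indexes the strtab */ }
//     }
//   }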
1632 
1633 /* Return the program interpreter string, or nullptr if missing.
1634  *
1635  * Input:
1636  *   phdr_table  -> program header table
1637  *   phdr_count  -> number of entries in the table
1638  *   load_bias   -> load bias
1639  * Return:
1640  *   pointer to the program interpreter string.
1641  */
1642 const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
1643                                             ElfW(Addr) load_bias) {
1644   for (size_t i = 0; i<phdr_count; ++i) {
1645     const ElfW(Phdr)& phdr = phdr_table[i];
1646     if (phdr.p_type == PT_INTERP) {
1647       return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
1648     }
1649   }
1650   return nullptr;
1651 }
1652 
1653 // Sets loaded_phdr_ to the address of the program header table as it appears
1654 // in the loaded segments in memory. This is in contrast with phdr_table_,
1655 // which is temporary and will be released before the library is relocated.
1656 bool ElfReader::FindPhdr() {
1657   const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
1658 
1659   // If there is a PT_PHDR, use it directly.
1660   for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
1661     if (phdr->p_type == PT_PHDR) {
1662       return CheckPhdr(load_bias_ + phdr->p_vaddr);
1663     }
1664   }
1665 
1666   // Otherwise, check the first loadable segment. If its file offset
1667   // is 0, it starts with the ELF header, and we can trivially find the
1668   // loaded program header from it.
1669   for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
1670     if (phdr->p_type == PT_LOAD) {
1671       if (phdr->p_offset == 0) {
1672         ElfW(Addr)  elf_addr = load_bias_ + phdr->p_vaddr;
1673         const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
1674         ElfW(Addr)  offset = ehdr->e_phoff;
1675         return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
1676       }
1677       break;
1678     }
1679   }
1680 
1681   DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
1682   return false;
1683 }
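// Concretely, in the common case where the first PT_LOAD has p_offset == 0 and
// p_vaddr == 0, the fallback above reduces to load_bias_ + ehdr->e_phoff: the
// loaded program header table sits at its e_phoff offset from the mapped ELF
// header.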
1684 
1685 // Tries to find .note.gnu.property section.
1686 // It is not considered an error if such section is missing.
1687 bool ElfReader::FindGnuPropertySection() {
1688 #if defined(__aarch64__)
1689   note_gnu_property_ = GnuPropertySection(phdr_table_, phdr_num_, load_start(), name_.c_str());
1690 #endif
1691   return true;
1692 }
1693 
1694 // Ensures that our program header is actually within a loadable
1695 // segment. This should help catch badly-formed ELF files that
1696 // would cause the linker to crash later when trying to access it.
1697 bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
1698   const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
1699   ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
1700   for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
1701     if (phdr->p_type != PT_LOAD) {
1702       continue;
1703     }
1704     ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
1705     ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
1706     if (seg_start <= loaded && loaded_end <= seg_end) {
1707       loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
1708       return true;
1709     }
1710   }
1711   DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
1712          name_.c_str(), reinterpret_cast<void*>(loaded));
1713   return false;
1714 }
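// For scale: with phdr_num_ == 10 on a 64-bit target, loaded_end is
// loaded + 10 * sizeof(Elf64_Phdr) == loaded + 560 bytes, and the table is
// only accepted if that whole range lies within the file-backed portion
// (p_filesz) of some PT_LOAD segment.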
1715