/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/bionic_asm_note.h"
#include "private/CFIShadow.h" // For kLibraryAlignment
#include "private/elf_note.h"

#include <android-base/file.h>

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__riscv)
  return EM_RISCV;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)
    p_align   -> segment's in-memory and in-file alignment

  We will ignore the p_paddr field of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead the
  loader decides where to load the first segment, then will load all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - page_start(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
         possible wrap-around past UINT32_MAX for large p_vaddr values.)

  Note also that phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + page_offset(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      page_offset(phdr0->p_vaddr) == page_offset(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
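
// A minimal sketch of the formula above (illustrative only, not part of the
// linker; example_load_bias is a hypothetical name, page_start() is the helper
// used throughout this file):
//
//   ElfW(Addr) example_load_bias(ElfW(Addr) phdr0_load_address,
//                                const ElfW(Phdr)* phdr0) {
//     // phdr0_load_address is where the loader mapped the first PT_LOAD page.
//     return phdr0_load_address - page_start(phdr0->p_vaddr);
//   }
//
// With the example above, mapping vaddr 0x30000 at 0xa0030000 gives
// load_bias = 0xa0030000 - 0x30000 = 0xa0000000.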

#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
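// For instance, PFLAGS_TO_PROT(PF_R | PF_X) evaluates to PROT_READ | PROT_EXEC,
// i.e. a read-only, executable mapping for a typical text segment.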

static const size_t kPageSize = page_size();

/*
 * Generic PMD size calculation:
 *    - Each page table (PT) is of size 1 page.
 *    - Each page table entry (PTE) is of size 64 bits.
 *    - Each PTE locates one physical page frame (PFN) of size 1 page.
 *    - A PMD entry locates 1 page table (PT).
 *
 *   PMD size = Num entries in a PT * page_size
 */
static const size_t kPmdSize = (kPageSize / sizeof(uint64_t)) * kPageSize;
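// For example, with 4 KiB pages: kPmdSize = (4096 / 8) * 4096 = 2 MiB; with
// 16 KiB pages it would be (16384 / 8) * 16384 = 32 MiB.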

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection() &&
      ReadPadSegmentNote()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(address_space_params* address_space) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  bool reserveSuccess = ReserveAddressSpace(address_space);
  if (reserveSuccess && LoadSegments() && FindPhdr() &&
      FindGnuPropertySection()) {
    did_load_ = true;
#if defined(__aarch64__)
    // For Armv8.5-A, loaded executable segments may require PROT_BTI.
    if (note_gnu_property_.IsBTICompatible()) {
      did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
                                               should_pad_segments_, &note_gnu_property_) == 0);
    }
#endif
  }
  if (reserveSuccess && !did_load_) {
    if (load_start_ != nullptr && load_size_ != 0) {
      if (!mapped_by_caller_) {
        munmap(load_start_, load_size_);
      }
    }
  }

  return did_load_;
}

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_RISCV) return "EM_RISCV";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" is for %s (%d) instead of %s (%d)",
           name_.c_str(),
           EM_to_string(header_.e_machine), header_.e_machine,
           EM_to_string(GetTargetElfMachine()), GetTargetElfMachine());
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called to
  // check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none of
  // them can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_ [i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size match the PT_DYNAMIC phdr
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= 26) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(26,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = page_start(min_vaddr);
  max_vaddr = page_end(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}
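
// For reference, ElfReader::ReserveAddressSpace (below) consumes this value as:
//
//   ElfW(Addr) min_vaddr;
//   load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
//
// and then reserves load_size_ bytes whose base becomes load_start_, with
// load_bias_ = load_start_ - min_vaddr.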

// Returns the maximum p_align associated with a loadable segment in the ELF
// program header table. Used to determine whether the file should be loaded at
// a specific virtual address alignment for use with huge pages.
size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  size_t maximum_alignment = page_size();

  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    // p_align must be 0, 1, or a positive, integral power of two.
    if (phdr->p_type != PT_LOAD || ((phdr->p_align & (phdr->p_align - 1)) != 0)) {
      continue;
    }

    if (phdr->p_align > maximum_alignment) {
      maximum_alignment = phdr->p_align;
    }
  }

#if defined(__LP64__)
  return maximum_alignment;
#else
  return page_size();
#endif
}

// Reserve a virtual address range such that if its limits were extended to the next 2**align
// boundary, it would not overlap with any existing mappings.
static void* ReserveWithAlignmentPadding(size_t size, size_t mapping_align, size_t start_align,
                                         void** out_gap_start, size_t* out_gap_size) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Reserve enough space to properly align the library's start address.
  mapping_align = std::max(mapping_align, start_align);
  if (mapping_align == page_size()) {
    void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Minimum alignment of shared library gap. For efficiency, this should match the second level
  // page size of the platform.
#if defined(__LP64__)
  constexpr size_t kGapAlignment = 1ul << 21;  // 2MB
#else
  constexpr size_t kGapAlignment = 0;
#endif
  // Maximum gap size, in the units of kGapAlignment.
  constexpr size_t kMaxGapUnits = 32;
  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, mapping_align) + mapping_align - page_size();
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }
  size_t gap_size = 0;
  size_t first_byte = reinterpret_cast<size_t>(align_up(mmap_ptr, mapping_align));
  size_t last_byte = reinterpret_cast<size_t>(align_down(mmap_ptr + mmap_size, mapping_align) - 1);
  if (kGapAlignment && first_byte / kGapAlignment != last_byte / kGapAlignment) {
    // This library crosses a 2MB boundary and will fragment a new huge page.
    // Let's take advantage of that and insert a random number of inaccessible huge pages before it
    // to improve address randomization and make it harder to locate this library code by probing.
    munmap(mmap_ptr, mmap_size);
    mapping_align = std::max(mapping_align, kGapAlignment);
    gap_size =
        kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
    mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - page_size();
    mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
  }

  uint8_t *gap_end, *gap_start;
  if (gap_size) {
    gap_end = align_down(mmap_ptr + mmap_size, kGapAlignment);
    gap_start = gap_end - gap_size;
  } else {
    gap_start = gap_end = mmap_ptr + mmap_size;
  }

  uint8_t* first = align_up(mmap_ptr, mapping_align);
  uint8_t* last = align_down(gap_start, mapping_align) - size;

  // arc4random* is not available in first stage init because /dev/urandom hasn't yet been
  // created. Don't randomize then.
  size_t n = is_first_stage_init() ? 0 : arc4random_uniform((last - first) / start_align + 1);
  uint8_t* start = first + n * start_align;
  // Unmap the extra space around the allocation.
  // Keep it mapped PROT_NONE on 64-bit targets where address space is plentiful to make it harder
  // to defeat ASLR by probing for readable memory mappings.
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, gap_start - (start + size));
  if (gap_end != mmap_ptr + mmap_size) {
    munmap(gap_end, mmap_ptr + mmap_size - gap_end);
  }
  *out_gap_start = gap_start;
  *out_gap_size = gap_size;
  return start;
}

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(address_space_params* address_space) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;

  if (load_size_ > address_space->reserved_size) {
    if (address_space->must_use_address) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             load_size_ - address_space->reserved_size, load_size_, name_.c_str());
      return false;
    }
    size_t start_alignment = page_size();
    if (get_transparent_hugepages_supported() && get_application_target_sdk_version() >= 31) {
      size_t maximum_alignment = phdr_table_get_maximum_alignment(phdr_table_, phdr_num_);
      // Limit alignment to PMD size as other alignments reduce the number of
      // bits available for ASLR for no benefit.
      start_alignment = maximum_alignment == kPmdSize ? kPmdSize : page_size();
    }
    start = ReserveWithAlignmentPadding(load_size_, kLibraryAlignment, start_alignment, &gap_start_,
                                        &gap_size_);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
  } else {
    start = address_space->start_addr;
    gap_start_ = nullptr;
    gap_size_ = 0;
    mapped_by_caller_ = true;

    // Update the reserved address space to subtract the space used by this library.
    address_space->start_addr = reinterpret_cast<uint8_t*>(address_space->start_addr) + load_size_;
    address_space->reserved_size -= load_size_;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

/*
 * Returns true if the kernel supports page size migration, else false.
 */
bool page_size_migration_supported() {
  static bool pgsize_migration_enabled = []() {
    std::string enabled;
    if (!android::base::ReadFileToString("/sys/kernel/mm/pgsize_migration/enabled", &enabled)) {
      return false;
    }
    return enabled.find("1") != std::string::npos;
  }();
  return pgsize_migration_enabled;
}

// Find the ELF note of type NT_ANDROID_TYPE_PAD_SEGMENT and check that the desc value is 1.
bool ElfReader::ReadPadSegmentNote() {
  if (!page_size_migration_supported()) {
    // Don't attempt to read the note, since segment extension isn't
    // supported; but return true so that loading can continue normally.
    return true;
  }

  // The ELF can have multiple PT_NOTE's, check them all
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_NOTE) {
      continue;
    }

    // Some obfuscated ELFs may contain "empty" PT_NOTE program headers that don't
    // point to any part of the ELF (p_memsz == 0). Skip these since there is
    // nothing to decode. See: b/324468126
    if (phdr->p_memsz == 0) {
      continue;
    }

    // If the PT_NOTE extends beyond the file, the ELF is doing something
    // strange -- obfuscation, embedding hidden loaders, ...
    //
    // It doesn't contain the pad_segment note. Skip it to avoid SIGBUS
    // from accesses beyond the file.
    off64_t note_end_off = file_offset_ + phdr->p_offset + phdr->p_filesz;
    if (note_end_off > file_size_) {
      continue;
    }

    // note_fragment is scoped to within the loop so that there is
    // at most 1 PT_NOTE mapped at any time during this search.
    MappedFileFragment note_fragment;
    if (!note_fragment.Map(fd_, file_offset_, phdr->p_offset, phdr->p_memsz)) {
      DL_ERR("\"%s\": PT_NOTE mmap(nullptr, %p, PROT_READ, MAP_PRIVATE, %d, %p) failed: %m",
             name_.c_str(), reinterpret_cast<void*>(phdr->p_memsz), fd_,
             reinterpret_cast<void*>(page_start(file_offset_ + phdr->p_offset)));
      return false;
    }

    const ElfW(Nhdr)* note_hdr = nullptr;
    const char* note_desc = nullptr;
    if (!__get_elf_note(NT_ANDROID_TYPE_PAD_SEGMENT, "Android",
                        reinterpret_cast<ElfW(Addr)>(note_fragment.data()),
                        phdr, &note_hdr, &note_desc)) {
      continue;
    }

    if (note_hdr->n_descsz != sizeof(ElfW(Word))) {
      DL_ERR("\"%s\" NT_ANDROID_TYPE_PAD_SEGMENT note has unexpected n_descsz: %u",
             name_.c_str(), reinterpret_cast<unsigned int>(note_hdr->n_descsz));
      return false;
    }

    // 1 == enabled, 0 == disabled
    should_pad_segments_ = *reinterpret_cast<const ElfW(Word)*>(note_desc) == 1;
    return true;
  }

  return true;
}

static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                             size_t phdr_idx, ElfW(Addr)* p_memsz,
                                             ElfW(Addr)* p_filesz, bool should_pad_segments) {
  const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
  const ElfW(Phdr)* next = nullptr;
  size_t next_idx = phdr_idx + 1;

  // Don't do segment extension for p_align > 64KiB; such ELFs already exist in the
  // field, e.g. 2MiB p_align for THPs, and are relatively small in number.
  //
  // The kernel can only represent padding for p_align up to 64KiB. This is because
  // the kernel uses 4 available bits in the vm_area_struct to represent padding
  // extent; and so cannot enable mitigations to avoid breaking app compatibility for
  // p_aligns > 64KiB.
  //
  // Don't perform segment extension on these to avoid app compatibility issues.
  if (phdr->p_align <= kPageSize || phdr->p_align > 64*1024 || !should_pad_segments) {
    return;
  }

  if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
    next = &phdr_table[next_idx];
  }

  // If this is the last LOAD segment, no extension is needed
  if (!next || *p_memsz != *p_filesz) {
    return;
  }

  ElfW(Addr) next_start = page_start(next->p_vaddr);
  ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);

  // If adjacent segment mappings overlap, no extension is needed.
  if (curr_end >= next_start) {
    return;
  }

  // Extend the LOAD segment mapping to be contiguous with that of
  // the next LOAD segment.
  ElfW(Addr) extend = next_start - curr_end;
  *p_memsz += extend;
  *p_filesz += extend;
}
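
// Worked example (illustrative numbers, not taken from a real ELF): with 4 KiB
// pages and should_pad_segments enabled, consider a LOAD segment with
// p_align == 16 KiB whose p_filesz equals p_memsz and whose mapping ends at
// vaddr 0x5000, while the next LOAD segment starts at vaddr 0x8000. Then
// page_end(0x5000) == 0x6000 and page_start(0x8000) == 0x8000, so both p_memsz
// and p_filesz are extended by 0x2000 bytes, making the two mappings contiguous.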

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    ElfW(Addr) p_memsz = phdr->p_memsz;
    ElfW(Addr) p_filesz = phdr->p_filesz;
    _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_);

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + p_memsz;

    ElfW(Addr) seg_page_start = page_start(seg_start);
    ElfW(Addr) seg_page_end = page_end(seg_end);

    ElfW(Addr) seg_file_end = seg_start + p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + p_filesz;

    ElfW(Addr) file_page_start = page_start(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
          " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
          name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
          reinterpret_cast<void*>(phdr->p_filesz),
          reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= 26) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(26,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                            file_length,
                            prot,
                            MAP_FIXED|MAP_PRIVATE,
                            fd_,
                            file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }

      // Mark segments as huge page eligible if they meet the requirements
      // (executable and PMD aligned).
      if ((phdr->p_flags & PF_X) && phdr->p_align == kPmdSize &&
          get_transparent_hugepages_supported()) {
        madvise(seg_addr, file_length, MADV_HUGEPAGE);
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    //
    // Do not attempt to zero the extended region past the first partial page,
    // since doing so may:
    //   1) Result in a SIGBUS, as the region is not backed by the underlying
    //      file.
    //   2) Break the COW backing, faulting in new anon pages for a region
    //      that will not be used.

    uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;
    if ((phdr->p_flags & PF_W) != 0 && page_offset(unextended_seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(unextended_seg_file_end), 0,
             kPageSize - page_offset(unextended_seg_file_end));
    }

    // Pages may be brought in due to readahead.
    // Drop the padding (zero) pages, to avoid reclaim work later.
    //
    // NOTE: The madvise() here is special, as it also serves to hint to the
    // kernel the portion of the LOAD segment that is padding.
    //
    // See: [1] https://android-review.googlesource.com/c/kernel/common/+/3032411
    //      [2] https://android-review.googlesource.com/c/kernel/common/+/3048835
    uint64_t pad_start = page_end(unextended_seg_file_end);
    uint64_t pad_end = page_end(seg_file_end);
    CHECK(pad_start <= pad_end);
    uint64_t pad_len = pad_end - pad_start;
    if (page_size_migration_supported() && pad_len > 0 &&
        madvise(reinterpret_cast<void*>(pad_start), pad_len, MADV_DONTNEED)) {
      DL_WARN("\"%s\": madvise(0x%" PRIx64 ", 0x%" PRIx64 ", MADV_DONTNEED) failed: %m",
              name_.c_str(), pad_start, pad_len);
    }

    seg_file_end = page_end(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}

/* Used internally. Used to set the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags,
                                     bool should_pad_segments) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) p_memsz = phdr->p_memsz;
    ElfW(Addr) p_filesz = phdr->p_filesz;
    _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments);

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);

    int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
    if ((prot & PROT_WRITE) != 0) {
      // make sure we're never simultaneously writable / executable
      prot &= ~PROT_EXEC;
    }
#if defined(__aarch64__)
    if ((prot & PROT_EXEC) == 0) {
      // Though it is not specified don't add PROT_BTI if segment is not
      // executable.
      prot &= ~PROT_BTI;
    }
#endif

    int ret =
        mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, prot);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * AArch64: also called from linker_main and ElfReader::Load to apply
 *     PROT_BTI for the loaded main .so and other .so's.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
 *   prop        -> GnuPropertySection or nullptr
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr) load_bias, bool should_pad_segments,
                                const GnuPropertySection* prop __unused) {
  int prot = 0;
#if defined(__aarch64__)
  if ((prop != nullptr) && prop->IsBTICompatible()) {
    prot |= PROT_BTI;
  }
#endif
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias,
                                  bool should_pad_segments) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
                                   should_pad_segments);
}
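
// Sketch of the intended use (argument names abbreviated; see the comments on
// each function for the authoritative contract):
//
//   phdr_table_unprotect_segments(phdr, count, bias, pad);      // make segments writable
//   // ... apply relocations ...
//   phdr_table_protect_segments(phdr, count, bias, pad, prop);  // restore original protection
//
// GNU RELRO regions are additionally re-protected read-only after relocations
// via phdr_table_protect_gnu_relro() below.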

static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
                                              const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                              ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
                                              bool should_pad_segments) {
  // Find the index and phdr of the LOAD containing the GNU_RELRO segment
  for (size_t index = 0; index < phdr_count; ++index) {
    const ElfW(Phdr)* phdr = &phdr_table[index];

    if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
      // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
      // LOAD segment mem size, we need to protect only a partial region of the
      // LOAD segment and therefore cannot avoid a VMA split.
      //
      // Note: Don't check the page-aligned mem sizes since the extended protection
      // may incorrectly write protect non-relocation data.
      //
      // Example:
      //
      //               |---- 3K ----|-- 1K --|---- 3K ---- |-- 1K --|
      //       ----------------------------------------------------------------
      //               |            |        |             |        |
      //        SEG X  |     RO     |   RO   |     RW      |        |   SEG Y
      //               |            |        |             |        |
      //       ----------------------------------------------------------------
      //                            |        |             |
      //                            |        |             |
      //                            |        |             |
      //                    relro_vaddr   relro_vaddr   relro_vaddr
      //                    (load_vaddr)       +            +
      //                                  relro_memsz   load_memsz
      //
      //       ----------------------------------------------------------------
      //               |         PAGE        |         PAGE         |
      //       ----------------------------------------------------------------
      //                                     |       Potential      |
      //                                     |----- Extended RO ----|
      //                                     |      Protection      |
      //
      // If the check below uses page aligned mem sizes it will cause incorrect write
      // protection of the 3K RW part of the LOAD segment containing the GNU_RELRO.
      if (relro_phdr->p_memsz < phdr->p_memsz) {
        return;
      }

      ElfW(Addr) p_memsz = phdr->p_memsz;
      ElfW(Addr) p_filesz = phdr->p_filesz;

      // Attempt extending the VMA (mprotect range). Without extending the range,
      // mprotect will only RO protect a part of the extended RW LOAD segment, which
      // will leave an extra split RW VMA (the gap).
      _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
                               should_pad_segments);

      *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
      return;
    }
  }
}

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags,
                                          bool should_pad_segments) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    //    Extract:
    //       Note that the current dynamic linker code will only work
    //       correctly if the PT_GNU_RELRO segment starts on a page
    //       boundary. This is because the dynamic linker rounds the
    //       p_vaddr field down to the previous page boundary. If
    //       there is anything on the page which should not be read-only,
    //       the program is likely to fail at runtime. So in effect the
    //       linker must only emit a PT_GNU_RELRO segment if it ensures
    //       that it starts on a page boundary.
    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
                               should_pad_segments);

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                 ElfW(Addr) load_bias, bool should_pad_segments) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
                                        should_pad_segments);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 *   file_offset -> pointer to offset into file descriptor to use/update
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd,
                                   size_t* file_offset) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, *file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    *file_offset += size;
  }
  return 0;
}
1232 
1233 /* Where possible, replace the GNU relro segments with mappings of the given
1234  * file descriptor. This can be performed after relocations to allow a file
1235  * previously created by phdr_table_serialize_gnu_relro in another process to
1236  * replace the dirty relocated pages, saving memory, if it was loaded at the
1237  * same address. We have to compare the data before we map over it, since some
1238  * parts of the relro segment may not be identical due to other libraries in
1239  * the process being loaded at different addresses.
1240  *
1241  * Input:
1242  *   phdr_table  -> program header table
1243  *   phdr_count  -> number of entries in tables
1244  *   load_bias   -> load bias
1245  *   fd          -> readable file descriptor to use
1246  *   file_offset -> pointer to offset into file descriptor to use/update
1247  * Return:
1248  *   0 on success, -1 on failure (error code in errno).
1249  */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd,
                             size_t* file_offset) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - *file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, page_size()) != 0) {
        match_offset += page_size();
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, page_size()) == 0) {
        mismatch_offset += page_size();
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, *file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    *file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}
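
/* Illustrative sketch (not part of the original source): one way a caller could
 * pair the serialize/map halves of GNU RELRO sharing. A writer process dumps its
 * relocated relro pages into a file with phdr_table_serialize_gnu_relro(); a later
 * process that loads the same library at the same address calls
 * phdr_table_map_gnu_relro() so that byte-identical pages become clean, file-backed
 * mappings. The helper name and the assumption that both functions share the same
 * parameter list are illustrative only, so the block is kept out of the build.
 */
#if 0
static bool share_relro_sketch(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                               ElfW(Addr) load_bias, int relro_fd, bool write_side) {
  size_t file_offset = 0;
  if (write_side) {
    // Writer: serialize the already-relocated relro pages into relro_fd.
    return phdr_table_serialize_gnu_relro(phdr_table, phdr_count, load_bias,
                                          relro_fd, &file_offset) == 0;
  }
  // Reader: replace identical relro pages with mappings of relro_fd.
  return phdr_table_map_gnu_relro(phdr_table, phdr_count, load_bias,
                                  relro_fd, &file_offset) == 0;
}
#endif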

#if defined(__arm__)
/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif
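
/* Illustrative sketch (not part of the original source): on ARM, a caller would
 * typically cache the exidx table address and entry count so an unwinder can later
 * look up unwind data by pc. The struct and helper names here are hypothetical, so
 * the block is kept out of the build.
 */
#if 0
#if defined(__arm__)
struct ExidxCacheSketch {
  ElfW(Addr)* exidx;   // start of .ARM.exidx in memory
  size_t exidx_count;  // number of 8-byte (function offset, unwind data) entries
};

static bool cache_exidx_sketch(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                               ElfW(Addr) load_bias, ExidxCacheSketch* out) {
  return phdr_table_get_arm_exidx(phdr_table, phdr_count, load_bias,
                                  &out->exidx, &out->exidx_count) == 0;
}
#endif
#endif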

/* Return, through the output parameters, the address of the ELF file's .dynamic
 * section in memory, or null if it is missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}
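
/* Illustrative sketch (not part of the original source): the returned pointer is the
 * start of the ElfW(Dyn) array, which the ELF format terminates with a DT_NULL entry,
 * so a caller typically walks it as below. The tag dispatch is just an example and the
 * block is kept out of the build.
 */
#if 0
static void walk_dynamic_sketch(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr) load_bias) {
  ElfW(Dyn)* dynamic = nullptr;
  ElfW(Word) dynamic_flags = 0;
  phdr_table_get_dynamic_section(phdr_table, phdr_count, load_bias,
                                 &dynamic, &dynamic_flags);
  if (dynamic == nullptr) {
    return;  // No PT_DYNAMIC segment.
  }
  for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
    switch (d->d_tag) {
      case DT_SONAME:
      case DT_NEEDED:
        // A real caller would resolve these values as offsets into the string table.
        break;
      default:
        break;
    }
  }
}
#endif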

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string, or nullptr if there is none.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}
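
/* Illustrative sketch (not part of the original source): PT_INTERP points at a
 * NUL-terminated path (for example "/system/bin/linker64" in a 64-bit Android
 * executable), so the result can be used directly as a C string. The helper name is
 * hypothetical and the block is kept out of the build.
 */
#if 0
static bool has_interpreter_sketch(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                   ElfW(Addr) load_bias) {
  const char* interp = phdr_table_get_interpreter_name(phdr_table, phdr_count, load_bias);
  return interp != nullptr;
}
#endif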

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}
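
/* Illustrative sketch (not part of the original source): the fallback case above in
 * standalone form. If the first PT_LOAD segment maps file offset 0, the ELF header
 * itself lives at load_bias + p_vaddr, and the in-memory program header table sits
 * e_phoff bytes past it. The helper name is hypothetical; kept out of the build.
 */
#if 0
static const ElfW(Phdr)* phdr_from_first_load_sketch(ElfW(Addr) load_bias,
                                                     const ElfW(Phdr)& first_load) {
  const ElfW(Ehdr)* ehdr =
      reinterpret_cast<const ElfW(Ehdr)*>(load_bias + first_load.p_vaddr);
  return reinterpret_cast<const ElfW(Phdr)*>(
      reinterpret_cast<const char*>(ehdr) + ehdr->e_phoff);
}
#endif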

// Tries to find the .note.gnu.property section.
// It is not considered an error if such a section is missing.
bool ElfReader::FindGnuPropertySection() {
#if defined(__aarch64__)
  note_gnu_property_ = GnuPropertySection(phdr_table_, phdr_num_, load_start(), name_.c_str());
#endif
  return true;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}
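
/* Illustrative sketch (not part of the original source): the containment test used by
 * CheckPhdr() in isolation. The candidate range [loaded, loaded + phdr_num * sizeof(phdr))
 * must lie entirely inside the file-backed part of some PT_LOAD segment (p_filesz, not
 * p_memsz, so the table cannot sit in zero-filled tail pages). The helper name is
 * hypothetical; kept out of the build.
 */
#if 0
static bool range_in_segment_sketch(ElfW(Addr) start, size_t len,
                                    ElfW(Addr) seg_start, size_t seg_filesz) {
  ElfW(Addr) end = start + len;
  return seg_start <= start && end <= seg_start + seg_filesz;
}
#endif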