/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "linker.h"
#include "linker_debug.h"

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of Elf32_Phdr for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be computed as a 32-bit unsigned integer, since
         large p_vaddr values can make the subtraction wrap around
         UINT32_MAX.)

  phdr0_load_address itself must start at a page boundary, with the
  segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address. The sketch below walks through
  this computation for the example above.

 **/
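
/* The following sketch (illustration only, not compiled into the linker)
 * works through the bias computation for the two-segment example above,
 * using the same PAGE_START/PAGE_OFFSET macros this file relies on. */
#if 0
static Elf32_Addr example_load_bias(void) {
  Elf32_Addr phdr0_vaddr = 0x30000;            // first segment's p_vaddr
  Elf32_Addr phdr0_load_address = 0xa0030000;  // where its first page lands
  // Computed as a 32-bit unsigned value, so large p_vaddr values wrap safely.
  Elf32_Addr load_bias = phdr0_load_address - PAGE_START(phdr0_vaddr);
  // load_bias == 0xa0000000; any p_vaddr from the file maps to
  // load_bias + p_vaddr, e.g. 0x40000 -> 0xa0040000 for the second segment.
  return load_bias;
}
#endif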

#define MAYBE_MAP_FLAG(x,from,to)    (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))

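/* For instance (illustration only), a typical text segment with
 * p_flags == (PF_R | PF_X) yields PROT_READ | PROT_EXEC, and PROT_WRITE
 * stays clear because PF_W is absent:
 *
 *   int prot = PFLAGS_TO_PROT(PF_R | PF_X);  // == PROT_READ | PROT_EXEC
 */
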
ElfReader::ElfReader(const char* name, int fd)
    : name_(name), fd_(fd),
      phdr_num_(0), phdr_mmap_(NULL), phdr_table_(NULL), phdr_size_(0),
      load_start_(NULL), load_size_(0), load_bias_(0),
      loaded_phdr_(NULL) {
}

ElfReader::~ElfReader() {
  if (fd_ != -1) {
    close(fd_);
  }
  if (phdr_mmap_ != NULL) {
    munmap(phdr_mmap_, phdr_size_);
  }
}

bool ElfReader::Load() {
  return ReadElfHeader() &&
         VerifyElfHeader() &&
         ReadProgramHeader() &&
         ReserveAddressSpace() &&
         LoadSegments() &&
         FindPhdr();
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(read(fd_, &header_, sizeof(header_)));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_, strerror(errno));
    return false;
  }
  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable", name_);
    return false;
  }
  return true;
}

bool ElfReader::VerifyElfHeader() {
  if (header_.e_ident[EI_MAG0] != ELFMAG0 ||
      header_.e_ident[EI_MAG1] != ELFMAG1 ||
      header_.e_ident[EI_MAG2] != ELFMAG2 ||
      header_.e_ident[EI_MAG3] != ELFMAG3) {
    DL_ERR("\"%s\" has bad ELF magic", name_);
    return false;
  }

  if (header_.e_ident[EI_CLASS] != ELFCLASS32) {
    DL_ERR("\"%s\" not 32-bit: %d", name_, header_.e_ident[EI_CLASS]);
    return false;
  }
  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_, header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_, header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_, header_.e_version);
    return false;
  }

  if (header_.e_machine !=
#ifdef ANDROID_ARM_LINKER
      EM_ARM
#elif defined(ANDROID_MIPS_LINKER)
      EM_MIPS
#elif defined(ANDROID_X86_LINKER)
      EM_386
#endif
  ) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
    return false;
  }

  return true;
}

// Loads the program header table from an ELF file into a read-only private
// mmap-ed block of the file.
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are no larger than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf32_Phdr)) {
    DL_ERR("\"%s\" has invalid e_phnum: %d", name_, phdr_num_);
    return false;
  }

  Elf32_Addr page_min = PAGE_START(header_.e_phoff);
  Elf32_Addr page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf32_Phdr)));
  Elf32_Addr page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(NULL, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<Elf32_Phdr*>(reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}
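
/* Worked example (illustration only, assuming 4096-byte pages): with
 * e_phoff == 52 and e_phnum == 9, the table occupies file bytes
 * [52, 52 + 9 * sizeof(Elf32_Phdr)) == [52, 340). PAGE_START(52) == 0 and
 * PAGE_END(340) == 4096, so a single page is mapped, and phdr_table_
 * points at byte offset 52 within it. */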

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are non-NULL, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
                                size_t phdr_count,
                                Elf32_Addr* out_min_vaddr,
                                Elf32_Addr* out_max_vaddr)
{
    Elf32_Addr min_vaddr = 0xFFFFFFFFU;
    Elf32_Addr max_vaddr = 0x00000000U;

    bool found_pt_load = false;
    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];

        if (phdr->p_type != PT_LOAD) {
            continue;
        }
        found_pt_load = true;

        if (phdr->p_vaddr < min_vaddr) {
            min_vaddr = phdr->p_vaddr;
        }

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        }
    }
    if (!found_pt_load) {
        min_vaddr = 0x00000000U;
    }

    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    if (out_min_vaddr != NULL) {
        *out_min_vaddr = min_vaddr;
    }
    if (out_max_vaddr != NULL) {
        *out_max_vaddr = max_vaddr;
    }
    return max_vaddr - min_vaddr;
}
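
/* Typical use (sketch, not compiled into the linker): compute the window to
 * reserve before mapping anything, as ReserveAddressSpace() does below. For
 * the example in the technical note, min_vaddr == 0x30000,
 * max_vaddr == 0x48000, and the returned size is 0x18000. */
#if 0
static size_t example_load_size(const Elf32_Phdr* phdr_table, size_t phdr_count) {
  Elf32_Addr min_vaddr, max_vaddr;
  size_t size = phdr_table_get_load_size(phdr_table, phdr_count,
                                         &min_vaddr, &max_vaddr);
  return size;  // == max_vaddr - min_vaddr, both page-aligned
}
#endif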

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace() {
  Elf32_Addr min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_);
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  void* start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0);
  if (start == MAP_FAILED) {
    DL_ERR("couldn't reserve %d bytes of address space for \"%s\"", load_size_, name_);
    return false;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

// Map all loadable segments into the process' address space.
// This assumes you already called ReserveAddressSpace to
// reserve the address space range for the library.
// TODO: assert assumption.
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const Elf32_Phdr* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
    Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

    Elf32_Addr seg_page_start = PAGE_START(seg_start);
    Elf32_Addr seg_page_end   = PAGE_END(seg_end);

    Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

    // File offsets.
    Elf32_Addr file_start = phdr->p_offset;
    Elf32_Addr file_end   = file_start + phdr->p_filesz;

    Elf32_Addr file_page_start = PAGE_START(file_start);
    Elf32_Addr file_length = file_end - file_page_start;

    if (file_length != 0) {
      void* seg_addr = mmap((void*)seg_page_start,
                            file_length,
                            PFLAGS_TO_PROT(phdr->p_flags),
                            MAP_FIXED|MAP_PRIVATE,
                            fd_,
                            file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %d: %s", name_, i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}
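
/* Worked example for the zero-fill logic above (illustration only): with the
 * second segment from the technical note (filesz 0x2000, memsz 0x8000) and
 * load_bias 0xa0000000, seg_start == 0xa0040000, seg_file_end == 0xa0042000,
 * and seg_page_end == 0xa0048000. Here seg_file_end is already page-aligned,
 * so no memset() is needed, and the anonymous map supplies the zeroed pages
 * for [0xa0042000, 0xa0048000). */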

/* Used internally to set the protection bits of all loaded segments,
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int
_phdr_table_set_load_prot(const Elf32_Phdr* phdr_table,
                          int               phdr_count,
                          Elf32_Addr        load_bias,
                          int               extra_prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
            continue;

        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_segments(const Elf32_Phdr* phdr_table,
                            int               phdr_count,
                            Elf32_Addr        load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                      load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. That is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_unprotect_segments(const Elf32_Phdr* phdr_table,
                              int               phdr_count,
                              Elf32_Addr        load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                      load_bias, PROT_WRITE);
}
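
/* Typical use (sketch, not compiled into the linker): relocations are
 * applied between an unprotect/protect pair, so that read-only segments
 * can be patched and then sealed again.
 */
#if 0
static bool example_relocation_sequence(const Elf32_Phdr* phdr_table,
                                        int phdr_count,
                                        Elf32_Addr load_bias) {
  // 1. Temporarily make the read-only PT_LOAD segments writable.
  if (phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias) < 0) {
    return false;
  }
  // 2. Apply relocations here (omitted).
  // 3. Restore the original protection flags.
  if (phdr_table_protect_segments(phdr_table, phdr_count, load_bias) < 0) {
    return false;
  }
  return true;
}
#endif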

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int
_phdr_table_set_gnu_relro_prot(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias,
                               int               prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_GNU_RELRO)
            continue;

        /* Tricky: what happens when the relro segment does not start
         * or end at page boundaries? We're going to be over-protective
         * here and mark every page touched by the segment as read-only.
         *
         * This seems to match Ian Lance Taylor's description of the
         * feature at http://www.airs.com/blog/archives/189.
         *
         * Extract:
         *    Note that the current dynamic linker code will only work
         *    correctly if the PT_GNU_RELRO segment starts on a page
         *    boundary. This is because the dynamic linker rounds the
         *    p_vaddr field down to the previous page boundary. If
         *    there is anything on the page which should not be read-only,
         *    the program is likely to fail at runtime. So in effect the
         *    linker must only emit a PT_GNU_RELRO segment if it ensures
         *    that it starts on a page boundary.
         */
        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_gnu_relro(const Elf32_Phdr* phdr_table,
                             int               phdr_count,
                             Elf32_Addr        load_bias)
{
    return _phdr_table_set_gnu_relro_prot(phdr_table,
                                          phdr_count,
                                          load_bias,
                                          PROT_READ);
}
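
/* Ordering sketch (illustration only, not compiled into the linker): relro
 * pages contain relocated pointers, so they are sealed only after all
 * relocations succeed.
 */
#if 0
static void example_relro_ordering(const Elf32_Phdr* phdr_table,
                                   int phdr_count,
                                   Elf32_Addr load_bias) {
  // ... unprotect, relocate, re-protect (see the sketch above) ...
  // Then make the PT_GNU_RELRO ranges read-only for the process lifetime.
  phdr_table_protect_gnu_relro(phdr_table, phdr_count, load_bias);
}
#endif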

#ifdef ANDROID_ARM_LINKER

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX    0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (NULL on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int
phdr_table_get_arm_exidx(const Elf32_Phdr* phdr_table,
                         int               phdr_count,
                         Elf32_Addr        load_bias,
                         Elf32_Addr**      arm_exidx,
                         unsigned*         arm_exidx_count)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_ARM_EXIDX)
            continue;

        *arm_exidx = (Elf32_Addr*)(load_bias + phdr->p_vaddr);
        *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
        return 0;
    }
    *arm_exidx = NULL;
    *arm_exidx_count = 0;
    return -1;
}
#endif /* ANDROID_ARM_LINKER */

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or set *dynamic to NULL if it is missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (NULL on failure).
 *   dynamic_count -> number of items in table (0 on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void
phdr_table_get_dynamic_section(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias,
                               Elf32_Dyn**       dynamic,
                               size_t*           dynamic_count,
                               Elf32_Word*       dynamic_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_DYNAMIC) {
            continue;
        }

        *dynamic = reinterpret_cast<Elf32_Dyn*>(load_bias + phdr->p_vaddr);
        if (dynamic_count) {
            *dynamic_count = (unsigned)(phdr->p_memsz / 8);
        }
        if (dynamic_flags) {
            *dynamic_flags = phdr->p_flags;
        }
        return;
    }
    *dynamic = NULL;
    if (dynamic_count) {
        *dynamic_count = 0;
    }
}
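
/* Typical use (sketch, not compiled into the linker): walk the .dynamic
 * entries until the DT_NULL terminator. */
#if 0
static void example_walk_dynamic(const Elf32_Phdr* phdr_table,
                                 int phdr_count,
                                 Elf32_Addr load_bias) {
  Elf32_Dyn* dynamic;
  size_t dynamic_count;
  phdr_table_get_dynamic_section(phdr_table, phdr_count, load_bias,
                                 &dynamic, &dynamic_count, NULL);
  if (dynamic == NULL) {
    return;  // no PT_DYNAMIC segment
  }
  for (Elf32_Dyn* d = dynamic; d->d_tag != DT_NULL; ++d) {
    // d->d_tag identifies the entry (DT_NEEDED, DT_SYMTAB, ...);
    // d->d_un holds its value or address.
  }
}
#endif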

// Returns the address of the program header table as it appears in the loaded
// segments in memory. This is in contrast with 'phdr_table_' which
// is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const Elf32_Phdr* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        Elf32_Addr  elf_addr = load_bias_ + phdr->p_vaddr;
        const Elf32_Ehdr* ehdr = (const Elf32_Ehdr*)(void*)elf_addr;
        Elf32_Addr  offset = ehdr->e_phoff;
        return CheckPhdr((Elf32_Addr)ehdr + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_);
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(Elf32_Addr loaded) {
  const Elf32_Phdr* phdr_limit = phdr_table_ + phdr_num_;
  Elf32_Addr loaded_end = loaded + (phdr_num_ * sizeof(Elf32_Phdr));
  for (Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
    Elf32_Addr seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const Elf32_Phdr*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %x not in loadable segment", name_, loaded);
  return false;
}