/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <string.h>
#include <sys/mman.h>

#include "linker_phdr.h"

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of Elf32_Phdr for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead, the
  loader decides where to load the first segment, then loads all the others
  relative to it in order to preserve the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be computed as a 32-bit unsigned integer, to deal
         with possible wrap-around for large p_vaddr values.)

  Note that phdr0_load_address must itself be page-aligned, with the
  segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.
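
  As a concrete illustration (assuming a 4096-byte page size): if
  phdr0->p_vaddr is 0x30200 and the loader picks the page-aligned address
  0xa0030000 as phdr0_load_address, then:

       load_bias = 0xa0030000 - PAGE_START(0x30200)
                 = 0xa0030000 - 0x30000
                 = 0xa0000000

  and the segment's first byte is mapped at
  0xa0030000 + PAGE_OFFSET(0x30200) = 0xa0030200, which is exactly
  load_bias + p_vaddr.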

 **/

#define MAYBE_MAP_FLAG(x,from,to)    (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
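
/* For example, PFLAGS_TO_PROT(PF_R | PF_X) evaluates to PROT_READ | PROT_EXEC,
 * the typical protection for a text segment. */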

/* Load the program header table from an ELF file into a read-only private
 * anonymous mmap-ed block.
 *
 * Input:
 *   fd          -> file descriptor
 *   phdr_offset -> file offset of phdr table
 *   phdr_num    -> number of entries in the table.
 *
 * Output:
 *   phdr_mmap   -> address of mmap block in memory.
 *   phdr_size   -> size of mmap block in memory.
 *   phdr_table  -> address of first entry in memory.
 *
 * Return:
 *   -1 on error, or 0 on success.
 */
int phdr_table_load(int                fd,
                    Elf32_Addr         phdr_offset,
                    Elf32_Half         phdr_num,
                    void**             phdr_mmap,
                    Elf32_Addr*        phdr_size,
                    const Elf32_Phdr** phdr_table)
{
    Elf32_Addr page_min, page_max, page_offset;
    void* mmap_result;

    /* Just like the kernel, we only accept program header tables that
     * are smaller than 64KB. */
    if (phdr_num < 1 || phdr_num > 65536/sizeof(Elf32_Phdr)) {
        errno = EINVAL;
        return -1;
    }

    page_min = PAGE_START(phdr_offset);
    page_max = PAGE_END(phdr_offset + phdr_num*sizeof(Elf32_Phdr));
    page_offset = PAGE_OFFSET(phdr_offset);

    mmap_result = mmap(NULL,
                       page_max - page_min,
                       PROT_READ,
                       MAP_PRIVATE,
                       fd,
                       page_min);

    if (mmap_result == MAP_FAILED) {
        return -1;
    }

    *phdr_mmap = mmap_result;
    *phdr_size = page_max - page_min;
    *phdr_table = (Elf32_Phdr*)((char*)mmap_result + page_offset);

    return 0;
}

void phdr_table_unload(void* phdr_mmap, Elf32_Addr phdr_memsize)
{
    munmap(phdr_mmap, phdr_memsize);
}
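
/* Illustrative (hypothetical) caller sketch, with error handling kept minimal;
 * 'ehdr' is assumed to be an Elf32_Ehdr that was already read from 'fd':
 *
 *     void*             phdr_mmap;
 *     Elf32_Addr        phdr_size;
 *     const Elf32_Phdr* phdr_table;
 *
 *     if (phdr_table_load(fd, ehdr.e_phoff, ehdr.e_phnum,
 *                         &phdr_mmap, &phdr_size, &phdr_table) < 0) {
 *         return -1;   // errno describes the failure
 *     }
 *     ... use phdr_table[0] .. phdr_table[ehdr.e_phnum - 1] ...
 *     phdr_table_unload(phdr_mmap, phdr_size);
 */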


/* Compute the extent of all loadable segments in an ELF program header
 * table. This corresponds to the page-aligned size in bytes that needs to be
 * reserved in the process' address space.
 *
 * This returns 0 if there are no loadable segments.
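 *
 * For instance, with the two example segments from the technical note above
 * (0x30000..0x34000 and 0x40000..0x48000) and 4096-byte pages, this returns
 * PAGE_END(0x48000) - PAGE_START(0x30000) = 0x18000 bytes.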
 */
Elf32_Addr phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
                                    size_t phdr_count)
{
    Elf32_Addr min_vaddr = 0xFFFFFFFFU;
    Elf32_Addr max_vaddr = 0x00000000U;

    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        if (phdr->p_vaddr < min_vaddr) {
            min_vaddr = phdr->p_vaddr;
        }

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        }
    }

    if (min_vaddr > max_vaddr) {
        return 0;
    }

    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    return max_vaddr - min_vaddr;
}

/* Reserve a virtual address range big enough to hold all loadable
 * segments of a program header table. This is done by creating a
 * private anonymous mmap() with PROT_NONE.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 * Output:
 *   load_start -> first page of reserved address space range
 *   load_size  -> size in bytes of reserved address space range
 *   load_bias  -> load bias, as described in the technical note above.
 *
 * Return:
 *   0 on success, -1 otherwise. Error code in errno.
 */
int
phdr_table_reserve_memory(const Elf32_Phdr* phdr_table,
                          size_t phdr_count,
                          void** load_start,
                          Elf32_Addr* load_size,
                          Elf32_Addr* load_bias)
{
    Elf32_Addr size = phdr_table_get_load_size(phdr_table, phdr_count);
    if (size == 0) {
        errno = EINVAL;
        return -1;
    }

    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
    void* start = mmap(NULL, size, PROT_NONE, mmap_flags, -1, 0);
    if (start == MAP_FAILED) {
        return -1;
    }

    *load_start = start;
    *load_size = size;
    *load_bias = 0;

    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];
        if (phdr->p_type == PT_LOAD) {
            *load_bias = (Elf32_Addr)start - PAGE_START(phdr->p_vaddr);
            break;
        }
    }
    return 0;
}

/* Map all loadable segments into the process' address space.
 * This assumes you already called phdr_table_reserve_memory to
 * reserve the address space range for the library.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load offset.
 *   fd         -> input file descriptor.
 *
 * Return:
 *   0 on success, -1 otherwise. Error code in errno.
 */
int
phdr_table_load_segments(const Elf32_Phdr* phdr_table,
                         int phdr_count,
                         Elf32_Addr load_bias,
                         int fd)
{
    int nn;

    for (nn = 0; nn < phdr_count; nn++) {
        const Elf32_Phdr* phdr = &phdr_table[nn];
        void* seg_addr;

        if (phdr->p_type != PT_LOAD)
            continue;

        /* Segment addresses in memory */
        Elf32_Addr seg_start = phdr->p_vaddr + load_bias;
        Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

        Elf32_Addr seg_page_start = PAGE_START(seg_start);
        Elf32_Addr seg_page_end   = PAGE_END(seg_end);

        Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

        /* File offsets */
        Elf32_Addr file_start = phdr->p_offset;
        Elf32_Addr file_end   = file_start + phdr->p_filesz;

        Elf32_Addr file_page_start = PAGE_START(file_start);
        Elf32_Addr file_page_end   = PAGE_END(file_end);

        seg_addr = mmap((void*)seg_page_start,
                        file_end - file_page_start,
                        PFLAGS_TO_PROT(phdr->p_flags),
                        MAP_FIXED|MAP_PRIVATE,
                        fd,
                        file_page_start);

        if (seg_addr == MAP_FAILED) {
            return -1;
        }

        /* if the segment is writable, and does not end on a page boundary,
         * zero-fill it until the page limit. */
        if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
            memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
        }

        seg_file_end = PAGE_END(seg_file_end);

        /* seg_file_end is now the first page address after the file
         * content. If seg_end is larger, we need to zero anything
         * between them. This is done by using a private anonymous
         * map for all extra pages.
         */
        if (seg_page_end > seg_file_end) {
            void* zeromap = mmap((void*)seg_file_end,
                                 seg_page_end - seg_file_end,
                                 PFLAGS_TO_PROT(phdr->p_flags),
                                 MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                                 -1,
                                 0);
            if (zeromap == MAP_FAILED) {
                return -1;
            }
        }
    }
    return 0;
}
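
/* Illustrative (hypothetical) sketch of how phdr_table_reserve_memory and
 * phdr_table_load_segments are meant to be combined; error handling and the
 * later protection steps are omitted:
 *
 *     void*      load_start;
 *     Elf32_Addr load_size, load_bias;
 *
 *     if (phdr_table_reserve_memory(phdr_table, phdr_count,
 *                                   &load_start, &load_size, &load_bias) == 0 &&
 *         phdr_table_load_segments(phdr_table, phdr_count, load_bias, fd) == 0) {
 *         // all PT_LOAD segments are now mapped at p_vaddr + load_bias
 *     }
 */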

/* Used internally. Sets the protection bits of all loaded segments
 * with optional extra flags (in practice, PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int
_phdr_table_set_load_prot(const Elf32_Phdr* phdr_table,
                          int phdr_count,
                          Elf32_Addr load_bias,
                          int extra_prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
            continue;

        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_segments(const Elf32_Phdr* phdr_table,
                            int phdr_count,
                            Elf32_Addr load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                     load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_unprotect_segments(const Elf32_Phdr* phdr_table,
                              int phdr_count,
                              Elf32_Addr load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                     load_bias, PROT_WRITE);
}

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int
_phdr_table_set_gnu_relro_prot(const Elf32_Phdr* phdr_table,
                               int phdr_count,
                               Elf32_Addr load_bias,
                               int prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_GNU_RELRO)
            continue;

        /* Tricky: what happens when the relro segment does not start
         * or end at page boundaries? We're going to be over-protective
         * here and mark every page touched by the segment as read-only.
         *
         * This seems to match Ian Lance Taylor's description of the
         * feature at http://www.airs.com/blog/archives/189.
         *
         * Extract:
         *    Note that the current dynamic linker code will only work
         *    correctly if the PT_GNU_RELRO segment starts on a page
         *    boundary. This is because the dynamic linker rounds the
         *    p_vaddr field down to the previous page boundary. If
         *    there is anything on the page which should not be read-only,
         *    the program is likely to fail at runtime. So in effect the
         *    linker must only emit a PT_GNU_RELRO segment if it ensures
         *    that it starts on a page boundary.
         */
        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's point of view, but contain absolute
 * addresses that need to be relocated before use.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_gnu_relro(const Elf32_Phdr* phdr_table,
                             int phdr_count,
                             Elf32_Addr load_bias)
{
    return _phdr_table_set_gnu_relro_prot(phdr_table,
                                          phdr_count,
                                          load_bias,
                                          PROT_READ);
}

#ifdef ANDROID_ARM_LINKER

# ifndef PT_ARM_EXIDX
#  define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
# endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (NULL on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int
phdr_table_get_arm_exidx(const Elf32_Phdr* phdr_table,
                         int phdr_count,
                         Elf32_Addr load_bias,
                         Elf32_Addr** arm_exidx,
                         unsigned* arm_exidx_count)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_ARM_EXIDX)
            continue;

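        /* Each .ARM.exidx entry is a pair of 32-bit words, i.e. 8 bytes. */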
        *arm_exidx = (Elf32_Addr*)(load_bias + phdr->p_vaddr);
        *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
        return 0;
    }
    *arm_exidx = NULL;
    *arm_exidx_count = 0;
    return -1;
}
#endif /* ANDROID_ARM_LINKER */

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or NULL if missing.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Output:
 *   dynamic       -> address of table in memory (NULL on failure).
 *   dynamic_count -> number of items in table (0 on failure).
 * Return:
 *   void
 */
void
phdr_table_get_dynamic_section(const Elf32_Phdr* phdr_table,
                               int phdr_count,
                               Elf32_Addr load_bias,
                               Elf32_Addr** dynamic,
                               size_t* dynamic_count)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_DYNAMIC) {
            continue;
        }

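        /* Each Elf32_Dyn entry is two 32-bit words, i.e. 8 bytes. */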
        *dynamic = (Elf32_Addr*)(load_bias + phdr->p_vaddr);
        if (dynamic_count) {
            *dynamic_count = (unsigned)(phdr->p_memsz / 8);
        }
        return;
    }
    *dynamic = NULL;
    if (dynamic_count) {
        *dynamic_count = 0;
    }
}

/* Return the address of the program header table as it appears in the loaded
 * segments in memory. This is in contrast with the input 'phdr_table' which
 * is temporary and will be released before the library is relocated.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Return:
 *   Address of the loaded program header table on success (it has
 *   'phdr_count' entries), or NULL on failure (no error code).
 */
const Elf32_Phdr*
phdr_table_get_loaded_phdr(const Elf32_Phdr* phdr_table,
                           int phdr_count,
                           Elf32_Addr load_bias)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;
    Elf32_Addr loaded = 0;
    Elf32_Addr loaded_end;

    /* If there is a PT_PHDR, use it directly */
    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type == PT_PHDR) {
            loaded = load_bias + phdr->p_vaddr;
            goto CHECK;
        }
    }

    /* Otherwise, check the first loadable segment. If its file offset
     * is 0, it starts with the ELF header, and we can trivially find the
     * loaded program header from it. */
    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type == PT_LOAD) {
            if (phdr->p_offset == 0) {
                Elf32_Addr elf_addr = load_bias + phdr->p_vaddr;
                const Elf32_Ehdr* ehdr = (const Elf32_Ehdr*)(void*)elf_addr;
                Elf32_Addr offset = ehdr->e_phoff;
                loaded = (Elf32_Addr)ehdr + offset;
                goto CHECK;
            }
            break;
        }
    }

    /* We didn't find it, let the caller know. They may be able to
     * keep a copy of the input phdr_table instead. */
    return NULL;

CHECK:
    /* Ensure that our program header is actually within a loadable
     * segment. This should help catch badly-formed ELF files that
     * would cause the linker to crash later when trying to access it.
     */
    loaded_end = loaded + phdr_count*sizeof(Elf32_Phdr);

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_LOAD)
            continue;
        Elf32_Addr seg_start = phdr->p_vaddr + load_bias;
        Elf32_Addr seg_end = phdr->p_filesz + seg_start;

        if (seg_start <= loaded && loaded_end <= seg_end) {
            return (const Elf32_Phdr*)loaded;
        }
    }
    return NULL;
}