/third_party/musl/ldso/ |
D | dlstart.c |
  54   segs[0].p_vaddr = 0;  in _dlstart_c()
  62   dynv = (void *)(base + ph->p_vaddr);  in _dlstart_c()
  74   for (j=0; dyn[i]-segs[j].p_vaddr >= segs[j].p_memsz; j++);  in _dlstart_c()
  75   dyn[i] += segs[j].addr - segs[j].p_vaddr;  in _dlstart_c()
  85   for (j=0; rel[0]-segs[j].p_vaddr >= segs[j].p_memsz; j++);  in _dlstart_c()
  87   (rel[0] + segs[j].addr - segs[j].p_vaddr);  in _dlstart_c()
  90   - segs[rel_addr[1]].p_vaddr  in _dlstart_c()
  95   for (j=0; val-segs[j].p_vaddr >= segs[j].p_memsz; j++);  in _dlstart_c()
  96   *rel_addr = rel[2] + segs[j].addr - segs[j].p_vaddr + val;  in _dlstart_c()
  111  base = (size_t)dynv - ph->p_vaddr;  in _dlstart_c()
|
D | dynlink.c |
  174  for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);  in laddr()
  175  return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);  in laddr()
  183  size_t a = p->loadmap->segs[j].p_vaddr;  in laddr_pg()
  190  return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);  in laddr_pg()
  558  reclaim(dso, ph->p_vaddr & -PAGE_SIZE, ph->p_vaddr);  in reclaim_gaps()
  559  reclaim(dso, ph->p_vaddr+ph->p_memsz,  in reclaim_gaps()
  560  ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE);  in reclaim_gaps()
  663  dyn = ph->p_vaddr;  in map_library()
  665  tls_image = ph->p_vaddr;  in map_library()
  670  dso->relro_start = ph->p_vaddr & -PAGE_SIZE;  in map_library()
  [all …]
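The laddr() and laddr_pg() hits above are musl's FDPIC path, where a link-time virtual address has to be looked up in the DSO's loadmap because the segments are not mapped at one contiguous base. A minimal, self-contained sketch of that lookup follows; the fdpic_seg and fdpic_translate names and the example addresses are illustrative, not musl's actual types or values.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fdpic_seg {
        uintptr_t addr;     /* runtime load address of the segment */
        uintptr_t p_vaddr;  /* link-time virtual address from the phdr */
        size_t    p_memsz;  /* size of the segment in memory */
    };

    /* Translate a link-time virtual address into a runtime pointer by finding
     * the segment whose [p_vaddr, p_vaddr + p_memsz) range contains it. */
    static void *fdpic_translate(const struct fdpic_seg *segs, size_t nsegs, uintptr_t v)
    {
        for (size_t j = 0; j < nsegs; j++)
            if (v - segs[j].p_vaddr < segs[j].p_memsz)
                return (void *)(v - segs[j].p_vaddr + segs[j].addr);
        return NULL; /* address not covered by any load segment */
    }

    int main(void)
    {
        struct fdpic_seg segs[] = {
            { 0x500000, 0x0000, 0x1000 },  /* text: link addr 0x0000, loaded at 0x500000 */
            { 0x7f0000, 0x2000, 0x0800 },  /* data: link addr 0x2000, loaded at 0x7f0000 */
        };
        /* 0x2010 falls in the data segment, so this prints 0x7f0010. */
        printf("%p\n", fdpic_translate(segs, 2, 0x2010));
        return 0;
    }

The unsigned subtraction in the condition doubles as a lower-bound check: when v is below p_vaddr the difference wraps around and exceeds p_memsz, so the segment is skipped, which is the same trick the laddr() loop above relies on.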
|
/third_party/musl/src/internal/ |
D | fdpic_crt.h |
  14  uintptr_t addr, p_vaddr, p_memsz;  in __fdpic_fixup() member
  19  while (*a-lm->segs[rseg].p_vaddr >= lm->segs[rseg].p_memsz)  in __fdpic_fixup()
  22  (*a + lm->segs[rseg].addr - lm->segs[rseg].p_vaddr);  in __fdpic_fixup()
  24  while (*r-lm->segs[vseg].p_vaddr >= lm->segs[vseg].p_memsz)  in __fdpic_fixup()
  26  *r += lm->segs[vseg].addr - lm->segs[vseg].p_vaddr;  in __fdpic_fixup()
|
/third_party/libunwind/src/coredump/ |
D | _UCD_access_mem.c |
  45  if (phdr->p_vaddr <= addr && addr_last < phdr->p_vaddr + phdr->p_memsz)  in _UCD_access_mem()
  58  if (addr_last >= phdr->p_vaddr + phdr->p_filesz)  in _UCD_access_mem()
  70  fileofs = addr - phdr->p_vaddr;  in _UCD_access_mem()
  76  fileofs = phdr->p_offset + (addr - phdr->p_vaddr);  in _UCD_access_mem()
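These matches show the coredump read path: an address in the dumped process is usable only if some PT_LOAD segment of the core covers it, and it is backed by file data only within the first p_filesz bytes of that segment. A rough stand-alone sketch of that translation, with made-up header values and a hypothetical find_file_offset() helper rather than libunwind's API:

    #include <elf.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Returns 0 and stores the file offset on success, -1 if the address is
     * not backed by any segment's file data. */
    static int find_file_offset(const Elf64_Phdr *phdrs, size_t n,
                                uint64_t addr, uint64_t *fileofs)
    {
        for (size_t i = 0; i < n; i++) {
            const Elf64_Phdr *ph = &phdrs[i];
            if (ph->p_type != PT_LOAD)
                continue;
            /* Is the address inside this segment's memory image? */
            if (addr < ph->p_vaddr || addr >= ph->p_vaddr + ph->p_memsz)
                continue;
            /* Only the first p_filesz bytes are present in the file; the rest
             * is zero-filled (e.g. .bss). */
            if (addr - ph->p_vaddr >= ph->p_filesz)
                return -1;
            *fileofs = ph->p_offset + (addr - ph->p_vaddr);
            return 0;
        }
        return -1;
    }

    int main(void)
    {
        Elf64_Phdr ph = {
            .p_type = PT_LOAD, .p_offset = 0x2000,
            .p_vaddr = 0x400000, .p_filesz = 0x1000, .p_memsz = 0x1800,
        };
        uint64_t off;
        if (find_file_offset(&ph, 1, 0x400123, &off) == 0)
            printf("file offset: 0x%llx\n", (unsigned long long)off); /* 0x2123 */
        return 0;
    }

The two fileofs computations on lines 70 and 76 suggest separate backing-file and core-file cases; the sketch only covers the case where p_offset is added.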
|
D | _UCD_create.c |
  139  cur->p_vaddr = hdr64.p_vaddr ;  in _UCD_create()
  165  cur->p_vaddr = hdr32.p_vaddr ;  in _UCD_create()
  197  (unsigned long long) cur->p_vaddr,  in _UCD_create()
  323  if (phdr->p_vaddr != vaddr)  in _UCD_add_backing_file_at_vaddr()
|
D | _UCD_elf_map_image.c |
  95  if (phdr->p_vaddr <= ip && ip < phdr->p_vaddr + phdr->p_memsz)  in _UCD_get_elf_image()
|
D | _UCD_get_mapinfo_linux.c |
  87  && maps[i].start >= ui->phdrs[p].p_vaddr  in _handle_file_note()
  88  && maps[i].end <= ui->phdrs[p].p_vaddr + ui->phdrs[p].p_filesz)  in _handle_file_note()
|
/third_party/libunwind/src/dwarf/ |
D | Gfind_unwind_table.c |
  117  ei->load_bias = phdr[i].p_vaddr - phdr[i].p_offset;  in dwarf_find_unwind_table()
  121  if (phdr[i].p_vaddr < start_ip)  in dwarf_find_unwind_table()
  122  start_ip = phdr[i].p_vaddr;  in dwarf_find_unwind_table()
  124  if (phdr[i].p_vaddr + phdr[i].p_memsz > end_ip)  in dwarf_find_unwind_table()
  125  end_ip = phdr[i].p_vaddr + phdr[i].p_memsz;  in dwarf_find_unwind_table()
  132  ei->load_offset = phdr[i].p_vaddr - (phdr[i].p_offset & (~pagesize_alignment_mask));  in dwarf_find_unwind_table()
  196  load_base = segbase - (ptxt->p_vaddr & (-PAGE_SIZE));  in dwarf_find_unwind_table()
  298  edi->di_cache.u.rti.table_data = ((load_base + peh_hdr->p_vaddr)  in dwarf_find_unwind_table()
  305  edi->di_cache.u.rti.segbase = ((load_base + peh_hdr->p_vaddr)  in dwarf_find_unwind_table()
  319  edi->di_arm.u.rti.table_data = load_base + param_exidx->p_vaddr;  in dwarf_find_unwind_table()
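The first block of matches accumulates per-object facts from the PT_LOAD headers: a load bias relating file offsets to link-time addresses, and the [start_ip, end_ip) range the object covers. A rough sketch of that bookkeeping follows, under the assumption that the bias is taken from the executable segment; scan_load_segments, struct ip_range, and the example headers are illustrative, not libunwind's code.

    #include <elf.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ip_range { uint64_t load_bias, start_ip, end_ip; };

    static struct ip_range scan_load_segments(const Elf64_Phdr *phdrs, size_t n)
    {
        struct ip_range r = { 0, UINT64_MAX, 0 };
        for (size_t i = 0; i < n; i++) {
            if (phdrs[i].p_type != PT_LOAD)
                continue;
            /* p_vaddr - p_offset relates a file offset to its link-time
             * address; taken here from the executable segment. */
            if (phdrs[i].p_flags & PF_X)
                r.load_bias = phdrs[i].p_vaddr - phdrs[i].p_offset;
            if (phdrs[i].p_vaddr < r.start_ip)
                r.start_ip = phdrs[i].p_vaddr;
            if (phdrs[i].p_vaddr + phdrs[i].p_memsz > r.end_ip)
                r.end_ip = phdrs[i].p_vaddr + phdrs[i].p_memsz;
        }
        return r;
    }

    int main(void)
    {
        Elf64_Phdr phdrs[] = {
            { .p_type = PT_LOAD, .p_flags = PF_R | PF_X,
              .p_offset = 0x0,    .p_vaddr = 0x1000, .p_memsz = 0x3000 },
            { .p_type = PT_LOAD, .p_flags = PF_R | PF_W,
              .p_offset = 0x4000, .p_vaddr = 0x5000, .p_memsz = 0x800 },
        };
        struct ip_range r = scan_load_segments(phdrs, 2);
        /* Prints bias=0x1000 range=[0x1000, 0x5800). */
        printf("bias=%#llx range=[%#llx, %#llx)\n",
               (unsigned long long)r.load_bias,
               (unsigned long long)r.start_ip,
               (unsigned long long)r.end_ip);
        return 0;
    }

The later matches (line 196 onward) then add load_base to the p_vaddr of what appear to be the eh_frame_hdr and ARM exidx segments to obtain the runtime addresses of the unwind tables.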
|
/third_party/libunwind/src/ia64/ |
D | Gtables.c |
  499  Elf64_Addr vaddr = phdr->p_vaddr + load_base;  in callback()
  512  if (likely (p_unwind->p_vaddr >= p_text->p_vaddr  in callback()
  513  && p_unwind->p_vaddr < p_text->p_vaddr + p_text->p_memsz))  in callback()
  515  segbase = p_text->p_vaddr + load_base;  in callback()
  523  if (phdr->p_type == PT_LOAD && p_unwind->p_vaddr >= phdr->p_vaddr  in callback()
  524  && p_unwind->p_vaddr < phdr->p_vaddr + phdr->p_memsz)  in callback()
  526  segbase = phdr->p_vaddr + load_base;  in callback()
  536  Elf64_Dyn *dyn = (Elf64_Dyn *)(p_dynamic->p_vaddr + load_base);  in callback()
  550  di->start_ip = p_text->p_vaddr + load_base;  in callback()
  551  di->end_ip = p_text->p_vaddr + load_base + p_text->p_memsz;  in callback()
  [all …]
|
D | Gfind_unwind_table.c |
  135  edi->di_cache.gp = find_gp (edi, pdyn, segbase - ptxt->p_vaddr);  in ia64_find_unwind_table()
  141  ((char *) edi->ei.image + (punw->p_vaddr - ptxt->p_vaddr));  in ia64_find_unwind_table()
|
/third_party/musl/porting/liteos_a/user/ldso/ |
D | dynlink.c |
  172  for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);  in laddr()
  173  return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);  in laddr()
  181  size_t a = p->loadmap->segs[j].p_vaddr;  in laddr_pg()
  188  return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);  in laddr_pg()
  556  reclaim(dso, ph->p_vaddr & -PAGE_SIZE, ph->p_vaddr);  in reclaim_gaps()
  557  reclaim(dso, ph->p_vaddr+ph->p_memsz,  in reclaim_gaps()
  558  ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE);  in reclaim_gaps()
  661  dyn = ph->p_vaddr;  in map_library()
  663  tls_image = ph->p_vaddr;  in map_library()
  668  dso->relro_start = ph->p_vaddr & -PAGE_SIZE;  in map_library()
  [all …]
|
/third_party/musl/porting/liteos_a/user_debug/ldso/ |
D | dynlink.c |
  172  for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);  in laddr()
  173  return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);  in laddr()
  181  size_t a = p->loadmap->segs[j].p_vaddr;  in laddr_pg()
  188  return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);  in laddr_pg()
  556  reclaim(dso, ph->p_vaddr & -PAGE_SIZE, ph->p_vaddr);  in reclaim_gaps()
  557  reclaim(dso, ph->p_vaddr+ph->p_memsz,  in reclaim_gaps()
  558  ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE);  in reclaim_gaps()
  661  dyn = ph->p_vaddr;  in map_library()
  663  tls_image = ph->p_vaddr;  in map_library()
  668  dso->relro_start = ph->p_vaddr & -PAGE_SIZE;  in map_library()
  [all …]
|
/third_party/musl/porting/linux/user/ldso/ |
D | dynlink.c |
  367   for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);  in laddr()
  368   return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);  in laddr()
  376   size_t a = p->loadmap->segs[j].p_vaddr;  in laddr_pg()
  383   return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);  in laddr_pg()
  1196  reclaim(dso, ph->p_vaddr & -PAGE_SIZE, ph->p_vaddr);  in reclaim_gaps()
  1197  reclaim(dso, ph->p_vaddr+ph->p_memsz,  in reclaim_gaps()
  1198  ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE);  in reclaim_gaps()
  1399  dyn = ph->p_vaddr;  in map_library()
  1401  tls_image = ph->p_vaddr;  in map_library()
  1406  dso->relro_start = ph->p_vaddr & -PAGE_SIZE;  in map_library()
  [all …]
|
/third_party/elfutils/tests/ |
D | dwfl-report-segment-contiguous.c |
  51  .p_vaddr = 0xf00,  in main()
  68  .p_vaddr = 0x1000,  in main()
|
/third_party/elfutils/libdwfl/ |
D | dwfl_report_elf.c |
  188  vaddr = ph->p_vaddr & -ph->p_align;  in __libdwfl_elf_address_range()
  189  address_sync = ph->p_vaddr + ph->p_memsz;  in __libdwfl_elf_address_range()
  210  && ph->p_vaddr + ph->p_memsz > 0)  in __libdwfl_elf_address_range()
  212  end = bias + (ph->p_vaddr + ph->p_memsz);  in __libdwfl_elf_address_range()
|
D | elf-from-memory.c |
  245  GElf_Addr vaddr = class32 ? (*p32)[i].p_vaddr : (*p64)[i].p_vaddr;  in elf_from_remote_memory()
  302  GElf_Addr vaddr = class32 ? (*p32)[i].p_vaddr : (*p64)[i].p_vaddr;  in elf_from_remote_memory()
|
D | core-file.c |
  249  *pend_vaddr = (pphdr->p_vaddr + pphdr->p_memsz + align - 1) & -align;  in update_end()
  270  || pphdr->p_vaddr > *pend_vaddr)  in do_more()
  308  || ((phdr.p_vaddr + phdr.p_memsz + align - 1) & -align) <= vaddr);  in dwfl_elf_phdr_memory_callback()
  310  GElf_Off start = vaddr - phdr.p_vaddr + phdr.p_offset;  in dwfl_elf_phdr_memory_callback()
  440  *vaddrp = phdr->p_vaddr;  in __libdwfl_dynamic_vaddr_get()
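update_end() and dwfl_elf_phdr_memory_callback() both round a segment's end address up to its alignment with the (x + align - 1) & -align idiom, which works because ELF p_align values are powers of two. A tiny stand-alone illustration with made-up values; the round_up helper name is ours, not elfutils'.

    #include <stdint.h>
    #include <stdio.h>

    /* Round x up to a multiple of align; align must be a power of two,
     * as ELF p_align values are. */
    static uint64_t round_up(uint64_t x, uint64_t align)
    {
        return (x + align - 1) & -align;
    }

    int main(void)
    {
        uint64_t p_vaddr = 0x401234, p_memsz = 0x0fc0, p_align = 0x1000;
        /* 0x401234 + 0xfc0 = 0x4021f4, rounded up to 0x403000. */
        printf("end rounded up: %#llx\n",
               (unsigned long long)round_up(p_vaddr + p_memsz, p_align));
        return 0;
    }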
|
/third_party/rust/crates/rustix/src/backend/linux_raw/runtime/ |
D | tls.rs |
  26  PT_PHDR => base = phdrs.as_ptr().cast::<u8>().offset(-(phdr.p_vaddr as isize)),  in startup_tls_info()
  34  addr: base.cast::<u8>().add((*tls_phdr).p_vaddr).cast(),  in startup_tls_info()
|
/third_party/musl/src/env/ |
D | __init_tls.c |
  93   base = aux[AT_PHDR] - phdr->p_vaddr;  in static_init_tls()
  95   base = (size_t)_DYNAMIC - phdr->p_vaddr;  in static_init_tls()
  106  main_tls.image = (void *)(base + tls_phdr->p_vaddr);  in static_init_tls()
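static_init_tls() recovers the executable's load base by subtracting the PT_PHDR entry's p_vaddr from the runtime program-header address in AT_PHDR (falling back to _DYNAMIC when there is no PT_PHDR), then adds that base to the PT_TLS p_vaddr to find the TLS initialization image. A simplified sketch of the same arithmetic, using getauxval() instead of musl's aux[] vector; locate_tls and struct tls_layout are illustrative names, not musl's.

    #include <link.h>      /* ElfW(); pulls in <elf.h> for PT_PHDR, PT_TLS */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/auxv.h>  /* getauxval(), AT_PHDR, AT_PHNUM */

    struct tls_layout {
        void  *image;  /* initial TLS data (the PT_TLS segment's image) */
        size_t len;    /* p_filesz: bytes to copy into each thread's TLS block */
        size_t size;   /* p_memsz: total TLS block size (the rest is zero-filled) */
        size_t align;  /* p_align */
    };

    static int locate_tls(uintptr_t at_phdr, const ElfW(Phdr) *phdr, size_t phnum,
                          struct tls_layout *out)
    {
        uintptr_t base = 0;
        const ElfW(Phdr) *tls_phdr = NULL;

        for (size_t i = 0; i < phnum; i++) {
            /* AT_PHDR is the runtime address of the program headers; PT_PHDR
             * records their link-time p_vaddr, so the difference is the load
             * base of the executable (zero for a non-PIE binary). */
            if (phdr[i].p_type == PT_PHDR)
                base = at_phdr - phdr[i].p_vaddr;
            else if (phdr[i].p_type == PT_TLS)
                tls_phdr = &phdr[i];
        }
        if (!tls_phdr)
            return -1;

        out->image = (void *)(base + tls_phdr->p_vaddr);
        out->len   = tls_phdr->p_filesz;
        out->size  = tls_phdr->p_memsz;
        out->align = tls_phdr->p_align;
        return 0;
    }

    int main(void)
    {
        const ElfW(Phdr) *phdr = (const ElfW(Phdr) *)getauxval(AT_PHDR);
        size_t phnum = (size_t)getauxval(AT_PHNUM);
        struct tls_layout tl;

        if (locate_tls((uintptr_t)phdr, phdr, phnum, &tl) == 0)
            printf("TLS image at %p, copy %zu of %zu bytes\n", tl.image, tl.len, tl.size);
        else
            puts("no PT_TLS segment");
        return 0;
    }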
|
/third_party/musl/porting/linux/user/src/env/ |
D | __init_tls.c |
  97   base = aux[AT_PHDR] - phdr->p_vaddr;  in static_init_tls()
  99   base = (size_t)_DYNAMIC - phdr->p_vaddr;  in static_init_tls()
  110  main_tls.image = (void *)(base + tls_phdr->p_vaddr);  in static_init_tls()
|
/third_party/musl/src/ldso/ |
D | dl_iterate_phdr.c |
  25  base = aux[AT_PHDR] - phdr->p_vaddr;  in static_dl_iterate_phdr()
  27  base = (size_t)_DYNAMIC - phdr->p_vaddr;  in static_dl_iterate_phdr()
|
/third_party/musl/porting/liteos_a/user/src/env/ |
D | __init_tls.c |
  125  base = aux[AT_PHDR] - phdr->p_vaddr;  in static_init_tls()
  127  base = (size_t)_DYNAMIC - phdr->p_vaddr;  in static_init_tls()
  138  main_tls.image = (void *)(base + tls_phdr->p_vaddr);  in static_init_tls()
|
/third_party/mesa3d/src/util/ |
D | build_id.c |
  66  map_start = (void *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);  in build_id_find_nhdr_callback()
  79  info->dlpi_phdr[i].p_vaddr);  in build_id_find_nhdr_callback()
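build_id_find_nhdr_callback() runs under dl_iterate_phdr(), where dlpi_addr + p_vaddr gives the runtime address of each PT_NOTE segment, which is then scanned for the NT_GNU_BUILD_ID note. A condensed sketch of that pattern follows; note-alignment handling is simplified and the find_build_id name is ours, not Mesa's.

    #define _GNU_SOURCE
    #include <elf.h>
    #include <link.h>
    #include <stdio.h>
    #include <string.h>

    static int find_build_id(struct dl_phdr_info *info, size_t size, void *data)
    {
        (void)size; (void)data;
        for (size_t i = 0; i < info->dlpi_phnum; i++) {
            if (info->dlpi_phdr[i].p_type != PT_NOTE)
                continue;
            /* dlpi_addr + p_vaddr is the note segment's runtime address. */
            const char *p   = (const char *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
            const char *end = p + info->dlpi_phdr[i].p_filesz;
            while (p + sizeof(ElfW(Nhdr)) <= end) {
                const ElfW(Nhdr) *nhdr = (const ElfW(Nhdr) *)p;
                const char *name = p + sizeof(*nhdr);
                const char *desc = name + ((nhdr->n_namesz + 3) & ~3u);
                if (nhdr->n_type == NT_GNU_BUILD_ID &&
                    nhdr->n_namesz == 4 && memcmp(name, "GNU", 4) == 0)
                    printf("%s: build-id is %u bytes at %p\n",
                           info->dlpi_name[0] ? info->dlpi_name : "main executable",
                           nhdr->n_descsz, (const void *)desc);
                p = desc + ((nhdr->n_descsz + 3) & ~3u);
            }
        }
        return 0; /* keep iterating over all loaded objects */
    }

    int main(void)
    {
        dl_iterate_phdr(find_build_id, NULL);
        return 0;
    }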
|
/third_party/elfutils/libelf/ |
D | gelf_update_phdr.c |
  64   || unlikely (src->p_vaddr > 0xffffffffull)  in gelf_update_phdr()
  100  COPY (p_vaddr);  in gelf_update_phdr()
|
D | elf32_getphdr.c |
  162  CONVERT_TO (phdr[cnt].p_vaddr, notcvt[cnt].p_vaddr);  in ElfW2()
  212  CONVERT (phdr[cnt].p_vaddr);  in ElfW2()
|