1 #define _GNU_SOURCE
2 #define SYSCALL_NO_TLS 1
3
4 #include "dynlink.h"
5
6 #include <stdbool.h>
7 #include <stdlib.h>
8 #include <stdarg.h>
9 #include <stddef.h>
10 #include <string.h>
11 #include <unistd.h>
12 #include <stdint.h>
13 #include <elf.h>
14 #include <sys/mman.h>
15 #include <limits.h>
16 #include <fcntl.h>
17 #include <sys/stat.h>
18 #include <errno.h>
19 #include <link.h>
20 #include <setjmp.h>
21 #include <pthread.h>
22 #include <ctype.h>
23 #include <dlfcn.h>
24 #include <semaphore.h>
25 #include <sys/membarrier.h>
26 #include <sys/time.h>
27 #include <time.h>
28 #include <sys/prctl.h>
29 #include <sys/queue.h>
30
31 #include "cfi.h"
32 #include "dlfcn_ext.h"
33 #include "dynlink_rand.h"
34 #include "ld_log.h"
35 #include "libc.h"
36 #include "musl_fdsan.h"
37 #include "namespace.h"
38 #include "ns_config.h"
39 #include "pthread_impl.h"
40 #include "fork_impl.h"
41 #include "strops.h"
42 #include "trace/trace_marker.h"
43 #include "info/device_api_version.h"
44
45 #ifdef IS_ASAN
46 #if defined (__arm__)
47 #define LIB "/lib/"
48 #elif defined (__aarch64__)
49 #define LIB "/lib64/"
50 #else
51 #error "unsupported arch"
52 #endif
53 #define CHIP_PROD_ETC "/etc/"
54 #endif
55
56 #ifdef OHOS_ENABLE_PARAMETER
57 #include "sys_param.h"
58 #endif
59 #ifdef LOAD_ORDER_RANDOMIZATION
60 #include "zip_archive.h"
61 #endif
62
63 static size_t ldso_page_size;
64 #ifndef PAGE_SIZE
65 #define PAGE_SIZE ldso_page_size
66 #endif
67
68 #define malloc __libc_malloc
69 #define calloc __libc_calloc
70 #define realloc __libc_realloc
71 #define free __libc_free
72
73 static void error_impl(const char *, ...);
74 static void error_noop(const char *, ...);
75 static void (*error)(const char *, ...) = error_noop;
76
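/* MAXP2(a,b) yields the larger of two powers of two; ALIGN(x,y) rounds x up to
 * the next multiple of y (y must be a power of two). GNU_HASH_FILTER applies
 * the GNU hash table's Bloom filter: it skips the current dso unless both bits
 * derived from the symbol hash are set in the selected bloom word. */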
77 #define MAXP2(a,b) (-(-(a)&-(b)))
78 #define ALIGN(x,y) ((x)+(y)-1 & -(y))
79 #define GNU_HASH_FILTER(ght, ghm, gho) \
80 const size_t *bloomwords = (const void *)(ght + 4); \
81 size_t f = bloomwords[gho & (ght[2] - 1)]; \
82 if (!(f & ghm)) continue; \
83 f >>= (gh >> ght[3]) % (8 * sizeof f); \
84 if (!(f & 1)) continue;
85
86 #define container_of(p,t,m) ((t*)((char *)(p)-offsetof(t,m)))
87 #define countof(a) ((sizeof (a))/(sizeof (a)[0]))
88 #define DSO_FLAGS_NODELETE 0x1
89
90 #ifdef HANDLE_RANDOMIZATION
91 #define NEXT_DYNAMIC_INDEX 2
92 #define MIN_DEPS_COUNT 2
93 #define NAME_INDEX_ZERO 0
94 #define NAME_INDEX_ONE 1
95 #define NAME_INDEX_TWO 2
96 #define NAME_INDEX_THREE 3
97 #define TLS_CNT_INCREASE 3
98 #define INVALID_FD_INHIBIT_FURTHER_SEARCH (-2)
99 #endif
100
101 #define MAP_XPM 0x40
102 #define PARENTS_BASE_CAPACITY 8
103 #define RELOC_CAN_SEARCH_DSO_BASE_CAPACITY 32
104 #define ANON_NAME_MAX_LEN 70
105
106 #define KPMD_SIZE (1UL << 21)
107 #define HUGEPAGES_SUPPORTED_STR_SIZE (32)
108
109 #ifdef UNIT_TEST_STATIC
110 #define UT_STATIC
111 #else
112 #define UT_STATIC static
113 #endif
114
115 /* Used for dlclose */
116 #define UNLOAD_NR_DLOPEN_CHECK 1
117 #define UNLOAD_COMMON_CHECK 2
118 #define UNLOAD_ALL_CHECK 3
119 struct dso_entry {
120 struct dso *dso;
121 TAILQ_ENTRY(dso_entry) entries;
122 };
123
124 struct debug {
125 int ver;
126 void *head;
127 void (*bp)(void);
128 int state;
129 void *base;
130 };
131
132 struct reserved_address_params {
133 void* start_addr;
134 size_t reserved_size;
135 bool must_use_reserved;
136 bool reserved_address_recursive;
137 #ifdef LOAD_ORDER_RANDOMIZATION
138 struct dso *target;
139 #endif
140 };
141
142 typedef void (*stage3_func)(size_t *, size_t *, size_t *);
143
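/* Static storage used for the initial thread's TLS and pthread structure until
 * a real allocation can be made; MIN_TLS_ALIGN, derived from this layout, is
 * the baseline value for tls_align. */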
144 static struct builtin_tls {
145 char c[8];
146 struct pthread pt;
147 void *space[16];
148 } builtin_tls[1];
149 #define MIN_TLS_ALIGN offsetof(struct builtin_tls, pt)
150
151 #define ADDEND_LIMIT 4096
152 static size_t *saved_addends, *apply_addends_to;
153 static bool g_is_asan;
154 static struct dso ldso;
155 static struct dso *head, *tail, *fini_head, *syms_tail, *lazy_head;
156 static struct dso_debug_info *debug_tail = NULL;
157 static char *env_path, *sys_path;
158 static unsigned long long gencnt;
159 static unsigned long long subcnt;
160 static int runtime;
161 static int ldd_mode;
162 static int ldso_fail;
163 static int noload;
164 static int shutting_down;
165 static jmp_buf *rtld_fail;
166 static pthread_rwlock_t lock;
167 static pthread_mutex_t dlclose_lock = {{{ PTHREAD_MUTEX_RECURSIVE }}}; // set mutex type to PTHREAD_MUTEX_RECURSIVE
168 static struct debug debug;
169 static struct tls_module *tls_tail;
170 static size_t tls_cnt, tls_offset, tls_align = MIN_TLS_ALIGN;
171 static size_t static_tls_cnt;
172 static pthread_mutex_t init_fini_lock;
173 static pthread_mutex_t dl_phdr_lock;
174 static pthread_cond_t ctor_cond;
175 static struct dso *builtin_deps[2];
176 static struct dso *const no_deps[1];
177 static struct dso *builtin_ctor_queue[4];
178 static struct dso **main_ctor_queue;
179 static struct fdpic_loadmap *app_loadmap;
180 static struct fdpic_dummy_loadmap app_dummy_loadmap;
181
182 struct debug *_dl_debug_addr = &debug;
183
184 extern weak hidden char __ehdr_start[];
185
186 extern hidden int __malloc_replaced;
187
188 hidden void (*const __init_array_start)(void)=0, (*const __fini_array_start)(void)=0;
189
190 extern hidden void (*const __init_array_end)(void), (*const __fini_array_end)(void);
191
192 #ifdef USE_GWP_ASAN
193 extern bool init_gwp_asan_by_libc(bool force_init);
194 #endif
195
196 weak_alias(__init_array_start, __init_array_end);
197 weak_alias(__fini_array_start, __fini_array_end);
198 #ifdef DFX_SIGNAL_LIBC
199 UT_STATIC void __InstallSignalHandler()
200 {
201 }
202 weak_alias(__InstallSignalHandler, DFX_InstallSignalHandler);
203 #endif
204
205 #ifdef HANDLE_RANDOMIZATION
206 static int do_dlclose(struct dso *p, bool check_deps_all);
207 #endif
208
209 #ifdef LOAD_ORDER_RANDOMIZATION
210 static bool task_check_xpm(struct loadtask *task);
211 static bool map_library_header(struct loadtask *task);
212 static bool task_map_library(struct loadtask *task, struct reserved_address_params *reserved_params);
213 static bool resolve_fd_to_realpath(struct loadtask *task);
214 static bool load_library_header(struct loadtask *task);
215 static void task_load_library(struct loadtask *task, struct reserved_address_params *reserved_params);
216 static void preload_direct_deps(struct dso *p, ns_t *namespace, struct loadtasks *tasks);
217 static void unmap_preloaded_sections(struct loadtasks *tasks);
218 static void preload_deps(struct dso *p, struct loadtasks *tasks);
219 static void run_loadtasks(struct loadtasks *tasks, struct reserved_address_params *reserved_params);
220 UT_STATIC void assign_tls(struct dso *p);
221 UT_STATIC void load_preload(char *s, ns_t *ns, struct loadtasks *tasks);
222 static void open_library_by_path(const char *name, const char *s, struct loadtask *task, struct zip_info *z_info);
223 static void handle_asan_path_open_by_task(int fd, const char *name, ns_t *namespace, struct loadtask *task, struct zip_info *z_info);
224 #endif
225
226 extern int __close(int fd);
227
228 /* Sharing relro */
229 static void handle_relro_sharing(struct dso *p, const dl_extinfo *extinfo, ssize_t *relro_fd_offset);
230
231 /* asan path open */
232 int handle_asan_path_open(int fd, const char *name, ns_t *namespace, char *buf, size_t buf_size);
233
234 static void set_bss_vma_name(char *path_name, void *addr, size_t zeromap_size);
235
236 static void find_and_set_bss_name(struct dso *p);
237
238 /* lldb debug function */
239 static void sync_with_debugger();
240 static void notify_addition_to_debugger(struct dso *p);
241 static void notify_remove_to_debugger(struct dso *p);
242 static void add_dso_info_to_debug_map(struct dso *p);
243 static void remove_dso_info_from_debug_map(struct dso *p);
244
245 /* add namespace function */
246 static void get_sys_path(ns_configor *conf);
247 static void dlclose_ns(struct dso *p);
248
249 #if defined(BTI_SUPPORT) && (!defined(__LITEOS__))
250 struct gnu_property {
251 uint32_t pr_type;
252 uint32_t pr_datasz;
253 };
254
255 /* Security enhancement: add BTI related constants */
256 #define GNU_PROPERTY_TYPE_0_NAME "GNU"
257 #define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
258 #define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1U << 0)
259 #define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))
260
261 #define ELF_GNU_PROPERTY_ALIGN 8
262 #define BUF_MAX 0x400
263
264 /* Security enhancement: Traverse PT_GNU_PROPERTY and PT_NOTE in the ELF to check
265 * whether GNU_PROPERTY_AARCH64_FEATURE_1_BTI exists.
266  * If so, PROT_BTI is returned.
267  *
268  * If a new protection flag is needed, add it here. */
269 static uint32_t parse_elf_property(uint32_t type, const char* data)
270 {
271 uint32_t prot = 0;
272 if (type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
273 const uint32_t *p = (const uint32_t *)data;
274 if ((*p & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) != 0) {
275 prot |= PROT_BTI;
276 }
277 }
278 return prot;
279 }
280
281 static uint32_t parse_prot(const char *data, ssize_t *off, ssize_t datasz)
282 {
283 ssize_t o;
284 const struct gnu_property *pr;
285 uint32_t ret;
286 o = *off;
287 ssize_t sz = datasz - *off;
288
289 if (sz < sizeof(*pr))
290 return 0;
291 pr = (const struct gnu_property *)(data + o);
292 o += sizeof(*pr);
293 if (pr->pr_datasz > sz)
294 return 0;
295 ret = parse_elf_property(pr->pr_type, data + o);
296 return ret;
297 }
298
299 static unsigned parse_extra_prot_fd(int fd, Phdr *ph)
300 {
301 union {
302 Elf64_Nhdr nh;
303 char data[BUF_MAX];
304 } gnu_data;
305 ssize_t sz = ph->p_filesz > BUF_MAX ? BUF_MAX : ph->p_filesz;
306 ssize_t len = pread(fd, gnu_data.data, sz, ph->p_offset);
307 if (len < 0) return 0;
308 if ((gnu_data.nh.n_type != NT_GNU_PROPERTY_TYPE_0) || (gnu_data.nh.n_namesz != NOTE_NAME_SZ)) {
309 return 0;
310 }
311
312 ssize_t off_gp = (sizeof(gnu_data.nh)) + NOTE_NAME_SZ;
313 off_gp = (off_gp + ELF_GNU_PROPERTY_ALIGN - 1) & (-(ELF_GNU_PROPERTY_ALIGN));
314 ssize_t datasz_gp = off_gp + gnu_data.nh.n_descsz;
315 datasz_gp = datasz_gp > BUF_MAX ? BUF_MAX : datasz_gp;
316 return parse_prot((char *)gnu_data.data, &off_gp, datasz_gp);
317 }
318 #endif
319
320 static bool get_app_path(char *path, size_t size)
321 {
322 int l = 0;
323 l = readlink("/proc/self/exe", path, size);
324 if (l < 0 || l >= size) {
325 LD_LOGD("get_app_path readlink failed!");
326 return false;
327 }
328 path[l] = 0;
329 LD_LOGD("get_app_path path:%{public}s.", path);
330 return true;
331 }
332
333 static void init_default_namespace(struct dso *app)
334 {
335 ns_t *default_ns = get_default_ns();
336 memset(default_ns, 0, sizeof *default_ns);
337 ns_set_name(default_ns, NS_DEFAULT_NAME);
338 if (env_path) ns_set_env_paths(default_ns, env_path);
339 ns_set_lib_paths(default_ns, sys_path);
340 ns_set_separated(default_ns, false);
341 app->namespace = default_ns;
342 ns_add_dso(default_ns, app);
343 LD_LOGD("init_default_namespace default_namespace:"
344 "nsname: default ,"
345 "lib_paths:%{public}s ,"
346 "env_path:%{public}s ,"
347 "separated: false.",
348 sys_path, env_path);
349 return;
350 }
351
352 UT_STATIC void set_ns_attrs(ns_t *ns, ns_configor *conf)
353 {
354 if (!ns || !conf) {
355 return;
356 }
357
358 char *lib_paths, *asan_lib_paths, *permitted_paths, *asan_permitted_paths, *allowed_libs;
359
360 ns_set_separated(ns, conf->get_separated(ns->ns_name));
361
362 lib_paths = conf->get_lib_paths(ns->ns_name);
363 if (lib_paths) ns_set_lib_paths(ns, lib_paths);
364
365 asan_lib_paths = conf->get_asan_lib_paths(ns->ns_name);
366 if (asan_lib_paths) ns_set_asan_lib_paths(ns, asan_lib_paths);
367
368 permitted_paths = conf->get_permitted_paths(ns->ns_name);
369 if (permitted_paths) ns_set_permitted_paths(ns, permitted_paths);
370
371 asan_permitted_paths = conf->get_asan_permitted_paths(ns->ns_name);
372 if (asan_permitted_paths) ns_set_asan_permitted_paths(ns, asan_permitted_paths);
373
374 allowed_libs = conf->get_allowed_libs(ns->ns_name);
375 if (allowed_libs) ns_set_allowed_libs(ns, allowed_libs);
376
377 LD_LOGD("set_ns_attrs :"
378 "ns_name: %{public}s ,"
379 "separated:%{public}d ,"
380 "lib_paths:%{public}s ,"
381 "asan_lib_paths:%{public}s ,"
382 "permitted_paths:%{public}s ,"
383 "asan_permitted_paths:%{public}s ,"
384 "allowed_libs: %{public}s .",
385 ns->ns_name, ns->separated, ns->lib_paths, ns->asan_lib_paths, permitted_paths,
386 asan_permitted_paths, allowed_libs);
387 }
388
389 UT_STATIC void set_ns_inherits(ns_t *ns, ns_configor *conf)
390 {
391 if (!ns || !conf) {
392 return;
393 }
394
395 strlist *inherits = conf->get_inherits(ns->ns_name);
396 if (inherits) {
397 for (size_t i = 0; i < inherits->num; i++) {
398 ns_t *inherited_ns = find_ns_by_name(inherits->strs[i]);
399 if (inherited_ns) {
400 char *shared_libs = conf->get_inherit_shared_libs(ns->ns_name, inherited_ns->ns_name);
401 ns_add_inherit(ns, inherited_ns, shared_libs);
402 LD_LOGD("set_ns_inherits :"
403 "ns_name: %{public}s ,"
404 "separated:%{public}d ,"
405 "lib_paths:%{public}s ,"
406 "asan_lib_paths:%{public}s ,",
407 inherited_ns->ns_name, inherited_ns->separated, inherited_ns->lib_paths,
408 inherited_ns->asan_lib_paths);
409 }
410 }
411 strlist_free(inherits);
412 } else {
413 LD_LOGD("set_ns_inherits inherits is NULL!");
414 }
415 }
416
417 static void init_namespace(struct dso *app)
418 {
419 char app_path[PATH_MAX + 1];
420 if (!get_app_path(app_path, sizeof app_path)) {
421 strcpy(app_path, app->name);
422 }
423 char *t = strrchr(app_path, '/');
424 if (t) {
425 *t = 0;
426 } else {
427 app_path[0] = '.';
428 app_path[1] = 0;
429 }
430
431 nslist *nsl = nslist_init();
432 ns_configor *conf = configor_init();
433 char file_path[sizeof "/etc/ld-musl-namespace-" + sizeof (LDSO_ARCH) + sizeof ".ini" + 1] = {0};
434 (void)snprintf(file_path, sizeof file_path, "/etc/ld-musl-namespace-%s.ini", LDSO_ARCH);
435 LD_LOGI("init_namespace file_path:%{public}s", file_path);
436 trace_marker_reset();
437 trace_marker_begin(HITRACE_TAG_MUSL, "parse linker config", file_path);
438 int ret = conf->parse(file_path, app_path);
439 if (ret < 0) {
440 LD_LOGE("init_namespace ini file parse failed!");
441 /* Init_default_namespace is required even if the ini file parsing fails */
442 if (!sys_path) get_sys_path(conf);
443 init_default_namespace(app);
444 configor_free();
445 trace_marker_end(HITRACE_TAG_MUSL);
446 return;
447 }
448
449 /* sys_path needs to be parsed through ini file */
450 if (!sys_path) get_sys_path(conf);
451 init_default_namespace(app);
452
453 /* Init default namespace */
454 ns_t *d_ns = get_default_ns();
455 set_ns_attrs(d_ns, conf);
456
457 /* Init other namespace */
458 if (!nsl) {
459 LD_LOGE("init nslist fail!");
460 configor_free();
461 trace_marker_end(HITRACE_TAG_MUSL);
462 return;
463 }
464 strlist *s_ns = conf->get_namespaces();
465 if (s_ns) {
466 for (size_t i = 0; i < s_ns->num; i++) {
467 ns_t *ns = ns_alloc();
468 ns_set_name(ns, s_ns->strs[i]);
469 set_ns_attrs(ns, conf);
470 ns_add_dso(ns, app);
471 nslist_add_ns(ns);
472 }
473 strlist_free(s_ns);
474 }
475 /* Set inherited namespace */
476 set_ns_inherits(d_ns, conf);
477 for (size_t i = 0; i < nsl->num; i++) {
478 set_ns_inherits(nsl->nss[i], conf);
479 }
480 configor_free();
481 trace_marker_end(HITRACE_TAG_MUSL);
482 return;
483 }
484
485 /* Compute load address for a virtual address in a given dso. */
486 #if DL_FDPIC
487 void *laddr(const struct dso *p, size_t v)
488 {
489 size_t j=0;
490 if (!p->loadmap) return p->base + v;
491 for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);
492 return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
493 }
494 static void *laddr_pg(const struct dso *p, size_t v)
495 {
496 size_t j=0;
497 size_t pgsz = PAGE_SIZE;
498 if (!p->loadmap) return p->base + v;
499 for (j=0; ; j++) {
500 size_t a = p->loadmap->segs[j].p_vaddr;
501 size_t b = a + p->loadmap->segs[j].p_memsz;
502 a &= -pgsz;
503 b += pgsz-1;
504 b &= -pgsz;
505 if (v-a<b-a) break;
506 }
507 return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
508 }
509 static void (*fdbarrier(void *p))()
510 {
511 void (*fd)();
512 __asm__("" : "=r"(fd) : "0"(p));
513 return fd;
514 }
515 #define fpaddr(p, v) fdbarrier((&(struct funcdesc){ \
516 laddr(p, v), (p)->got }))
517 #else
518 #define laddr(p, v) (void *)((p)->base + (v))
519 #define laddr_pg(p, v) laddr(p, v)
520 #define fpaddr(p, v) ((void (*)())laddr(p, v))
521 #endif
522
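/* Decode a DT_* dynamic vector: a[i] receives the value of tag i (for tags
 * below cnt), and the low bits of a[0] record which small-numbered tags were
 * present. */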
523 static void decode_vec(size_t *v, size_t *a, size_t cnt)
524 {
525 size_t i;
526 for (i=0; i<cnt; i++) a[i] = 0;
527 for (; v[0]; v+=2) if (v[0]-1<cnt-1) {
528 if (v[0] < 8 * sizeof(long)) {
529 a[0] |= 1UL<<v[0];
530 }
531 a[v[0]] = v[1];
532 }
533 }
534
535 static int search_vec(size_t *v, size_t *r, size_t key)
536 {
537 for (; v[0]!=key; v+=2)
538 if (!v[0]) return 0;
539 *r = v[1];
540 return 1;
541 }
542
543 UT_STATIC int check_vna_hash(Verdef *def, int16_t vsym, uint32_t vna_hash)
544 {
545 int matched = 0;
546
547 vsym &= 0x7fff;
548 Verdef *verdef = def;
549 for (;;) {
550 if ((verdef->vd_ndx & 0x7fff) == vsym) {
551 if (vna_hash == verdef->vd_hash) {
552 matched = 1;
553 }
554 break;
555 }
556 if (matched) {
557 break;
558 }
559 if (verdef->vd_next == 0) {
560 break;
561 }
562 verdef = (Verdef *)((char *)verdef + verdef->vd_next);
563 }
564 #if (LD_LOG_LEVEL & LD_LOG_DEBUG)
565 if (!matched) {
566 LD_LOGD("check_vna_hash no matched found. vsym=%{public}d vna_hash=%{public}x", vsym, vna_hash);
567 }
568 #endif
569 return matched;
570 }
571
572 UT_STATIC int check_verinfo(Verdef *def, int16_t *versym, uint32_t index, struct verinfo *verinfo, char *strings)
573 {
574 	/* If versym or verdef is null, no version information is needed. */
575 if (!versym || !def) {
576 if (strlen(verinfo->v) == 0) {
577 return 1;
578 } else {
579 LD_LOGD("check_verinfo versym or def is null and verinfo->v exist, s:%{public}s v:%{public}s.",
580 verinfo->s, verinfo->v);
581 return 0;
582 }
583 }
584
585 int16_t vsym = versym[index];
586
587 /* find the verneed symbol. */
588 if (verinfo->use_vna_hash) {
589 		if (vsym != VER_NDX_LOCAL && vsym != VER_NDX_GLOBAL) {
590 return check_vna_hash(def, vsym, verinfo->vna_hash);
591 }
592 }
593
594 	/* If the requested version string is empty and vsym is non-negative, the library's default version symbol matches. */
595 if (strlen(verinfo->v) == 0) {
596 if (vsym >= 0) {
597 return 1;
598 } else {
599 LD_LOGD("check_verinfo not default version. vsym:%{public}d s:%{public}s", vsym, verinfo->s);
600 return 0;
601 }
602 }
603
604 /* find the version of symbol. */
605 vsym &= 0x7fff;
606 for (;;) {
607 if (!(def->vd_flags & VER_FLG_BASE) && (def->vd_ndx & 0x7fff) == vsym) {
608 break;
609 }
610 if (def->vd_next == 0) {
611 return 0;
612 }
613 def = (Verdef *)((char *)def + def->vd_next);
614 }
615
616 Verdaux *aux = (Verdaux *)((char *)def + def->vd_aux);
617
618 int ret = !strcmp(verinfo->v, strings + aux->vda_name);
619 #if (LD_LOG_LEVEL & LD_LOG_DEBUG)
620 if (!ret) {
621 LD_LOGD("check_verinfo version not match. s=%{public}s v=%{public}s vsym=%{public}d vda_name=%{public}s",
622 verinfo->s, verinfo->v, vsym, strings + aux->vda_name);
623 }
624 #endif
625 return ret;
626 }
627
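/* Classic SysV ELF hash (h = 16*h + c with the high nibble folded back in),
 * truncated to 28 bits; the returned pair also carries the symbol name length
 * so lookups can compare names with memcmp. */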
628 static struct sym_info_pair sysv_hash(const char *s0)
629 {
630 struct sym_info_pair s_info_p;
631 const unsigned char *s = (void *)s0;
632 uint_fast32_t h = 0;
633 while (*s) {
634 h = 16*h + *s++;
635 h ^= h>>24 & 0xf0;
636 }
637 s_info_p.sym_h = h & 0xfffffff;
638 s_info_p.sym_l = (char *)s - s0;
639 return s_info_p;
640 }
641
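/* GNU (DJB) symbol hash: h = h*33 + c starting from 5381, plus the name length
 * for memcmp-based comparison. */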
642 struct sym_info_pair gnu_hash(const char *s0)
643 {
644 struct sym_info_pair s_info_p;
645 const unsigned char *s = (void *)s0;
646 uint_fast32_t h = 5381;
647 for (; *s; s++)
648 h += h*32 + *s;
649 s_info_p.sym_h = h;
650 s_info_p.sym_l = (char *)s - s0;
651 return s_info_p;
652 }
653
654 static Sym *sysv_lookup(struct verinfo *verinfo, struct sym_info_pair s_info_p, struct dso *dso)
655 {
656 size_t i;
657 uint32_t h = s_info_p.sym_h;
658 Sym *syms = dso->syms;
659 Elf_Symndx *hashtab = dso->hashtab;
660 char *strings = dso->strings;
661 for (i=hashtab[2+h%hashtab[0]]; i; i=hashtab[2+hashtab[0]+i]) {
662 if ((!dso->versym || (dso->versym[i] & 0x7fff) >= 0)
663 && (!memcmp(verinfo->s, strings+syms[i].st_name, s_info_p.sym_l))) {
664 if (!check_verinfo(dso->verdef, dso->versym, i, verinfo, dso->strings)) {
665 continue;
666 }
667
668 return syms+i;
669 }
670
671 }
672 LD_LOGD("sysv_lookup not find the symbol, "
673 "so:%{public}s s:%{public}s v:%{public}s use_vna_hash:%{public}d vna_hash:%{public}x",
674 dso->name, verinfo->s, verinfo->v, verinfo->use_vna_hash, verinfo->vna_hash);
675 return 0;
676 }
677
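/* GNU hash lookup: walk the bucket's hash chain comparing hashes with the low
 * bit masked off; a set low bit in the stored hash value marks the end of the
 * chain. */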
678 static Sym *gnu_lookup(struct sym_info_pair s_info_p, uint32_t *hashtab, struct dso *dso, struct verinfo *verinfo)
679 {
680 uint32_t h1 = s_info_p.sym_h;
681 uint32_t nbuckets = hashtab[0];
682 uint32_t *buckets = hashtab + 4 + hashtab[2]*(sizeof(size_t)/4);
683 uint32_t i = buckets[h1 % nbuckets];
684
685 if (!i) {
686 LD_LOGD("gnu_lookup symbol not found (bloom filter), so:%{public}s s:%{public}s", dso->name, verinfo->s);
687 return 0;
688 }
689
690 uint32_t *hashval = buckets + nbuckets + (i - hashtab[1]);
691
692 for (h1 |= 1; ; i++) {
693 uint32_t h2 = *hashval++;
694 if ((h1 == (h2|1)) && (!dso->versym || (dso->versym[i] & 0x7fff) >= 0)
695 && !memcmp(verinfo->s, dso->strings + dso->syms[i].st_name, s_info_p.sym_l)) {
696 if (!check_verinfo(dso->verdef, dso->versym, i, verinfo, dso->strings)) {
697 continue;
698 }
699
700 return dso->syms+i;
701 }
702
703 if (h2 & 1) break;
704 }
705
706 LD_LOGD("gnu_lookup symbol not found, "
707 "so:%{public}s s:%{public}s v:%{public}s use_vna_hash:%{public}d vna_hash:%{public}x",
708 dso->name, verinfo->s, verinfo->v, verinfo->use_vna_hash, verinfo->vna_hash);
709 return 0;
710 }
711
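/* A symbol defined in dso is visible from namespace ns if dso belongs to ns,
 * or if one of the dsos recorded as its parents (dsos that loaded it as a
 * dependency) belongs to ns. */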
712 static bool check_sym_accessible(struct dso *dso, ns_t *ns)
713 {
714 if (!dso || !dso->namespace || !ns) {
715 LD_LOGD("check_sym_accessible invalid parameter!");
716 return false;
717 }
718 if (dso->namespace == ns) {
719 return true;
720 }
721 for (int i = 0; i < dso->parents_count; i++) {
722 if (dso->parents[i]->namespace == ns) {
723 return true;
724 }
725 }
726 LD_LOGD(
727 "check_sym_accessible dso name [%{public}s] ns_name [%{public}s] not accessible!", dso->name, ns->ns_name);
728 return false;
729 }
730
731 static inline bool is_dso_accessible(struct dso *dso, ns_t *ns)
732 {
733 if (dso->namespace == ns) {
734 return true;
735 }
736 for (int i = 0; i < dso->parents_count; i++) {
737 if (dso->parents[i]->namespace == ns) {
738 return true;
739 }
740 }
741 LD_LOGD(
742 "check_sym_accessible dso name [%{public}s] ns_name [%{public}s] not accessible!", dso->name, ns->ns_name);
743 return false;
744 }
745
746 static int find_dso_parent(struct dso *p, struct dso *target)
747 {
748 int index = -1;
749 for (int i = 0; i < p->parents_count; i++) {
750 if (p->parents[i] == target) {
751 index = i;
752 break;
753 }
754 }
755 return index;
756 }
757
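/* Record parent in p->parents, growing the array in steps of
 * PARENTS_BASE_CAPACITY; on allocation failure the parent is silently not
 * recorded. */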
758 static void add_dso_parent(struct dso *p, struct dso *parent)
759 {
760 int index = find_dso_parent(p, parent);
761 if (index != -1) {
762 return;
763 }
764 if (p->parents_count + 1 > p->parents_capacity) {
765 if (p->parents_capacity == 0) {
766 p->parents = (struct dso **)malloc(sizeof(struct dso *) * PARENTS_BASE_CAPACITY);
767 if (!p->parents) {
768 return;
769 }
770 p->parents_capacity = PARENTS_BASE_CAPACITY;
771 } else {
772 struct dso ** realloced = (struct dso **)realloc(
773 p->parents, sizeof(struct dso *) * (p->parents_capacity + PARENTS_BASE_CAPACITY));
774 if (!realloced) {
775 return;
776 }
777 p->parents = realloced;
778 p->parents_capacity += PARENTS_BASE_CAPACITY;
779 }
780 }
781 p->parents[p->parents_count] = parent;
782 p->parents_count++;
783 }
784
785 static void remove_dso_parent(struct dso *p, struct dso *parent)
786 {
787 int index = find_dso_parent(p, parent);
788 if (index == -1) {
789 return;
790 }
791 int i;
792 for (i = 0; i < index; i++) {
793 p->parents[i] = p->parents[i];
794 }
795 for (i = index; i < p->parents_count - 1; i++) {
796 p->parents[i] = p->parents[i + 1];
797 }
798 p->parents_count--;
799 }
800
801 static void add_reloc_can_search_dso(struct dso *p, struct dso *can_search_so)
802 {
803 if (p->reloc_can_search_dso_count + 1 > p->reloc_can_search_dso_capacity) {
804 if (p->reloc_can_search_dso_capacity == 0) {
805 p->reloc_can_search_dso_list =
806 (struct dso **)malloc(sizeof(struct dso *) * RELOC_CAN_SEARCH_DSO_BASE_CAPACITY);
807 if (!p->reloc_can_search_dso_list) {
808 return;
809 }
810 p->reloc_can_search_dso_capacity = RELOC_CAN_SEARCH_DSO_BASE_CAPACITY;
811 } else {
812 struct dso ** realloced = (struct dso **)realloc(
813 p->reloc_can_search_dso_list,
814 sizeof(struct dso *) * (p->reloc_can_search_dso_capacity + RELOC_CAN_SEARCH_DSO_BASE_CAPACITY));
815 if (!realloced) {
816 return;
817 }
818 p->reloc_can_search_dso_list = realloced;
819 p->reloc_can_search_dso_capacity += RELOC_CAN_SEARCH_DSO_BASE_CAPACITY;
820 }
821 }
822 p->reloc_can_search_dso_list[p->reloc_can_search_dso_count] = can_search_so;
823 p->reloc_can_search_dso_count++;
824 }
825
826 static void free_reloc_can_search_dso(struct dso *p)
827 {
828 if (p->reloc_can_search_dso_list) {
829 free(p->reloc_can_search_dso_list);
830 p->reloc_can_search_dso_list = NULL;
831 p->reloc_can_search_dso_count = 0;
832 p->reloc_can_search_dso_capacity = 0;
833 }
834 }
835
836 /* The list of DSOs that may be searched during relocation includes:
837  * - DSOs whose is_global flag is true, which are accessible by default.
838  *   Global DSOs are the executable, LD_PRELOAD-ed libraries and ldso itself.
839  * - DSOs whose is_reloc_head_so_dep flag is true; for these, namespace accessibility is checked.
840  *
841  * How is_reloc_head_so_dep is set:
842  * when dlopen-ing A, it is set to true for all direct and indirect
843  * dependencies of A, including A itself. */
844 static void add_can_search_so_list_in_dso(struct dso *dso_relocating, struct dso *start_check_dso) {
845 struct dso *p = start_check_dso;
846 for (; p; p = p->syms_next) {
847 if (p->is_global) {
848 add_reloc_can_search_dso(dso_relocating, p);
849 continue;
850 }
851
852 if (p->is_reloc_head_so_dep) {
853 if (dso_relocating->namespace && check_sym_accessible(p, dso_relocating->namespace)) {
854 add_reloc_can_search_dso(dso_relocating, p);
855 }
856 }
857 }
858
859 return;
860 }
861
862 #define OK_TYPES (1<<STT_NOTYPE | 1<<STT_OBJECT | 1<<STT_FUNC | 1<<STT_COMMON | 1<<STT_TLS)
863 #define OK_BINDS (1<<STB_GLOBAL | 1<<STB_WEAK | 1<<STB_GNU_UNIQUE)
864
865 #ifndef ARCH_SYM_REJECT_UND
866 #define ARCH_SYM_REJECT_UND(s) 0
867 #endif
868
869 #if defined(__GNUC__)
870 __attribute__((always_inline))
871 #endif
872
873 struct symdef find_sym_impl(
874 struct dso *dso, struct verinfo *verinfo, struct sym_info_pair s_info_g, int need_def, ns_t *ns)
875 {
876 Sym *sym;
877 struct sym_info_pair s_info_s = {0, 0};
878 uint32_t *ght;
879 uint32_t gh = s_info_g.sym_h;
880 uint32_t gho = gh / (8 * sizeof(size_t));
881 size_t ghm = 1ul << gh % (8 * sizeof(size_t));
882 struct symdef def = {0};
883 if (ns && !check_sym_accessible(dso, ns))
884 return def;
885
886 if ((ght = dso->ghashtab)) {
887 const size_t *bloomwords = (const void *)(ght + 4);
888 size_t f = bloomwords[gho & (ght[2] - 1)];
889 if (!(f & ghm))
890 return def;
891
892 f >>= (gh >> ght[3]) % (8 * sizeof f);
893 if (!(f & 1))
894 return def;
895
896 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
897 } else {
898 if (!s_info_s.sym_h)
899 s_info_s = sysv_hash(verinfo->s);
900
901 sym = sysv_lookup(verinfo, s_info_s, dso);
902 }
903
904 if (!sym)
905 return def;
906
907 if (!sym->st_shndx)
908 if (need_def || (sym->st_info & 0xf) == STT_TLS || ARCH_SYM_REJECT_UND(sym))
909 return def;
910
911 if (!sym->st_value)
912 if ((sym->st_info & 0xf) != STT_TLS)
913 return def;
914
915 if (!(1 << (sym->st_info & 0xf) & OK_TYPES))
916 return def;
917
918 if (!(1 << (sym->st_info >> 4) & OK_BINDS))
919 return def;
920
921 def.sym = sym;
922 def.dso = dso;
923 return def;
924 }
925
926 static inline struct symdef find_sym2(struct dso *dso, struct verinfo *verinfo, int need_def, int use_deps, ns_t *ns)
927 {
928 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
929 struct sym_info_pair s_info_s = {0, 0};
930 uint32_t gh = s_info_g.sym_h, gho = gh / (8 * sizeof(size_t)), *ght;
931 size_t ghm = 1ul << gh % (8*sizeof(size_t));
932 struct symdef def = {0};
933 struct dso **deps = use_deps ? dso->deps : 0;
934 for (; dso; dso=use_deps ? *deps++ : dso->syms_next) {
935 Sym *sym;
936 		// ldso, the app and preloaded DSOs are global and must be accessible from every existing namespace
937 if (!dso->is_preload && ns && !check_sym_accessible(dso, ns)) {
938 continue;
939 }
940 if ((ght = dso->ghashtab)) {
941 GNU_HASH_FILTER(ght, ghm, gho)
942 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
943 } else {
944 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
945 sym = sysv_lookup(verinfo, s_info_s, dso);
946 }
947
948 if (!sym) continue;
949 if (!sym->st_shndx)
950 if (need_def || (sym->st_info&0xf) == STT_TLS
951 || ARCH_SYM_REJECT_UND(sym))
952 continue;
953 if (!sym->st_value)
954 if ((sym->st_info&0xf) != STT_TLS)
955 continue;
956 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
957 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
958 def.sym = sym;
959 def.dso = dso;
960 break;
961 }
962 return def;
963 }
964
965 static inline struct symdef find_sym_by_deps(struct dso *dso, struct verinfo *verinfo, int need_def, ns_t *ns)
966 {
967 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
968 struct sym_info_pair s_info_s = {0, 0};
969 uint32_t gh = s_info_g.sym_h, gho = gh / (8 * sizeof(size_t)), *ght;
970 size_t ghm = 1ul << gh % (8 * sizeof(size_t));
971 struct symdef def = {0};
972 struct dso **deps = dso->deps;
973 for (; dso; dso = *deps++) {
974 Sym *sym;
975 if (!is_dso_accessible(dso, ns)) {
976 continue;
977 }
978 if ((ght = dso->ghashtab)) {
979 GNU_HASH_FILTER(ght, ghm, gho)
980 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
981 } else {
982 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
983 sym = sysv_lookup(verinfo, s_info_s, dso);
984 }
985
986 if (!sym) continue;
987 if (!sym->st_shndx)
988 if (need_def || (sym->st_info&0xf) == STT_TLS
989 || ARCH_SYM_REJECT_UND(sym))
990 continue;
991 if (!sym->st_value)
992 if ((sym->st_info&0xf) != STT_TLS)
993 continue;
994 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
995 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
996 def.sym = sym;
997 def.dso = dso;
998 break;
999 }
1000 return def;
1001 }
1002
1003 static inline struct symdef find_sym_by_saved_so_list(
1004 int sym_type, struct dso *dso, struct verinfo *verinfo, int need_def, struct dso *dso_relocating)
1005 {
1006 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
1007 struct sym_info_pair s_info_s = {0, 0};
1008 uint32_t gh = s_info_g.sym_h, gho = gh / (8 * sizeof(size_t)), *ght;
1009 size_t ghm = 1ul << gh % (8 * sizeof(size_t));
1010 struct symdef def = {0};
1011 // skip head dso.
1012 int start_search_index = sym_type==REL_COPY ? 1 : 0;
1013 struct dso *dso_searching = 0;
1014 for (int i = start_search_index; i < dso_relocating->reloc_can_search_dso_count; i++) {
1015 dso_searching = dso_relocating->reloc_can_search_dso_list[i];
1016 Sym *sym;
1017 if ((ght = dso_searching->ghashtab)) {
1018 GNU_HASH_FILTER(ght, ghm, gho)
1019 sym = gnu_lookup(s_info_g, ght, dso_searching, verinfo);
1020 } else {
1021 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
1022 sym = sysv_lookup(verinfo, s_info_s, dso_searching);
1023 }
1024 if (!sym) continue;
1025 if (!sym->st_shndx)
1026 if (need_def || (sym->st_info&0xf) == STT_TLS
1027 || ARCH_SYM_REJECT_UND(sym))
1028 continue;
1029 if (!sym->st_value)
1030 if ((sym->st_info&0xf) != STT_TLS)
1031 continue;
1032 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
1033 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
1034 def.sym = sym;
1035 def.dso = dso_searching;
1036 break;
1037 }
1038 return def;
1039 }
1040
1041 static struct symdef find_sym(struct dso *dso, const char *s, int need_def)
1042 {
1043 struct verinfo verinfo = { .s = s, .v = "", .use_vna_hash = false };
1044 return find_sym2(dso, &verinfo, need_def, 0, NULL);
1045 }
1046
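/* For the symbol at sym_index, find the matching Vernaux entry in the
 * .gnu.version_r (verneed) table and return its vna_hash, which identifies the
 * version definition expected from the providing library. */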
1047 static bool get_vna_hash(struct dso *dso, int sym_index, uint32_t *vna_hash)
1048 {
1049 if (!dso->versym || !dso->verneed) {
1050 return false;
1051 }
1052
1053 uint16_t vsym = dso->versym[sym_index];
1054 if (vsym == VER_NDX_LOCAL || vsym == VER_NDX_GLOBAL) {
1055 return false;
1056 }
1057
1058 bool result = false;
1059 Verneed *verneed = dso->verneed;
1060 Vernaux *vernaux;
1061 vsym &= 0x7fff;
1062
1063 for (;;) {
1064 vernaux = (Vernaux *)((char *)verneed + verneed->vn_aux);
1065
1066 for (size_t cnt = 0; cnt < verneed->vn_cnt; cnt++) {
1067 if ((vernaux->vna_other & 0x7fff) == vsym) {
1068 result = true;
1069 *vna_hash = vernaux->vna_hash;
1070 break;
1071 }
1072
1073 vernaux = (Vernaux *)((char *)vernaux + vernaux->vna_next);
1074 }
1075
1076 if (result) {
1077 break;
1078 }
1079
1080 if (verneed->vn_next == 0) {
1081 break;
1082 }
1083
1084 verneed = (Verneed *)((char *)verneed + verneed->vn_next);
1085 }
1086 return result;
1087 }
1088
1089 static void get_verinfo(struct dso *dso, int sym_index, struct verinfo *vinfo)
1090 {
1091 char *strings = dso->strings;
1092 // try to get version number from .gnu.version
1093 int16_t vsym = dso->versym[sym_index];
1094 Verdef *verdef = dso->verdef;
1095 vsym &= 0x7fff;
1096 if (!verdef) {
1097 return;
1098 }
1099 int version_found = 0;
1100 for (;;) {
1101 if (!verdef) {
1102 break;
1103 }
1104 if (!(verdef->vd_flags & VER_FLG_BASE) && (verdef->vd_ndx & 0x7fff) == vsym) {
1105 version_found = 1;
1106 break;
1107 }
1108 if (verdef->vd_next == 0) {
1109 break;
1110 }
1111 verdef = (Verdef *)((char *)verdef + verdef->vd_next);
1112 }
1113 if (version_found) {
1114 Verdaux *aux = (Verdaux *)((char *)verdef + verdef->vd_aux);
1115 if (aux && aux->vda_name && strings && (dso->strings + aux->vda_name)) {
1116 vinfo->v = dso->strings + aux->vda_name;
1117 }
1118 }
1119 }
1120
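/* Apply one relocation table. stride is 2 for REL and 3 for RELA records; for
 * RELA the addend is taken from the record, while for REL it is read from the
 * relocated word (and, for ldso itself, saved on the first pass and reused
 * afterwards). */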
1121 static void do_relocs(struct dso *dso, size_t *rel, size_t rel_size, size_t stride)
1122 {
1123 unsigned char *base = dso->base;
1124 Sym *syms = dso->syms;
1125 char *strings = dso->strings;
1126 Sym *sym;
1127 const char *name;
1128 void *ctx;
1129 int type;
1130 int sym_index;
1131 struct symdef def;
1132 size_t *reloc_addr;
1133 size_t sym_val;
1134 size_t tls_val;
1135 size_t addend;
1136 int skip_relative = 0, reuse_addends = 0, save_slot = 0;
1137
1138 if (dso == &ldso) {
1139 /* Only ldso's REL table needs addend saving/reuse. */
1140 if (rel == apply_addends_to)
1141 reuse_addends = 1;
1142 skip_relative = 1;
1143 }
1144
1145 for (; rel_size; rel+=stride, rel_size-=stride*sizeof(size_t)) {
1146 if (skip_relative && IS_RELATIVE(rel[1], dso->syms)) continue;
1147 type = R_TYPE(rel[1]);
1148 if (type == REL_NONE) continue;
1149 reloc_addr = laddr(dso, rel[0]);
1150
1151 if (stride > 2) {
1152 addend = rel[2];
1153 } else if (type==REL_GOT || type==REL_PLT|| type==REL_COPY) {
1154 addend = 0;
1155 } else if (reuse_addends) {
1156 /* Save original addend in stage 2 where the dso
1157 * chain consists of just ldso; otherwise read back
1158 * saved addend since the inline one was clobbered. */
1159 if (head==&ldso)
1160 saved_addends[save_slot] = *reloc_addr;
1161 addend = saved_addends[save_slot++];
1162 } else {
1163 addend = *reloc_addr;
1164 }
1165
1166 sym_index = R_SYM(rel[1]);
1167 if (sym_index) {
1168 sym = syms + sym_index;
1169 name = strings + sym->st_name;
1170 ctx = type==REL_COPY ? head->syms_next : head;
1171 struct verinfo vinfo = { .s = name, .v = ""};
1172
1173 vinfo.use_vna_hash = get_vna_hash(dso, sym_index, &vinfo.vna_hash);
1174 if (!vinfo.use_vna_hash && dso->versym && (dso->versym[sym_index] & 0x7fff) >= 0) {
1175 get_verinfo(dso, sym_index, &vinfo);
1176 }
1177 if (dso->cache_sym_index == sym_index) {
1178 def = (struct symdef){ .dso = dso->cache_dso, .sym = dso->cache_sym };
1179 } else {
1180 def = (sym->st_info>>4) == STB_LOCAL
1181 ? (struct symdef){ .dso = dso, .sym = sym }
1182 : dso != &ldso ? find_sym_by_saved_so_list(type, ctx, &vinfo, type==REL_PLT, dso)
1183 : find_sym2(ctx, &vinfo, type==REL_PLT, 0, dso->namespace);
1184 dso->cache_sym_index = sym_index;
1185 dso->cache_dso = def.dso;
1186 dso->cache_sym = def.sym;
1187 }
1188
1189 if (!def.sym && (sym->st_shndx != SHN_UNDEF
1190 || sym->st_info>>4 != STB_WEAK)) {
1191 if (dso->lazy && (type==REL_PLT || type==REL_GOT)) {
1192 dso->lazy[3*dso->lazy_cnt+0] = rel[0];
1193 dso->lazy[3*dso->lazy_cnt+1] = rel[1];
1194 dso->lazy[3*dso->lazy_cnt+2] = addend;
1195 dso->lazy_cnt++;
1196 continue;
1197 }
1198 LD_LOGE("relocating failed: symbol not found. "
1199 "dso=%{public}s s=%{public}s use_vna_hash=%{public}d van_hash=%{public}x",
1200 dso->name, name, vinfo.use_vna_hash, vinfo.vna_hash);
1201 error("Error relocating %s: %s: symbol not found",
1202 dso->name, name);
1203 if (runtime) longjmp(*rtld_fail, 1);
1204 continue;
1205 }
1206 } else {
1207 sym = 0;
1208 def.sym = 0;
1209 def.dso = dso;
1210 }
1211
1212 sym_val = def.sym ? (size_t)laddr(def.dso, def.sym->st_value) : 0;
1213 tls_val = def.sym ? def.sym->st_value : 0;
1214
1215 if ((type == REL_TPOFF || type == REL_TPOFF_NEG)
1216 && def.dso->tls_id > static_tls_cnt) {
1217 error("Error relocating %s: %s: initial-exec TLS "
1218 "resolves to dynamic definition in %s",
1219 dso->name, name, def.dso->name);
1220 longjmp(*rtld_fail, 1);
1221 }
1222
1223 switch(type) {
1224 case REL_OFFSET:
1225 addend -= (size_t)reloc_addr;
1226 case REL_SYMBOLIC:
1227 case REL_GOT:
1228 case REL_PLT:
1229 *reloc_addr = sym_val + addend;
1230 break;
1231 case REL_USYMBOLIC:
1232 memcpy(reloc_addr, &(size_t){sym_val + addend}, sizeof(size_t));
1233 break;
1234 case REL_RELATIVE:
1235 *reloc_addr = (size_t)base + addend;
1236 break;
1237 case REL_SYM_OR_REL:
1238 if (sym) *reloc_addr = sym_val + addend;
1239 else *reloc_addr = (size_t)base + addend;
1240 break;
1241 case REL_COPY:
1242 memcpy(reloc_addr, (void *)sym_val, sym->st_size);
1243 break;
1244 case REL_OFFSET32:
1245 *(uint32_t *)reloc_addr = sym_val + addend
1246 - (size_t)reloc_addr;
1247 break;
1248 case REL_FUNCDESC:
1249 *reloc_addr = def.sym ? (size_t)(def.dso->funcdescs
1250 + (def.sym - def.dso->syms)) : 0;
1251 break;
1252 case REL_FUNCDESC_VAL:
1253 if ((sym->st_info&0xf) == STT_SECTION) *reloc_addr += sym_val;
1254 else *reloc_addr = sym_val;
1255 reloc_addr[1] = def.sym ? (size_t)def.dso->got : 0;
1256 break;
1257 case REL_DTPMOD:
1258 *reloc_addr = def.dso->tls_id;
1259 break;
1260 case REL_DTPOFF:
1261 *reloc_addr = tls_val + addend - DTP_OFFSET;
1262 break;
1263 #ifdef TLS_ABOVE_TP
1264 case REL_TPOFF:
1265 *reloc_addr = tls_val + def.dso->tls.offset + TPOFF_K + addend;
1266 break;
1267 #else
1268 case REL_TPOFF:
1269 *reloc_addr = tls_val - def.dso->tls.offset + addend;
1270 break;
1271 case REL_TPOFF_NEG:
1272 *reloc_addr = def.dso->tls.offset - tls_val + addend;
1273 break;
1274 #endif
1275 case REL_TLSDESC:
1276 if (stride<3) addend = reloc_addr[!TLSDESC_BACKWARDS];
1277 if (def.dso->tls_id > static_tls_cnt) {
1278 struct td_index *new = malloc(sizeof *new);
1279 if (!new) {
1280 error(
1281 "Error relocating %s: cannot allocate TLSDESC for %s",
1282 dso->name, sym ? name : "(local)" );
1283 longjmp(*rtld_fail, 1);
1284 }
1285 new->next = dso->td_index;
1286 dso->td_index = new;
1287 new->args[0] = def.dso->tls_id;
1288 new->args[1] = tls_val + addend - DTP_OFFSET;
1289 reloc_addr[0] = (size_t)__tlsdesc_dynamic;
1290 reloc_addr[1] = (size_t)new;
1291 } else {
1292 reloc_addr[0] = (size_t)__tlsdesc_static;
1293 #ifdef TLS_ABOVE_TP
1294 reloc_addr[1] = tls_val + def.dso->tls.offset
1295 + TPOFF_K + addend;
1296 #else
1297 reloc_addr[1] = tls_val - def.dso->tls.offset
1298 + addend;
1299 #endif
1300 }
1301 /* Some archs (32-bit ARM at least) invert the order of
1302 * the descriptor members. Fix them up here. */
1303 if (TLSDESC_BACKWARDS) {
1304 size_t tmp = reloc_addr[0];
1305 reloc_addr[0] = reloc_addr[1];
1306 reloc_addr[1] = tmp;
1307 }
1308 break;
1309 default:
1310 error("Error relocating %s: unsupported relocation type %d",
1311 dso->name, type);
1312 if (runtime) longjmp(*rtld_fail, 1);
1313 continue;
1314 }
1315 }
1316 }
1317
1318 static void redo_lazy_relocs()
1319 {
1320 struct dso *p = lazy_head, *next;
1321 lazy_head = 0;
1322 for (; p; p=next) {
1323 next = p->lazy_next;
1324 size_t size = p->lazy_cnt*3*sizeof(size_t);
1325 p->lazy_cnt = 0;
1326 do_relocs(p, p->lazy, size, 3);
1327 if (p->lazy_cnt) {
1328 p->lazy_next = lazy_head;
1329 lazy_head = p;
1330 } else {
1331 free(p->lazy);
1332 p->lazy = 0;
1333 p->lazy_next = 0;
1334 }
1335 }
1336 }
1337
1338 /* A huge hack: to make up for the wastefulness of shared libraries
1339 * needing at least a page of dirty memory even if they have no global
1340 * data, we reclaim the gaps at the beginning and end of writable maps
1341 * and "donate" them to the heap. */
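/* For example, with 4K pages a writable LOAD segment at p_vaddr 0x3f20 with
 * p_memsz 0x200 donates [0x3000, 0x3f20) before it and [0x4120, 0x5000) after
 * it, minus any overlap with the RELRO region. */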
1342
1343 static void reclaim(struct dso *dso, size_t start, size_t end)
1344 {
1345 if (start >= dso->relro_start && start < dso->relro_end) start = dso->relro_end;
1346 if (end >= dso->relro_start && end < dso->relro_end) end = dso->relro_start;
1347 if (start >= end) return;
1348 char *base = laddr_pg(dso, start);
1349 __malloc_donate(base, base+(end-start));
1350 }
1351
1352 static void reclaim_gaps(struct dso *dso)
1353 {
1354 Phdr *ph = dso->phdr;
1355 size_t phcnt = dso->phnum;
1356
1357 for (; phcnt--; ph=(void *)((char *)ph+dso->phentsize)) {
1358 if (ph->p_type!=PT_LOAD) continue;
1359 if ((ph->p_flags&(PF_R|PF_W))!=(PF_R|PF_W)) continue;
1360 reclaim(dso, ph->p_vaddr & -PAGE_SIZE, ph->p_vaddr);
1361 reclaim(dso, ph->p_vaddr+ph->p_memsz,
1362 ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE);
1363 }
1364 }
1365
1366 UT_STATIC void *mmap_fixed(void *p, size_t n, int prot, int flags, int fd, off_t off)
1367 {
1368 static int no_map_fixed;
1369 char *q;
1370 if (!n) return p;
1371 if (!no_map_fixed) {
1372 q = mmap(p, n, prot, flags|MAP_FIXED, fd, off);
1373 if (!DL_NOMMU_SUPPORT || q != MAP_FAILED || errno != EINVAL)
1374 return q;
1375 no_map_fixed = 1;
1376 }
1377 /* Fallbacks for MAP_FIXED failure on NOMMU kernels. */
1378 if (flags & MAP_ANONYMOUS) {
1379 memset(p, 0, n);
1380 return p;
1381 }
1382 ssize_t r;
1383 if (lseek(fd, off, SEEK_SET) < 0) return MAP_FAILED;
1384 for (q=p; n; q+=r, off+=r, n-=r) {
1385 r = read(fd, q, n);
1386 if (r < 0 && errno != EINTR) return MAP_FAILED;
1387 if (!r) {
1388 memset(q, 0, n);
1389 break;
1390 }
1391 }
1392 return p;
1393 }
1394
1395 UT_STATIC void unmap_library(struct dso *dso)
1396 {
1397 if (dso->loadmap) {
1398 size_t i;
1399 for (i=0; i<dso->loadmap->nsegs; i++) {
1400 if (!dso->loadmap->segs[i].p_memsz)
1401 continue;
1402 if (!is_dlclose_debug_enable()) {
1403 munmap((void *)dso->loadmap->segs[i].addr,
1404 dso->loadmap->segs[i].p_memsz);
1405 } else {
1406 (void)mprotect((void *)dso->loadmap->segs[i].addr,
1407 dso->loadmap->segs[i].p_memsz, PROT_NONE);
1408 }
1409 }
1410 free(dso->loadmap);
1411 } else if (dso->map && dso->map_len) {
1412 if (!is_dlclose_debug_enable()) {
1413 munmap(dso->map, dso->map_len);
1414 } else {
1415 mprotect(dso->map, dso->map_len, PROT_NONE);
1416 }
1417 }
1418 }
1419
1420 UT_STATIC bool get_random(void *buf, size_t buflen)
1421 {
1422 int ret;
1423 int fd = open("/dev/urandom", O_RDONLY);
1424 if (fd < 0) {
1425 return false;
1426 }
1427
1428 ret = read(fd, buf, buflen);
1429 if (ret < 0) {
1430 close(fd);
1431 return false;
1432 }
1433
1434 close(fd);
1435 return true;
1436 }
1437
1438 UT_STATIC void fill_random_data(void *buf, size_t buflen)
1439 {
1440 uint64_t x;
1441 int i;
1442 int pos = 0;
1443 struct timespec ts;
1444 /* Try to use urandom to get the random number first */
1445 if (!get_random(buf, buflen)) {
1446 /* Can't get random number from /dev/urandom, generate from addr based on ASLR and time */
1447 for (i = 1; i <= (buflen / sizeof(x)); i++) {
1448 (void)clock_gettime(CLOCK_REALTIME, &ts);
1449 x = (((uint64_t)get_random) << 32) ^ (uint64_t)fill_random_data ^ ts.tv_nsec;
1450 memcpy((char *)buf + pos, &x, sizeof(x));
1451 pos += sizeof(x);
1452 }
1453 }
1454 return;
1455 }
1456
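/* Transparent huge pages are treated as available unless
 * /sys/kernel/mm/transparent_hugepage/enabled reports "[never]" or cannot be
 * read. */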
1457 static bool get_transparent_hugepages_supported(void)
1458 {
1459 int fd = -1;
1460 ssize_t read_size = 0;
1461 bool enable = false;
1462 char buf[HUGEPAGES_SUPPORTED_STR_SIZE] = {'0'};
1463
1464 fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
1465 if (fd < 0)
1466 goto done;
1467
1468 read_size = read(fd, buf, HUGEPAGES_SUPPORTED_STR_SIZE - 1);
1469 if (read_size < 0)
1470 goto close_fd;
1471
1472 buf[HUGEPAGES_SUPPORTED_STR_SIZE - 1] = '\0';
1473 if (strstr(buf, "[never]") == NULL)
1474 enable = true;
1475
1476 close_fd:
1477 close(fd);
1478 done:
1479 return enable;
1480 }
1481
1482 static size_t phdr_table_get_maxinum_alignment(Phdr *phdr_table, size_t phdr_count)
1483 {
1484 #if defined(__LP64__)
1485 size_t maxinum_alignment = PAGE_SIZE;
1486 size_t i = 0;
1487
1488 for (i = 0; i < phdr_count; ++i) {
1489 const Phdr *phdr = &phdr_table[i];
1490
1491 /* p_align must be 0, 1, or a positive, integral power of two */
1492 if ((phdr->p_type != PT_LOAD) || ((phdr->p_align & (phdr->p_align - 1)) != 0))
1493 continue;
1494
1495 if (phdr->p_align > maxinum_alignment)
1496 maxinum_alignment = phdr->p_align;
1497 }
1498
1499 return maxinum_alignment;
1500 #else
1501 return PAGE_SIZE;
1502 #endif
1503 }
1504
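/* Probe the kernel's XPM verification (e.g. code-signing checks) by mapping
 * the ELF header with MAP_XPM; if the mapping is rejected, the library is not
 * loaded. */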
1505 static bool check_xpm(int fd)
1506 {
1507 size_t mapLen = sizeof(Ehdr);
1508 void *map = mmap(0, mapLen, PROT_READ, MAP_PRIVATE | MAP_XPM, fd, 0);
1509 if (map == MAP_FAILED) {
1510 LD_LOGE("Xpm check failed for so file, errno for mmap is: %{public}d", errno);
1511 return false;
1512 }
1513 munmap(map, mapLen);
1514 return true;
1515 }
1516
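/* Map all PT_LOAD segments of the ELF image referenced by fd into dso,
 * honoring an optional reserved address range, optional huge-page alignment,
 * and extra protection flags (e.g. BTI) parsed from the program headers. */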
1517 UT_STATIC void *map_library(int fd, struct dso *dso, struct reserved_address_params *reserved_params)
1518 {
1519 Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
1520 void *allocated_buf=0;
1521 size_t phsize;
1522 size_t addr_min=SIZE_MAX, addr_max=0, map_len;
1523 size_t this_min, this_max;
1524 size_t nsegs = 0;
1525 off_t off_start;
1526 Ehdr *eh;
1527 Phdr *ph, *ph0;
1528 unsigned prot;
1529 #if defined(BTI_SUPPORT) && (!defined(__LITEOS__))
1530 unsigned ext_prot = 0;
1531 #endif
1532 unsigned char *map=MAP_FAILED, *base;
1533 size_t dyn=0;
1534 size_t tls_image=0;
1535 size_t i;
1536 int map_flags = MAP_PRIVATE;
1537 size_t start_addr;
1538 size_t start_alignment = PAGE_SIZE;
1539 bool hugepage_enabled = false;
1540 if (!check_xpm(fd)) {
1541 return 0;
1542 }
1543
1544 ssize_t l = read(fd, buf, sizeof buf);
1545 eh = buf;
1546 if (l<0) return 0;
1547 if (l<sizeof *eh || (eh->e_type != ET_DYN && eh->e_type != ET_EXEC))
1548 goto noexec;
1549 phsize = eh->e_phentsize * eh->e_phnum;
1550 if (phsize > sizeof buf - sizeof *eh) {
1551 allocated_buf = malloc(phsize);
1552 if (!allocated_buf) return 0;
1553 l = pread(fd, allocated_buf, phsize, eh->e_phoff);
1554 if (l < 0) goto error;
1555 if (l != phsize) goto noexec;
1556 ph = ph0 = allocated_buf;
1557 } else if (eh->e_phoff + phsize > l) {
1558 l = pread(fd, buf+1, phsize, eh->e_phoff);
1559 if (l < 0) goto error;
1560 if (l != phsize) goto noexec;
1561 ph = ph0 = (void *)(buf + 1);
1562 } else {
1563 ph = ph0 = (void *)((char *)buf + eh->e_phoff);
1564 }
1565 for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
1566 if (ph->p_type == PT_DYNAMIC) {
1567 dyn = ph->p_vaddr;
1568 } else if (ph->p_type == PT_TLS) {
1569 tls_image = ph->p_vaddr;
1570 dso->tls.align = ph->p_align;
1571 dso->tls.len = ph->p_filesz;
1572 dso->tls.size = ph->p_memsz;
1573 } else if (ph->p_type == PT_GNU_RELRO) {
1574 dso->relro_start = ph->p_vaddr & -PAGE_SIZE;
1575 dso->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
1576 } else if (ph->p_type == PT_GNU_STACK) {
1577 if (!runtime && ph->p_memsz > __default_stacksize) {
1578 __default_stacksize =
1579 ph->p_memsz < DEFAULT_STACK_MAX ?
1580 ph->p_memsz : DEFAULT_STACK_MAX;
1581 }
1582 }
1583 #if defined(BTI_SUPPORT) && (!defined(__LITEOS__))
1584 		/* Security enhancement: parse extra PROT flags from the ELF.
1585 		 * Currently only used for BTI protection. */
1586 if (ph->p_type == PT_GNU_PROPERTY || ph->p_type == PT_NOTE) {
1587 ext_prot |= parse_extra_prot_fd(fd, ph);
1588 }
1589 #endif
1590 if (ph->p_type != PT_LOAD) continue;
1591 nsegs++;
1592 if (ph->p_vaddr < addr_min) {
1593 addr_min = ph->p_vaddr;
1594 off_start = ph->p_offset;
1595 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1596 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1597 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1598 #if defined(BTI_SUPPORT) && (!defined(__LITEOS__))
1599 if (ph->p_flags & PF_X) {
1600 prot |= ext_prot;
1601 }
1602 #endif
1603 }
1604 if (ph->p_vaddr+ph->p_memsz > addr_max) {
1605 addr_max = ph->p_vaddr+ph->p_memsz;
1606 }
1607 }
1608 if (!dyn) goto noexec;
1609 if (DL_FDPIC && !(eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
1610 dso->loadmap = calloc(1, sizeof *dso->loadmap
1611 + nsegs * sizeof *dso->loadmap->segs);
1612 if (!dso->loadmap) goto error;
1613 dso->loadmap->nsegs = nsegs;
1614 for (ph=ph0, i=0; i<nsegs; ph=(void *)((char *)ph+eh->e_phentsize)) {
1615 if (ph->p_type != PT_LOAD) continue;
1616 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1617 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1618 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1619 #if defined(BTI_SUPPORT) && (!defined(__LITEOS__))
1620 if (ph->p_flags & PF_X) {
1621 prot |= ext_prot;
1622 }
1623 #endif
1624 map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE-1),
1625 prot, MAP_PRIVATE,
1626 fd, ph->p_offset & -PAGE_SIZE);
1627 if (map == MAP_FAILED) {
1628 unmap_library(dso);
1629 goto error;
1630 }
1631 dso->loadmap->segs[i].addr = (size_t)map +
1632 (ph->p_vaddr & PAGE_SIZE-1);
1633 dso->loadmap->segs[i].p_vaddr = ph->p_vaddr;
1634 dso->loadmap->segs[i].p_memsz = ph->p_memsz;
1635 i++;
1636 if (prot & PROT_WRITE) {
1637 size_t brk = (ph->p_vaddr & PAGE_SIZE-1)
1638 + ph->p_filesz;
1639 size_t pgbrk = brk + PAGE_SIZE-1 & -PAGE_SIZE;
1640 size_t pgend = brk + ph->p_memsz - ph->p_filesz
1641 + PAGE_SIZE-1 & -PAGE_SIZE;
1642 if (pgend > pgbrk && mmap_fixed(map+pgbrk,
1643 pgend-pgbrk, prot,
1644 MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
1645 -1, off_start) == MAP_FAILED)
1646 goto error;
1647 memset(map + brk, 0, pgbrk-brk);
1648 }
1649 }
1650 map = (void *)dso->loadmap->segs[0].addr;
1651 map_len = 0;
1652 goto done_mapping;
1653 }
1654 addr_max += PAGE_SIZE-1;
1655 addr_max &= -PAGE_SIZE;
1656 addr_min &= -PAGE_SIZE;
1657 off_start &= -PAGE_SIZE;
1658 map_len = addr_max - addr_min + off_start;
1659 start_addr = addr_min;
1660
1661 hugepage_enabled = get_transparent_hugepages_supported();
1662 if (hugepage_enabled) {
1663 size_t maxinum_alignment = phdr_table_get_maxinum_alignment(ph0, eh->e_phnum);
1664
1665 start_alignment = maxinum_alignment == KPMD_SIZE ? KPMD_SIZE : PAGE_SIZE;
1666 }
1667
1668 if (reserved_params) {
1669 if (map_len > reserved_params->reserved_size) {
1670 if (reserved_params->must_use_reserved) {
1671 goto error;
1672 }
1673 } else {
1674 start_addr = ((size_t)reserved_params->start_addr - 1 + PAGE_SIZE) & -PAGE_SIZE;
1675 map_flags |= MAP_FIXED;
1676 }
1677 }
1678
1679 /* we will find a mapping_align aligned address as the start of dso
1680 * so we need a tmp_map_len as map_len + mapping_align to make sure
1681 * we have enough space to shift the dso to the correct location. */
1682 size_t mapping_align = start_alignment > LIBRARY_ALIGNMENT ? start_alignment : LIBRARY_ALIGNMENT;
1683 size_t tmp_map_len = ALIGN(map_len, mapping_align) + mapping_align - PAGE_SIZE;
1684
1685 	/* Map all load segments with PROT_READ first for security considerations. */
1686 prot = PROT_READ;
1687
1688 	/* If reserved_params exists, use start_addr as the preferred address for the mmap operation */
1689 if (reserved_params) {
1690 map = DL_NOMMU_SUPPORT
1691 ? mmap((void *)start_addr, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
1692 : mmap((void *)start_addr, map_len, prot, map_flags, fd, off_start);
1693 if (map == MAP_FAILED) {
1694 goto error;
1695 }
1696 if (reserved_params && map_len < reserved_params->reserved_size) {
1697 reserved_params->reserved_size -= (map_len + (start_addr - (size_t)reserved_params->start_addr));
1698 reserved_params->start_addr = (void *)((uint8_t *)map + map_len);
1699 }
1700 		/* If reserved_params does not exist, use real_map as the preferred address for the mmap operation */
1701 } else {
1702 /* use tmp_map_len to mmap enough space for the dso with anonymous mapping */
1703 unsigned char *temp_map = mmap((void *)NULL, tmp_map_len, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1704 if (temp_map == MAP_FAILED) {
1705 goto error;
1706 }
1707
1708 /* find the mapping_align aligned address */
1709 unsigned char *real_map = (unsigned char*)ALIGN((uintptr_t)temp_map, mapping_align);
1710 map = DL_NOMMU_SUPPORT
1711 ? mmap(real_map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
1712 /* use map_len to mmap correct space for the dso with file mapping */
1713 : mmap(real_map, map_len, prot, map_flags | MAP_FIXED, fd, off_start);
1714 if (map == MAP_FAILED || map != real_map) {
1715 LD_LOGE("mmap MAP_FIXED failed");
1716 goto error;
1717 }
1718
1719 /* Free unused memory.
1720 * |--------------------------tmp_map_len--------------------------|
1721 * ^ ^ ^ ^
1722 * |---unused_part_1---|---------map_len-------|---unused_part_2---|
1723 * temp_map real_map(aligned) temp_map_end
1724 */
1725 unsigned char *temp_map_end = temp_map + tmp_map_len;
1726 size_t unused_part_1 = real_map - temp_map;
1727 size_t unused_part_2 = temp_map_end - (real_map + map_len);
1728 if (unused_part_1 > 0) {
1729 int res1 = munmap(temp_map, unused_part_1);
1730 if (res1 == -1) {
1731 LD_LOGE("munmap unused part 1 failed, errno:%{public}d", errno);
1732 }
1733 }
1734
1735 if (unused_part_2 > 0) {
1736 int res2 = munmap(real_map + map_len, unused_part_2);
1737 if (res2 == -1) {
1738 LD_LOGE("munmap unused part 2 failed, errno:%{public}d", errno);
1739 }
1740 }
1741 }
1742 dso->map = map;
1743 dso->map_len = map_len;
1744 /* If the loaded file is not relocatable and the requested address is
1745 * not available, then the load operation must fail. */
1746 if (eh->e_type != ET_DYN && addr_min && map!=(void *)addr_min) {
1747 errno = EBUSY;
1748 goto error;
1749 }
1750 base = map - addr_min;
1751 dso->phdr = 0;
1752 dso->phnum = 0;
1753 for (ph=ph0, i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
1754 if (ph->p_type == PT_OHOS_RANDOMDATA) {
1755 fill_random_data((void *)(ph->p_vaddr + base), ph->p_memsz);
1756 continue;
1757 }
1758 if (ph->p_type != PT_LOAD) continue;
1759 /* Check if the program headers are in this load segment, and
1760 * if so, record the address for use by dl_iterate_phdr. */
1761 if (!dso->phdr && eh->e_phoff >= ph->p_offset
1762 && eh->e_phoff+phsize <= ph->p_offset+ph->p_filesz) {
1763 dso->phdr = (void *)(base + ph->p_vaddr
1764 + (eh->e_phoff-ph->p_offset));
1765 dso->phnum = eh->e_phnum;
1766 dso->phentsize = eh->e_phentsize;
1767 }
1768 this_min = ph->p_vaddr & -PAGE_SIZE;
1769 this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
1770 off_start = ph->p_offset & -PAGE_SIZE;
1771 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1772 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1773 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1774 #if defined(BTI_SUPPORT) && (!defined(__LITEOS__))
1775 if (ph->p_flags & PF_X) {
1776 prot |= ext_prot;
1777 }
1778 #endif
1779 /* Reuse the existing mapping for the lowest-address LOAD */
1780 if (mmap_fixed(
1781 base + this_min,
1782 this_max - this_min,
1783 prot, MAP_PRIVATE | MAP_FIXED,
1784 fd,
1785 off_start) == MAP_FAILED) {
1786 LD_LOGE("Error mapping library: mmap fix failed errno=%{public}d", errno);
1787 goto error;
1788 }
1789 if ((ph->p_flags & PF_X) && (ph->p_align == KPMD_SIZE) && hugepage_enabled)
1790 madvise(base + this_min, this_max - this_min, MADV_HUGEPAGE);
1791 if (ph->p_memsz > ph->p_filesz && (ph->p_flags&PF_W)) {
1792 size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
1793 size_t pgbrk = brk+PAGE_SIZE-1 & -PAGE_SIZE;
1794 size_t zeromap_size = (size_t)base + this_max - pgbrk;
1795 memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE-1);
1796 if (pgbrk - (size_t)base < this_max && mmap_fixed((void *)pgbrk, zeromap_size, prot, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
1797 goto error;
1798 set_bss_vma_name(dso->name, (void *)pgbrk, zeromap_size);
1799 }
1800 }
1801 for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
1802 if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
1803 if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC)
1804 && errno != ENOSYS)
1805 goto error;
1806 break;
1807 }
1808 done_mapping:
1809 dso->base = base;
1810 dso->dynv = laddr(dso, dyn);
1811 if (dso->tls.size) dso->tls.image = laddr(dso, tls_image);
1812 free(allocated_buf);
1813 return map;
1814 noexec:
1815 errno = ENOEXEC;
1816 error:
1817 if (map!=MAP_FAILED) unmap_library(dso);
1818 free(allocated_buf);
1819 return 0;
1820 }
1821
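/* Try "name" under each ':' (or newline) separated component of the search
 * path "s", writing the candidate pathname into "buf". Returns an open fd on
 * success, -1 if not found, or -2 to inhibit any further path search.
 * Illustrative example (hypothetical paths): path_open("libfoo.so",
 * "/system/lib64:/vendor/lib64", buf, sizeof buf) tries
 * "/system/lib64/libfoo.so", then "/vendor/lib64/libfoo.so". */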
1822 static int path_open(const char *name, const char *s, char *buf, size_t buf_size)
1823 {
1824 size_t l;
1825 int fd;
1826 for (;;) {
1827 s += strspn(s, ":\n");
1828 l = strcspn(s, ":\n");
1829 if (l-1 >= INT_MAX) return -1;
1830 if (snprintf(buf, buf_size, "%.*s/%s", (int)l, s, name) < buf_size) {
1831 if ((fd = open(buf, O_RDONLY|O_CLOEXEC))>=0) return fd;
1832 switch (errno) {
1833 case ENOENT:
1834 case ENOTDIR:
1835 case EACCES:
1836 case ENAMETOOLONG:
1837 break;
1838 default:
1839 /* Any negative value but -1 will inhibit
1840 * further path search. */
1841 return -2;
1842 }
1843 }
1844 s += l;
1845 }
1846 }
1847
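/* Expand $ORIGIN / ${ORIGIN} in a dso's DT_RPATH/DT_RUNPATH string to the
 * directory containing that dso. Illustrative example (hypothetical paths):
 * an rpath of "$ORIGIN/../lib" for a dso loaded from "/opt/app/bin/libx.so"
 * expands to "/opt/app/bin/../lib". */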
1848 UT_STATIC int fixup_rpath(struct dso *p, char *buf, size_t buf_size)
1849 {
1850 size_t n, l;
1851 const char *s, *t, *origin;
1852 char *d;
1853 if (p->rpath || !p->rpath_orig) return 0;
1854 if (!strchr(p->rpath_orig, '$')) {
1855 p->rpath = ld_strdup(p->rpath_orig);
1856 return 0;
1857 }
1858 n = 0;
1859 s = p->rpath_orig;
1860 while ((t=strchr(s, '$'))) {
1861 if (strncmp(t, "$ORIGIN", 7) && strncmp(t, "${ORIGIN}", 9))
1862 return 0;
1863 s = t+1;
1864 n++;
1865 }
1866 if (n > SSIZE_MAX/PATH_MAX) return 0;
1867
1868 if (p->kernel_mapped) {
1869 /* $ORIGIN searches cannot be performed for the main program
1870 * when it is suid/sgid/AT_SECURE. This is because the
1871 * pathname is under the control of the caller of execve.
1872 * For libraries, however, $ORIGIN can be processed safely
1873 * since the library's pathname came from a trusted source
1874 * (either system paths or a call to dlopen). */
1875 if (libc.secure)
1876 return 0;
1877 l = readlink("/proc/self/exe", buf, buf_size);
1878 if (l == -1) switch (errno) {
1879 case ENOENT:
1880 case ENOTDIR:
1881 case EACCES:
1882 return 0;
1883 default:
1884 return -1;
1885 }
1886 if (l >= buf_size)
1887 return 0;
1888 buf[l] = 0;
1889 origin = buf;
1890 } else {
1891 origin = p->name;
1892 }
1893 t = strrchr(origin, '/');
1894 if (t) {
1895 l = t-origin;
1896 } else {
1897 /* Normally p->name will always be an absolute or relative
1898 * pathname containing at least one '/' character, but in the
1899 * case where ldso was invoked as a command to execute a
1900 * program in the working directory, app.name may not. Fix. */
1901 origin = ".";
1902 l = 1;
1903 }
1904 /* Disallow non-absolute origins for suid/sgid/AT_SECURE. */
1905 if (libc.secure && *origin != '/')
1906 return 0;
1907 p->rpath = malloc(strlen(p->rpath_orig) + n*l + 1);
1908 if (!p->rpath) return -1;
1909
1910 d = p->rpath;
1911 s = p->rpath_orig;
1912 while ((t=strchr(s, '$'))) {
1913 memcpy(d, s, t-s);
1914 d += t-s;
1915 memcpy(d, origin, l);
1916 d += l;
1917 /* It was determined previously that the '$' is followed
1918 * either by "ORIGIN" or "{ORIGIN}". */
1919 s = t + 7 + 2*(t[1]=='{');
1920 }
1921 strcpy(d, s);
1922 return 0;
1923 }
1924
1925 static void decode_dyn(struct dso *p)
1926 {
1927 size_t dyn[DYN_CNT];
1928 size_t flags1 = 0;
1929 decode_vec(p->dynv, dyn, DYN_CNT);
1930 search_vec(p->dynv, &flags1, DT_FLAGS_1);
1931 if (flags1 & DF_1_GLOBAL) {
1932 LD_LOGI("Add DF_1_GLOBAL for %{public}s", p->name);
1933 p->is_global = true;
1934 }
1935 if (flags1 & DF_1_NODELETE) {
1936 p->flags |= DSO_FLAGS_NODELETE;
1937 }
1938 p->syms = laddr(p, dyn[DT_SYMTAB]);
1939 p->strings = laddr(p, dyn[DT_STRTAB]);
1940 if (dyn[0]&(1<<DT_HASH))
1941 p->hashtab = laddr(p, dyn[DT_HASH]);
1942 if (dyn[0]&(1<<DT_RPATH))
1943 p->rpath_orig = p->strings + dyn[DT_RPATH];
1944 if (dyn[0]&(1<<DT_RUNPATH))
1945 p->rpath_orig = p->strings + dyn[DT_RUNPATH];
1946 if (dyn[0]&(1<<DT_PLTGOT))
1947 p->got = laddr(p, dyn[DT_PLTGOT]);
1948 if (search_vec(p->dynv, dyn, DT_GNU_HASH))
1949 p->ghashtab = laddr(p, *dyn);
1950 if (search_vec(p->dynv, dyn, DT_VERSYM))
1951 p->versym = laddr(p, *dyn);
1952 if (search_vec(p->dynv, dyn, DT_VERDEF))
1953 p->verdef = laddr(p, *dyn);
1954 if (search_vec(p->dynv, dyn, DT_VERNEED))
1955 p->verneed = laddr(p, *dyn);
1956 }
1957
1958 UT_STATIC size_t count_syms(struct dso *p)
1959 {
1960 if (p->hashtab) return p->hashtab[1];
1961
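/* Only a GNU hash table is present, which does not store the symbol count
 * directly: take the largest symbol index found in the buckets, then walk
 * that chain's hash values until the low bit marks the end of the chain. */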
1962 size_t nsym, i;
1963 uint32_t *buckets = p->ghashtab + 4 + (p->ghashtab[2]*sizeof(size_t)/4);
1964 uint32_t *hashval;
1965 for (i = nsym = 0; i < p->ghashtab[0]; i++) {
1966 if (buckets[i] > nsym)
1967 nsym = buckets[i];
1968 }
1969 if (nsym) {
1970 hashval = buckets + p->ghashtab[0] + (nsym - p->ghashtab[1]);
1971 do nsym++;
1972 while (!(*hashval++ & 1));
1973 }
1974 return nsym;
1975 }
1976
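/* Minimal anonymous mapping via raw syscall, usable before the normal
 * allocator; returns 0 on failure (kernel error returns occupy the last
 * page of the address space). */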
1977 static void *dl_mmap(size_t n)
1978 {
1979 void *p;
1980 int prot = PROT_READ|PROT_WRITE, flags = MAP_ANONYMOUS|MAP_PRIVATE;
1981 #ifdef SYS_mmap2
1982 p = (void *)__syscall(SYS_mmap2, 0, n, prot, flags, -1, 0);
1983 #else
1984 p = (void *)__syscall(SYS_mmap, 0, n, prot, flags, -1, 0);
1985 #endif
1986 return (unsigned long)p > -4096UL ? 0 : p;
1987 }
1988
1989 static void makefuncdescs(struct dso *p)
1990 {
1991 static int self_done;
1992 size_t nsym = count_syms(p);
1993 size_t i, size = nsym * sizeof(*p->funcdescs);
1994
1995 if (!self_done) {
1996 p->funcdescs = dl_mmap(size);
1997 self_done = 1;
1998 } else {
1999 p->funcdescs = malloc(size);
2000 }
2001 if (!p->funcdescs) {
2002 if (!runtime) a_crash();
2003 error("Error allocating function descriptors for %s", p->name);
2004 longjmp(*rtld_fail, 1);
2005 }
2006 for (i=0; i<nsym; i++) {
2007 if ((p->syms[i].st_info&0xf)==STT_FUNC && p->syms[i].st_shndx) {
2008 p->funcdescs[i].addr = laddr(p, p->syms[i].st_value);
2009 p->funcdescs[i].got = p->got;
2010 } else {
2011 p->funcdescs[i].addr = 0;
2012 p->funcdescs[i].got = 0;
2013 }
2014 }
2015 }
2016
2017 static void get_sys_path(ns_configor *conf)
2018 {
2019 LD_LOGD("get_sys_path g_is_asan:%{public}d", g_is_asan);
2020 /* Use ini file's system paths when Asan is not enabled */
2021 if (!g_is_asan) {
2022 sys_path = conf->get_sys_paths();
2023 } else {
2024 /* Use the ini file's asan system paths when Asan is enabled.
2025 * Merge the two strings when both sys_paths and asan_sys_paths are valid. */
2026 sys_path = conf->get_asan_sys_paths();
2027 char *sys_path_default = conf->get_sys_paths();
2028 if (!sys_path) {
2029 sys_path = sys_path_default;
2030 } else if (sys_path_default) {
2031 size_t newlen = strlen(sys_path) + strlen(sys_path_default) + 2;
2032 char *new_syspath = malloc(newlen);
2033 memset(new_syspath, 0, newlen);
2034 strcpy(new_syspath, sys_path);
2035 strcat(new_syspath, ":");
2036 strcat(new_syspath, sys_path_default);
2037 sys_path = new_syspath;
2038 }
2039 }
2040 if (!sys_path) sys_path = "/lib:/usr/local/lib:/usr/lib:/lib64";
2041 LD_LOGD("get_sys_path sys_path:%{public}s", sys_path);
2042 }
2043
2044 static struct dso *search_dso_by_name(const char *name, const ns_t *ns) {
2045 LD_LOGD("search_dso_by_name name:%{public}s, ns_name:%{public}s", name, ns ? ns->ns_name: "NULL");
2046 for (size_t i = 0; i < ns->ns_dsos->num; i++) {
2047 struct dso *p = ns->ns_dsos->dsos[i];
2048 if (p->shortname && !strcmp(p->shortname, name)) {
2049 LD_LOGD("search_dso_by_name found name:%{public}s, ns_name:%{public}s", name, ns ? ns->ns_name: "NULL");
2050 return p;
2051 }
2052 }
2053 return NULL;
2054 }
2055
2056 static struct dso *search_dso_by_fstat(const struct stat *st, const ns_t *ns, uint64_t file_offset) {
2057 LD_LOGD("search_dso_by_fstat ns_name:%{public}s", ns ? ns->ns_name : "NULL");
2058 for (size_t i = 0; i < ns->ns_dsos->num; i++) {
2059 struct dso *p = ns->ns_dsos->dsos[i];
2060 if (p->dev == st->st_dev && p->ino == st->st_ino && p->file_offset == file_offset) {
2061 LD_LOGD("search_dso_by_fstat found dev:%{public}lu, ino:%{public}lu, ns_name:%{public}s",
2062 st->st_dev, st->st_ino, ns ? ns->ns_name : "NULL");
2063 return p;
2064 }
2065 }
2066 return NULL;
2067 }
2068
2069 static inline int app_has_same_name_so(const char *so_name, const ns_t *ns)
2070 {
2071 int fd = -1;
2072 /* Only check system apps. */
2073 if (((ns->flag & LOCAL_NS_PREFERED) != 0) && ns->lib_paths) {
2074 char tmp_buf[PATH_MAX + 1];
2075 fd = path_open(so_name, ns->lib_paths, tmp_buf, sizeof tmp_buf);
2076 }
2077 return fd;
2078 }
2079
2080 /* Find loaded so by name */
2081 static struct dso *find_library_by_name(const char *name, const ns_t *ns, bool check_inherited)
2082 {
2083 LD_LOGD("find_library_by_name name:%{public}s, ns_name:%{public}s, check_inherited:%{public}d",
2084 name,
2085 ns ? ns->ns_name : "NULL",
2086 !!check_inherited);
2087 struct dso *p = search_dso_by_name(name, ns);
2088 if (p) return p;
2089 if (check_inherited && ns->ns_inherits) {
2090 for (size_t i = 0; i < ns->ns_inherits->num; i++) {
2091 ns_inherit * inherit = ns->ns_inherits->inherits[i];
2092 p = search_dso_by_name(name, inherit->inherited_ns);
2093 if (p && is_sharable(inherit, name)) {
2094 if (app_has_same_name_so(name, ns) != -1) {
2095 return NULL;
2096 }
2097 return p;
2098 }
2099 }
2100 }
2101 return NULL;
2102 }
2103 /* Find loaded so by file stat */
2104 UT_STATIC struct dso *find_library_by_fstat(const struct stat *st, const ns_t *ns, bool check_inherited, uint64_t file_offset) {
2105 LD_LOGD("find_library_by_fstat ns_name:%{public}s, check_inherited :%{public}d",
2106 ns ? ns->ns_name : "NULL",
2107 !!check_inherited);
2108 struct dso *p = search_dso_by_fstat(st, ns, file_offset);
2109 if (p) return p;
2110 if (check_inherited && ns->ns_inherits) {
2111 for (size_t i = 0; i < ns->ns_inherits->num; i++) {
2112 ns_inherit *inherit = ns->ns_inherits->inherits[i];
2113 p = search_dso_by_fstat(st, inherit->inherited_ns, file_offset);
2114 if (p && is_sharable(inherit, p->shortname)) return p;
2115 }
2116 }
2117 return NULL;
2118 }
2119
2120 #ifndef LOAD_ORDER_RANDOMIZATION
2121 /* add namespace function */
2122 struct dso *load_library(
2123 const char *name, struct dso *needed_by, ns_t *namespace, bool check_inherited, struct reserved_address_params *reserved_params)
2124 {
2125 char buf[PATH_MAX + 1];
2126 const char *pathname;
2127 unsigned char *map;
2128 struct dso *p, temp_dso = {0};
2129 int fd;
2130 struct stat st;
2131 size_t alloc_size;
2132 int n_th = 0;
2133 int is_self = 0;
2134
2135 if (!*name) {
2136 errno = EINVAL;
2137 return 0;
2138 }
2139
2140 /* Catch and block attempts to reload the implementation itself */
2141 if (name[0]=='l' && name[1]=='i' && name[2]=='b') {
2142 static const char reserved[] =
2143 "c.pthread.rt.m.dl.util.xnet.";
2144 const char *rp, *next;
2145 for (rp=reserved; *rp; rp=next) {
2146 next = strchr(rp, '.') + 1;
2147 if (strncmp(name+3, rp, next-rp) == 0)
2148 break;
2149 }
2150 if (*rp) {
2151 if (ldd_mode) {
2152 /* Track which names have been resolved
2153 * and only report each one once. */
2154 static unsigned reported;
2155 unsigned mask = 1U<<(rp-reserved);
2156 if (!(reported & mask)) {
2157 reported |= mask;
2158 dprintf(1, "\t%s => %s (%p)\n",
2159 name, ldso.name,
2160 ldso.base);
2161 }
2162 }
2163 is_self = 1;
2164 }
2165 }
2166 if (!strcmp(name, ldso.name)) is_self = 1;
2167 if (is_self) {
2168 if (!ldso.prev) {
2169 tail->next = &ldso;
2170 ldso.prev = tail;
2171 tail = &ldso;
2172 ldso.namespace = namespace;
2173 ns_add_dso(namespace, &ldso);
2174 }
2175 return &ldso;
2176 }
2177 if (strchr(name, '/')) {
2178 pathname = name;
2179
2180 if (!is_accessible(namespace, pathname, g_is_asan, check_inherited)) {
2181 fd = -1;
2182 LD_LOGD("load_library is_accessible return false,fd = -1");
2183 } else {
2184 fd = open(name, O_RDONLY|O_CLOEXEC);
2185 LD_LOGD("load_library is_accessible return true, open file fd:%{public}d .", fd);
2186 }
2187 } else {
2188 /* Search for the name to see if it's already loaded */
2189 /* Search in namespace */
2190 p = find_library_by_name(name, namespace, check_inherited);
2191 if (p) {
2192 LD_LOGD("load_library find_library_by_name found p, return it!");
2193 return p;
2194 }
2195 if (strlen(name) > NAME_MAX) {
2196 LD_LOGE("load_library name exceeding the maximum length, return 0!");
2197 return 0;
2198 }
2199 fd = -1;
2200 if (namespace->env_paths) fd = path_open(name, namespace->env_paths, buf, sizeof buf);
2201 for (p = needed_by; fd == -1 && p; p = p->needed_by) {
2202 if (fixup_rpath(p, buf, sizeof buf) < 0) {
2203 LD_LOGD("load_library Inhibit further search,fd = -2.");
2204 fd = -2; /* Inhibit further search. */
2205 }
2206 if (p->rpath) {
2207 fd = path_open(name, p->rpath, buf, sizeof buf);
2208 LD_LOGD("load_library p->rpath path_open fd:%{public}d.", fd);
2209 }
2210
2211 }
2212 if (g_is_asan) {
2213 fd = handle_asan_path_open(fd, name, namespace, buf, sizeof buf);
2214 LD_LOGD("load_library handle_asan_path_open fd:%{public}d.", fd);
2215 } else {
2216 if (fd == -1 && namespace->lib_paths) {
2217 fd = path_open(name, namespace->lib_paths, buf, sizeof buf);
2218 LD_LOGD("load_library no asan lib_paths path_open fd:%{public}d.", fd);
2219 }
2220 }
2221 pathname = buf;
2222 LD_LOGD("load_library lib_paths pathname:%{public}s.", pathname);
2223 }
2224 if (fd < 0) {
2225 if (!check_inherited || !namespace->ns_inherits) return 0;
2226 /* Load lib in inherited namespace. Do not check inherited again.*/
2227 for (size_t i = 0; i < namespace->ns_inherits->num; i++) {
2228 ns_inherit *inherit = namespace->ns_inherits->inherits[i];
2229 if (strchr(name, '/') == 0 && !is_sharable(inherit, name)) continue;
2230 p = load_library(name, needed_by, inherit->inherited_ns, false, reserved_params);
2231 if (p) {
2232 LD_LOGD("load_library search in inherited, found p ,inherited_ns name:%{public}s",
2233 inherit->inherited_ns->ns_name);
2234 return p;
2235 }
2236 }
2237 return 0;
2238 }
2239 if (fstat(fd, &st) < 0) {
2240 close(fd);
2241 LD_LOGE("load_library fstat < 0,return 0!");
2242 return 0;
2243 }
2244 /* Search in namespace */
2245 p = find_library_by_fstat(&st, namespace, check_inherited, 0);
2246 if (p) {
2247 /* If this library was previously loaded with a
2248 * pathname but a search found the same inode,
2249 * setup its shortname so it can be found by name. */
2250 if (!p->shortname && pathname != name)
2251 p->shortname = strrchr(p->name, '/')+1;
2252 close(fd);
2253 LD_LOGD("load_library find_library_by_fstat, found p and return it!");
2254 return p;
2255 }
2256 map = noload ? 0 : map_library(fd, &temp_dso, reserved_params);
2257 close(fd);
2258 if (!map) return 0;
2259
2260 /* Avoid the danger of getting two versions of libc mapped into the
2261 * same process when an absolute pathname was used. The symbols
2262 * checked are chosen to catch both musl and glibc, and to avoid
2263 * false positives from interposition-hack libraries. */
2264 decode_dyn(&temp_dso);
2265 if (find_sym(&temp_dso, "__libc_start_main", 1).sym &&
2266 find_sym(&temp_dso, "stdin", 1).sym) {
2267 unmap_library(&temp_dso);
2268 return load_library("libc.so", needed_by, namespace, true, reserved_params);
2269 }
2270 /* Past this point, if we haven't reached runtime yet, ldso has
2271 * committed either to use the mapped library or to abort execution.
2272 * Unmapping is not possible, so we can safely reclaim gaps. */
2273 if (!runtime) reclaim_gaps(&temp_dso);
2274
2275 /* Allocate storage for the new DSO. When there is TLS, this
2276 * storage must include a reservation for all pre-existing
2277 * threads to obtain copies of both the new TLS, and an
2278 * extended DTV capable of storing an additional slot for
2279 * the newly-loaded DSO. */
2280 alloc_size = sizeof *p + strlen(pathname) + 1;
2281 if (runtime && temp_dso.tls.image) {
2282 size_t per_th = temp_dso.tls.size + temp_dso.tls.align
2283 + sizeof(void *) * (tls_cnt+3);
2284 n_th = libc.threads_minus_1 + 1;
2285 if (n_th > SSIZE_MAX / per_th) alloc_size = SIZE_MAX;
2286 else alloc_size += n_th * per_th;
2287 }
2288 p = calloc(1, alloc_size);
2289 if (!p) {
2290 unmap_library(&temp_dso);
2291 return 0;
2292 }
2293 memcpy(p, &temp_dso, sizeof temp_dso);
2294 p->dev = st.st_dev;
2295 p->ino = st.st_ino;
2296 p->needed_by = needed_by;
2297 p->name = p->buf;
2298 p->runtime_loaded = runtime;
2299 strcpy(p->name, pathname);
2300 /* Add a shortname only if name arg was not an explicit pathname. */
2301 if (pathname != name) p->shortname = strrchr(p->name, '/')+1;
2302 if (p->tls.image) {
2303 p->tls_id = ++tls_cnt;
2304 tls_align = MAXP2(tls_align, p->tls.align);
2305 #ifdef TLS_ABOVE_TP
2306 p->tls.offset = tls_offset + ( (p->tls.align-1) &
2307 (-tls_offset + (uintptr_t)p->tls.image) );
2308 tls_offset = p->tls.offset + p->tls.size;
2309 #else
2310 tls_offset += p->tls.size + p->tls.align - 1;
2311 tls_offset -= (tls_offset + (uintptr_t)p->tls.image)
2312 & (p->tls.align-1);
2313 p->tls.offset = tls_offset;
2314 #endif
2315 p->new_dtv = (void *)(-sizeof(size_t) &
2316 (uintptr_t)(p->name+strlen(p->name)+sizeof(size_t)));
2317 p->new_tls = (void *)(p->new_dtv + n_th*(tls_cnt+1));
2318 if (tls_tail) tls_tail->next = &p->tls;
2319 else libc.tls_head = &p->tls;
2320 tls_tail = &p->tls;
2321 }
2322
2323 tail->next = p;
2324 p->prev = tail;
2325 tail = p;
2326
2327 /* Add dso to namespace */
2328 p->namespace = namespace;
2329 ns_add_dso(namespace, p);
2330 if (runtime)
2331 p->by_dlopen = 1;
2332
2333 if (DL_FDPIC) makefuncdescs(p);
2334
2335 if (ldd_mode) dprintf(1, "\t%s => %s (%p)\n", name, pathname, p->base);
2336
2337 return p;
2338 }
2339
2340 static void load_direct_deps(struct dso *p, ns_t *namespace, struct reserved_address_params *reserved_params)
2341 {
2342 size_t i, cnt=0;
2343
2344 if (p->deps) return;
2345 /* For head, all preloads are direct pseudo-dependencies.
2346 * Count and include them now to avoid realloc later. */
2347 if (p==head) for (struct dso *q=p->next; q; q=q->next)
2348 cnt++;
2349 for (i=0; p->dynv[i]; i+=2)
2350 if (p->dynv[i] == DT_NEEDED) cnt++;
2351 /* Use builtin buffer for apps with no external deps, to
2352 * preserve property of no runtime failure paths. */
2353 p->deps = (p==head && cnt<2) ? builtin_deps :
2354 calloc(cnt+1, sizeof *p->deps);
2355 if (!p->deps) {
2356 error("Error loading dependencies for %s", p->name);
2357 if (runtime) longjmp(*rtld_fail, 1);
2358 }
2359 cnt=0;
2360 if (p==head) for (struct dso *q=p->next; q; q=q->next)
2361 p->deps[cnt++] = q;
2362 for (i=0; p->dynv[i]; i+=2) {
2363 if (p->dynv[i] != DT_NEEDED) continue;
2364 struct dso *dep = load_library(p->strings + p->dynv[i + 1], p, namespace, true, reserved_params);
2365 LD_LOGD("loading shared library %{public}s: (needed by %{public}s)", p->strings + p->dynv[i+1], p->name);
2366 if (!dep) {
2367 error("Error loading shared library %s: %m (needed by %s)",
2368 p->strings + p->dynv[i+1], p->name);
2369 if (runtime) longjmp(*rtld_fail, 1);
2370 continue;
2371 }
2372 p->deps[cnt++] = dep;
2373 }
2374 p->deps[cnt] = 0;
2375 p->ndeps_direct = cnt;
2376 for (i = 0; i < p->ndeps_direct; i++) {
2377 add_dso_parent(p->deps[i], p);
2378 }
2379 }
2380
2381 static void load_deps(struct dso *p, struct reserved_address_params *reserved_params)
2382 {
2383 if (p->deps) return;
2384 for (; p; p = p->next)
2385 load_direct_deps(p, p->namespace, reserved_params);
2386 }
2387 #endif
2388
2389 static void extend_bfs_deps(struct dso *p, bool to_deps_all)
2390 {
2391 size_t i, j, cnt, ndeps_all;
2392 struct dso **tmp;
2393
2394 /* Can't use realloc if the original p->deps was allocated at
2395 * program entry and malloc has been replaced, or if it's
2396 * the builtin non-allocated trivial main program deps array. */
2397 int no_realloc = (__malloc_replaced && !p->runtime_loaded)
2398 || p->deps == builtin_deps;
2399
2400 if (p->bfs_built) return;
2401 if (to_deps_all && p->deps_all_built) {
2402 return;
2403 }
2404
2405 ndeps_all = p->ndeps_direct;
2406 if (to_deps_all) {
2407 // Allocate one extra slot because the deps list is NULL-terminated.
2408 p->deps_all = calloc(ndeps_all + 1, sizeof *p->deps);
2409 }
2410
2411 /* Mark existing (direct) deps so they won't be duplicated. */
2412 for (i=0; p->deps[i]; i++) {
2413 if (to_deps_all) {
2414 p->deps_all[i] = p->deps[i];
2415 }
2416 p->deps[i]->mark = 1;
2417 }
2418
2419 /* For each dependency already in the list, copy its list of direct
2420 * dependencies to the list, excluding any items already in the
2421 * list. Note that the list this loop iterates over will grow during
2422 * the loop, but since duplicates are excluded, growth is bounded. */
2423 if (to_deps_all) {
2424 for (i=0; p->deps_all[i]; i++) {
2425 struct dso *dep = p->deps_all[i];
2426 for (j=cnt=0; j<dep->ndeps_direct; j++)
2427 if (!dep->deps[j]->mark) cnt++;
2428 tmp = no_realloc ?
2429 malloc(sizeof(*tmp) * (ndeps_all+cnt+1)) :
2430 realloc(p->deps_all, sizeof(*tmp) * (ndeps_all+cnt+1));
2431 if (!tmp) {
2432 error("Error recording dependencies for %s", p->name);
2433 if (runtime) longjmp(*rtld_fail, 1);
2434 continue;
2435 }
2436 if (no_realloc) {
2437 memcpy(tmp, p->deps_all, sizeof(*tmp) * (ndeps_all+1));
2438 no_realloc = 0;
2439 }
2440 p->deps_all = tmp;
2441 for (j=0; j<dep->ndeps_direct; j++) {
2442 if (dep->deps[j]->mark) continue;
2443 dep->deps[j]->mark = 1;
2444 p->deps_all[ndeps_all++] = dep->deps[j];
2445 }
2446 p->deps_all[ndeps_all] = 0;
2447 }
2448 p->deps_all_built = 1;
2449 } else {
2450 for (i=0; p->deps[i]; i++) {
2451 struct dso *dep = p->deps[i];
2452 for (j=cnt=0; j<dep->ndeps_direct; j++)
2453 if (!dep->deps[j]->mark) cnt++;
2454 tmp = no_realloc ?
2455 malloc(sizeof(*tmp) * (ndeps_all+cnt+1)) :
2456 realloc(p->deps, sizeof(*tmp) * (ndeps_all+cnt+1));
2457 if (!tmp) {
2458 error("Error recording dependencies for %s", p->name);
2459 if (runtime) longjmp(*rtld_fail, 1);
2460 continue;
2461 }
2462 if (no_realloc) {
2463 memcpy(tmp, p->deps, sizeof(*tmp) * (ndeps_all+1));
2464 no_realloc = 0;
2465 }
2466 p->deps = tmp;
2467 for (j=0; j<dep->ndeps_direct; j++) {
2468 if (dep->deps[j]->mark) continue;
2469 dep->deps[j]->mark = 1;
2470 p->deps[ndeps_all++] = dep->deps[j];
2471 }
2472 p->deps[ndeps_all] = 0;
2473 }
2474 p->bfs_built = 1;
2475 }
2476 for (p=head; p; p=p->next)
2477 p->mark = 0;
2478 }
2479
2480 #ifndef LOAD_ORDER_RANDOMIZATION
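/* Load each library named in the ':' or whitespace separated preload string
 * "s" into namespace "ns", temporarily NUL-terminating each token in place. */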
2481 static void load_preload(char *s, ns_t *ns)
2482 {
2483 int tmp;
2484 char *z;
2485 for (z=s; *z; s=z) {
2486 for ( ; *s && (isspace(*s) || *s==':'); s++);
2487 for (z=s; *z && !isspace(*z) && *z!=':'; z++);
2488 tmp = *z;
2489 *z = 0;
2490 load_library(s, 0, ns, true, NULL);
2491 *z = tmp;
2492 }
2493 }
2494 #endif
2495
2496 static void add_syms(struct dso *p)
2497 {
2498 if (!p->syms_next && syms_tail != p) {
2499 syms_tail->syms_next = p;
2500 syms_tail = p;
2501 }
2502 }
2503
2504 static void revert_syms(struct dso *old_tail)
2505 {
2506 struct dso *p, *next;
2507 /* Chop off the tail of the list of dsos that participate in
2508 * the global symbol table, reverting them to RTLD_LOCAL. */
2509 for (p=old_tail; p; p=next) {
2510 next = p->syms_next;
2511 p->syms_next = 0;
2512 }
2513 syms_tail = old_tail;
2514 }
2515
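/* MIPS GOT entries past DT_MIPS_LOCAL_GOTNO correspond one-to-one to the
 * symbols starting at DT_MIPS_GOTSYM; synthesize R_MIPS_JUMP_SLOT relocations
 * for them so the generic do_relocs path resolves them. */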
2516 static void do_mips_relocs(struct dso *p, size_t *got)
2517 {
2518 size_t i, j, rel[2];
2519 unsigned char *base = p->base;
2520 i=0; search_vec(p->dynv, &i, DT_MIPS_LOCAL_GOTNO);
2521 if (p==&ldso) {
2522 got += i;
2523 } else {
2524 while (i--) *got++ += (size_t)base;
2525 }
2526 j=0; search_vec(p->dynv, &j, DT_MIPS_GOTSYM);
2527 i=0; search_vec(p->dynv, &i, DT_MIPS_SYMTABNO);
2528 Sym *sym = p->syms + j;
2529 rel[0] = (unsigned char *)got - base;
2530 for (i-=j; i; i--, sym++, rel[0]+=sizeof(size_t)) {
2531 rel[1] = R_INFO(sym-p->syms, R_MIPS_JUMP_SLOT);
2532 do_relocs(p, rel, sizeof rel, 2);
2533 }
2534 }
2535
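/* Decode one SLEB128 value: 7 payload bits per byte, bit 0x80 marks a
 * continuation byte, and bit 0x40 of the final byte requests sign extension.
 * Crashes if the encoded value runs past "end". */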
2536 static uint8_t* sleb128_decoder(uint8_t* current, uint8_t* end, size_t* value)
2537 {
2538 size_t result = 0;
2539 static const size_t size = CHAR_BIT * sizeof(result);
2540
2541 size_t shift = 0;
2542 uint8_t byte;
2543
2544 do {
2545 if (current >= end) {
2546 a_crash();
2547 }
2548
2549 byte = *current++;
2550 result |= ((size_t)(byte & 127) << shift);
2551 shift += 7;
2552 } while (byte & 128);
2553
2554 if (shift < size && (byte & 64)) {
2555 result |= -((size_t)(1) << shift);
2556 }
2557
2558 *value = result;
2559
2560 return current;
2561 }
2562
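/* Process Android packed relocations (DT_ANDROID_REL/RELA). The payload
 * starts with an "APS2" signature followed by SLEB128-encoded groups; each
 * group header carries a size and flags that select whether the offset delta,
 * info and addend are shared by the whole group or encoded per relocation. */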
2563 static void do_android_relocs(struct dso *p, size_t dt_name, size_t dt_size)
2564 {
2565 size_t android_rel_addr = 0, android_rel_size = 0;
2566 uint8_t *android_rel_curr, *android_rel_end;
2567
2568 search_vec(p->dynv, &android_rel_addr, dt_name);
2569 search_vec(p->dynv, &android_rel_size, dt_size);
2570
2571 if (!android_rel_addr || (android_rel_size < 4)) {
2572 return;
2573 }
2574
2575 android_rel_curr = laddr(p, android_rel_addr);
2576 if (memcmp(android_rel_curr, "APS2", ANDROID_REL_SIGN_SIZE)) {
2577 return;
2578 }
2579
2580 android_rel_curr += ANDROID_REL_SIGN_SIZE;
2581 android_rel_size -= ANDROID_REL_SIGN_SIZE;
2582
2583 android_rel_end = android_rel_curr + android_rel_size;
2584
2585 size_t relocs_num;
2586 size_t rel[3] = {0};
2587
2588 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &relocs_num);
2589 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[0]);
2590
2591 for (size_t i = 0; i < relocs_num;) {
2592
2593 size_t group_size, group_flags;
2594
2595 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_size);
2596 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_flags);
2597
2598 size_t group_r_offset_delta = 0;
2599
2600 if (group_flags & RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG) {
2601 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_r_offset_delta);
2602 }
2603
2604 if (group_flags & RELOCATION_GROUPED_BY_INFO_FLAG) {
2605 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[1]);
2606 }
2607
2608 const size_t addend_flags = group_flags & (RELOCATION_GROUP_HAS_ADDEND_FLAG | RELOCATION_GROUPED_BY_ADDEND_FLAG);
2609
2610 if (addend_flags == RELOCATION_GROUP_HAS_ADDEND_FLAG) {
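/* Addend present but not grouped: it is decoded per relocation below. */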
2611 } else if (addend_flags == (RELOCATION_GROUP_HAS_ADDEND_FLAG | RELOCATION_GROUPED_BY_ADDEND_FLAG)) {
2612 size_t addend;
2613 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &addend);
2614 rel[2] += addend;
2615 } else {
2616 rel[2] = 0;
2617 }
2618
2619 for (size_t j = 0; j < group_size; j++) {
2620 if (group_flags & RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG) {
2621 rel[0] += group_r_offset_delta;
2622 } else {
2623 size_t offset_delta;
2624 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &offset_delta);
2625
2626 rel[0] += offset_delta;
2627 }
2628
2629 if ((group_flags & RELOCATION_GROUPED_BY_INFO_FLAG) == 0) {
2630 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[1]);
2631 }
2632
2633 if (addend_flags == RELOCATION_GROUP_HAS_ADDEND_FLAG) {
2634 size_t addend;
2635 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &addend);
2636 rel[2] += addend;
2637 }
2638
2639 if (dt_name == DT_ANDROID_REL) {
2640 do_relocs(p, rel, sizeof(size_t) * 2, 2);
2641 } else {
2642 do_relocs(p, rel, sizeof(size_t) * 3, 3);
2643 }
2644 }
2645
2646 i += group_size;
2647 }
2648 }
2649
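/* Process DT_RELR relative relocations: an even entry names the next word to
 * relocate; an odd entry is a bitmap covering the 8*sizeof(size_t)-1 words
 * that follow the current position, one bit per word. */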
2650 static void do_relr_relocs(struct dso *dso, size_t *relr, size_t relr_size)
2651 {
2652 if (dso == &ldso) return; /* self-relocation was done in _dlstart */
2653 unsigned char *base = dso->base;
2654 size_t *reloc_addr;
2655 for (; relr_size; relr++, relr_size -= sizeof(size_t))
2656 if ((relr[0] & 1) == 0) {
2657 reloc_addr = laddr(dso, relr[0]);
2658 *reloc_addr++ += (size_t)base;
2659 } else {
2660 int i = 0;
2661 for (size_t bitmap = relr[0]; (bitmap >>= 1); i++)
2662 if (bitmap & 1)
2663 reloc_addr[i] += (size_t)base;
2664 reloc_addr += 8 * sizeof(size_t) - 1;
2665 }
2666 }
2667
2668 static void reloc_all(struct dso *p, const dl_extinfo *extinfo)
2669 {
2670 ssize_t relro_fd_offset = 0;
2671 size_t dyn[DYN_CNT];
2672 for (; p; p=p->next) {
2673 if (p->relocated) continue;
2674 if (p != &ldso) {
2675 add_can_search_so_list_in_dso(p, head);
2676 }
2677 decode_vec(p->dynv, dyn, DYN_CNT);
2678 if (NEED_MIPS_GOT_RELOCS)
2679 do_mips_relocs(p, laddr(p, dyn[DT_PLTGOT]));
2680 do_relocs(p, laddr(p, dyn[DT_JMPREL]), dyn[DT_PLTRELSZ],
2681 2+(dyn[DT_PLTREL]==DT_RELA));
2682 do_relocs(p, laddr(p, dyn[DT_REL]), dyn[DT_RELSZ], 2);
2683 do_relocs(p, laddr(p, dyn[DT_RELA]), dyn[DT_RELASZ], 3);
2684 if (!DL_FDPIC)
2685 do_relr_relocs(p, laddr(p, dyn[DT_RELR]), dyn[DT_RELRSZ]);
2686
2687 do_android_relocs(p, DT_ANDROID_REL, DT_ANDROID_RELSZ);
2688 do_android_relocs(p, DT_ANDROID_RELA, DT_ANDROID_RELASZ);
2689
2690 if (head != &ldso && p->relro_start != p->relro_end &&
2691 mprotect(laddr(p, p->relro_start), p->relro_end-p->relro_start, PROT_READ)
2692 && errno != ENOSYS) {
2693 error("Error relocating %s: RELRO protection failed: %m",
2694 p->name);
2695 if (runtime) longjmp(*rtld_fail, 1);
2696 }
2697 /* Handle serializing/mapping the RELRO segment */
2698 handle_relro_sharing(p, extinfo, &relro_fd_offset);
2699
2700 p->relocated = 1;
2701 free_reloc_can_search_dso(p);
2702 }
2703 }
2704
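/* Fill in mapping info for a dso mapped by the kernel rather than by this
 * loader: locate PT_DYNAMIC, record the RELRO range and any stack-size hint,
 * and derive map/map_len from the lowest and highest PT_LOAD addresses. */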
2705 static void kernel_mapped_dso(struct dso *p)
2706 {
2707 size_t min_addr = -1, max_addr = 0, cnt;
2708 Phdr *ph = p->phdr;
2709 for (cnt = p->phnum; cnt--; ph = (void *)((char *)ph + p->phentsize)) {
2710 if (ph->p_type == PT_DYNAMIC) {
2711 p->dynv = laddr(p, ph->p_vaddr);
2712 } else if (ph->p_type == PT_GNU_RELRO) {
2713 p->relro_start = ph->p_vaddr & -PAGE_SIZE;
2714 p->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
2715 } else if (ph->p_type == PT_GNU_STACK) {
2716 if (!runtime && ph->p_memsz > __default_stacksize) {
2717 __default_stacksize =
2718 ph->p_memsz < DEFAULT_STACK_MAX ?
2719 ph->p_memsz : DEFAULT_STACK_MAX;
2720 }
2721 }
2722 if (ph->p_type != PT_LOAD) continue;
2723 if (ph->p_vaddr < min_addr)
2724 min_addr = ph->p_vaddr;
2725 if (ph->p_vaddr+ph->p_memsz > max_addr)
2726 max_addr = ph->p_vaddr+ph->p_memsz;
2727 }
2728 min_addr &= -PAGE_SIZE;
2729 max_addr = (max_addr + PAGE_SIZE-1) & -PAGE_SIZE;
2730 p->map = p->base + min_addr;
2731 p->map_len = max_addr - min_addr;
2732 p->kernel_mapped = 1;
2733 }
2734
2735 void __libc_exit_fini()
2736 {
2737 struct dso *p;
2738 size_t dyn[DYN_CNT];
2739 pthread_t self = __pthread_self();
2740
2741 /* Take both locks before setting shutting_down, so that
2742 * either lock is sufficient to read its value. The lock
2743 * order matches that in dlopen to avoid deadlock. */
2744 pthread_rwlock_wrlock(&lock);
2745 pthread_mutex_lock(&init_fini_lock);
2746 shutting_down = 1;
2747 pthread_rwlock_unlock(&lock);
2748 for (p=fini_head; p; p=p->fini_next) {
2749 while (p->ctor_visitor && p->ctor_visitor!=self)
2750 pthread_cond_wait(&ctor_cond, &init_fini_lock);
2751 if (!p->constructed) continue;
2752 decode_vec(p->dynv, dyn, DYN_CNT);
2753 if (dyn[0] & (1<<DT_FINI_ARRAY)) {
2754 size_t n = dyn[DT_FINI_ARRAYSZ]/sizeof(size_t);
2755 size_t *fn = (size_t *)laddr(p, dyn[DT_FINI_ARRAY])+n;
2756 while (n--) ((void (*)(void))*--fn)();
2757 }
2758 #ifndef NO_LEGACY_INITFINI
2759 if ((dyn[0] & (1<<DT_FINI)) && dyn[DT_FINI])
2760 fpaddr(p, dyn[DT_FINI])();
2761 #endif
2762 }
2763 }
2764
2765 void __pthread_mutex_unlock_atfork(int who)
2766 {
2767 if (who == 0) {
2768 // If a multithreaded process locks dlclose_lock and then calls fork,
2769 // dlclose_lock will never be unlocked before the child process calls execve,
2770 // so reset dlclose_lock to make sure the child process can call dlclose after fork.
2771 __pthread_mutex_unlock_recursive_inner(&dlclose_lock);
2772 }
2773 }
2774
2775 void __ldso_atfork(int who)
2776 {
2777 if (who<0) {
2778 pthread_rwlock_wrlock(&lock);
2779 pthread_mutex_lock(&init_fini_lock);
2780 } else {
2781 pthread_mutex_unlock(&init_fini_lock);
2782 pthread_rwlock_unlock(&lock);
2783 }
2784 }
2785
2786 static struct dso **queue_ctors(struct dso *dso)
2787 {
2788 size_t cnt, qpos, spos, i;
2789 struct dso *p, **queue, **stack;
2790
2791 if (ldd_mode) return 0;
2792
2793 /* Bound on queue size is the total number of indirect deps.
2794 * If a bfs deps list was built, we can use it. Otherwise,
2795 * bound by the total number of DSOs, which is always safe and
2796 * is reasonable to use (for the main app at startup). */
2797 if (dso->bfs_built) {
2798 for (cnt=0; dso->deps[cnt]; cnt++)
2799 dso->deps[cnt]->mark = 0;
2800 cnt++; /* self, not included in deps */
2801 } else {
2802 for (cnt=0, p=head; p; cnt++, p=p->next)
2803 p->mark = 0;
2804 }
2805 cnt++; /* termination slot */
2806 if (dso==head && cnt <= countof(builtin_ctor_queue))
2807 queue = builtin_ctor_queue;
2808 else
2809 queue = calloc(cnt, sizeof *queue);
2810
2811 if (!queue) {
2812 error("Error allocating constructor queue: %m\n");
2813 if (runtime) longjmp(*rtld_fail, 1);
2814 return 0;
2815 }
2816
2817 /* Opposite ends of the allocated buffer serve as an output queue
2818 * and a working stack. Setup initial stack with just the argument
2819 * dso and initial queue empty... */
2820 stack = queue;
2821 qpos = 0;
2822 spos = cnt;
2823 stack[--spos] = dso;
2824 dso->next_dep = 0;
2825 dso->mark = 1;
2826
2827 /* Then perform pseudo-DFS sort, but ignoring circular deps. */
2828 while (spos<cnt) {
2829 p = stack[spos++];
2830 while (p->next_dep < p->ndeps_direct) {
2831 if (p->deps[p->next_dep]->mark) {
2832 p->next_dep++;
2833 } else {
2834 stack[--spos] = p;
2835 p = p->deps[p->next_dep];
2836 p->next_dep = 0;
2837 p->mark = 1;
2838 }
2839 }
2840 queue[qpos++] = p;
2841 }
2842 queue[qpos] = 0;
2843 for (i=0; i<qpos; i++) queue[i]->mark = 0;
2844
2845 return queue;
2846 }
2847
2848 static void do_init_fini(struct dso **queue)
2849 {
2850 struct dso *p;
2851 size_t dyn[DYN_CNT], i;
2852 pthread_t self = __pthread_self();
2853
2854 pthread_mutex_lock(&init_fini_lock);
2855 for (i=0; (p=queue[i]); i++) {
2856 while ((p->ctor_visitor && p->ctor_visitor!=self) || shutting_down)
2857 pthread_cond_wait(&ctor_cond, &init_fini_lock);
2858 if (p->ctor_visitor || p->constructed)
2859 continue;
2860 p->ctor_visitor = self;
2861
2862 decode_vec(p->dynv, dyn, DYN_CNT);
2863 if (dyn[0] & ((1<<DT_FINI) | (1<<DT_FINI_ARRAY))) {
2864 p->fini_next = fini_head;
2865 fini_head = p;
2866 }
2867
2868 pthread_mutex_unlock(&init_fini_lock);
2869
2870 #ifndef NO_LEGACY_INITFINI
2871 if ((dyn[0] & (1<<DT_INIT)) && dyn[DT_INIT])
2872 fpaddr(p, dyn[DT_INIT])();
2873 #endif
2874 if (dyn[0] & (1<<DT_INIT_ARRAY)) {
2875 size_t n = dyn[DT_INIT_ARRAYSZ]/sizeof(size_t);
2876 size_t *fn = laddr(p, dyn[DT_INIT_ARRAY]);
2877 if (p != &ldso) {
2878 trace_marker_begin(HITRACE_TAG_MUSL, "calling constructors: ", p->name);
2879 }
2880 while (n--) ((void (*)(void))*fn++)();
2881 if (p != &ldso) {
2882 trace_marker_end(HITRACE_TAG_MUSL);
2883 }
2884 }
2885
2886 pthread_mutex_lock(&init_fini_lock);
2887 p->ctor_visitor = 0;
2888 p->constructed = 1;
2889 pthread_cond_broadcast(&ctor_cond);
2890 }
2891 pthread_mutex_unlock(&init_fini_lock);
2892 }
2893
2894 void __libc_start_init(void)
2895 {
2896 do_init_fini(main_ctor_queue);
2897 if (!__malloc_replaced && main_ctor_queue != builtin_ctor_queue)
2898 free(main_ctor_queue);
2899 main_ctor_queue = 0;
2900 }
2901
2902 static void dl_debug_state(void)
2903 {
2904 }
2905
2906 weak_alias(dl_debug_state, _dl_debug_state);
2907
2908 void __init_tls(size_t *auxv)
2909 {
2910 }
2911
2912 static void update_tls_size()
2913 {
2914 libc.tls_cnt = tls_cnt;
2915 libc.tls_align = tls_align;
2916 libc.tls_size = ALIGN(
2917 (1+tls_cnt) * sizeof(void *) +
2918 tls_offset +
2919 sizeof(struct pthread) +
2920 tls_align * 2,
2921 tls_align);
2922 }
2923
2924 static void install_new_tls(void)
2925 {
2926 sigset_t set;
2927 pthread_t self = __pthread_self(), td;
2928 struct dso *dtv_provider = container_of(tls_tail, struct dso, tls);
2929 uintptr_t (*newdtv)[tls_cnt+1] = (void *)dtv_provider->new_dtv;
2930 struct dso *p;
2931 size_t i, j;
2932 size_t old_cnt = self->dtv[0];
2933
2934 __block_app_sigs(&set);
2935 __tl_lock();
2936 if (get_tl_lock_caller_count()) {
2937 get_tl_lock_caller_count()->install_new_tls_tl_lock++;
2938 }
2939 /* Copy existing dtv contents from all existing threads. */
2940 for (i=0, td=self; !i || td!=self; i++, td=td->next) {
2941 memcpy(newdtv+i, td->dtv,
2942 (old_cnt+1)*sizeof(uintptr_t));
2943 newdtv[i][0] = tls_cnt;
2944 }
2945 /* Install new dtls into the enlarged, uninstalled dtv copies. */
2946 for (p=head; ; p=p->next) {
2947 if (p->tls_id <= old_cnt) continue;
2948 unsigned char *mem = p->new_tls;
2949 for (j=0; j<i; j++) {
2950 unsigned char *new = mem;
2951 new += ((uintptr_t)p->tls.image - (uintptr_t)mem)
2952 & (p->tls.align-1);
2953 memcpy(new, p->tls.image, p->tls.len);
2954 newdtv[j][p->tls_id] =
2955 (uintptr_t)new + DTP_OFFSET;
2956 mem += p->tls.size + p->tls.align;
2957 }
2958 if (p->tls_id == tls_cnt) break;
2959 }
2960
2961 /* Broadcast barrier to ensure contents of new dtv is visible
2962 * if the new dtv pointer is. The __membarrier function has a
2963 * fallback emulation using signals for kernels that lack the
2964 * feature at the syscall level. */
2965
2966 __membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
2967
2968 /* Install new dtv for each thread. */
2969 for (j=0, td=self; !j || td!=self; j++, td=td->next) {
2970 td->dtv = newdtv[j];
2971 }
2972
2973 if (get_tl_lock_caller_count()) {
2974 get_tl_lock_caller_count()->install_new_tls_tl_lock--;
2975 }
2976 __tl_unlock();
2977 __restore_sigs(&set);
2978 }
2979
2980 /* Stage 1 of the dynamic linker is defined in dlstart.c. It calls the
2981 * following stage 2 and stage 3 functions via primitive symbolic lookup
2982 * since it does not have access to their addresses to begin with. */
2983
2984 /* Stage 2 of the dynamic linker is called after relative relocations
2985 * have been processed. It can make function calls to static functions
2986 * and access string literals and static data, but cannot use extern
2987 * symbols. Its job is to perform symbolic relocations on the dynamic
2988 * linker itself, but some of the relocations performed may need to be
2989 * replaced later due to copy relocations in the main program. */
2990
2991 hidden void __dls2(unsigned char *base, size_t *sp)
2992 {
2993 size_t *auxv;
2994 for (auxv=sp+1+*sp+1; *auxv; auxv++);
2995 auxv++;
2996 if (DL_FDPIC) {
2997 void *p1 = (void *)sp[-2];
2998 void *p2 = (void *)sp[-1];
2999 if (!p1) {
3000 size_t aux[AUX_CNT];
3001 decode_vec(auxv, aux, AUX_CNT);
3002 if (aux[AT_BASE]) ldso.base = (void *)aux[AT_BASE];
3003 else ldso.base = (void *)(aux[AT_PHDR] & -4096);
3004 }
3005 app_loadmap = p2 ? p1 : 0;
3006 ldso.loadmap = p2 ? p2 : p1;
3007 ldso.base = laddr(&ldso, 0);
3008 } else {
3009 ldso.base = base;
3010 }
3011 size_t aux[AUX_CNT];
3012 decode_vec(auxv, aux, AUX_CNT);
3013 libc.page_size = aux[AT_PAGESZ];
3014 Ehdr *ehdr = __ehdr_start ? (void *)__ehdr_start : (void *)ldso.base;
3015 ldso.name = ldso.shortname = "libc.so";
3016 ldso.phnum = ehdr->e_phnum;
3017 ldso.phdr = laddr(&ldso, ehdr->e_phoff);
3018 ldso.phentsize = ehdr->e_phentsize;
3019 ldso.is_global = true;
3020 search_vec(auxv, &ldso_page_size, AT_PAGESZ);
3021 kernel_mapped_dso(&ldso);
3022 decode_dyn(&ldso);
3023
3024 if (DL_FDPIC) makefuncdescs(&ldso);
3025
3026 /* Prepare storage to save clobbered REL addends so they
3027 * can be reused in stage 3. There should be very few. If
3028 * something goes wrong and there are a huge number, abort
3029 * instead of risking stack overflow. */
3030 size_t dyn[DYN_CNT];
3031 decode_vec(ldso.dynv, dyn, DYN_CNT);
3032 size_t *rel = laddr(&ldso, dyn[DT_REL]);
3033 size_t rel_size = dyn[DT_RELSZ];
3034 size_t symbolic_rel_cnt = 0;
3035 apply_addends_to = rel;
3036 for (; rel_size; rel+=2, rel_size-=2*sizeof(size_t))
3037 if (!IS_RELATIVE(rel[1], ldso.syms)) symbolic_rel_cnt++;
3038 if (symbolic_rel_cnt >= ADDEND_LIMIT) a_crash();
3039 size_t addends[symbolic_rel_cnt+1];
3040 saved_addends = addends;
3041
3042 head = &ldso;
3043 reloc_all(&ldso, NULL);
3044
3045 ldso.relocated = 0;
3046
3047 /* Call dynamic linker stage-2b, __dls2b, looking it up
3048 * symbolically as a barrier against moving the address
3049 * load across the above relocation processing. */
3050 struct symdef dls2b_def = find_sym(&ldso, "__dls2b", 0);
3051 if (DL_FDPIC) ((stage3_func)&ldso.funcdescs[dls2b_def.sym-ldso.syms])(sp, auxv, aux);
3052 else ((stage3_func)laddr(&ldso, dls2b_def.sym->st_value))(sp, auxv, aux);
3053 }
3054
3055 /* Stage 2b sets up a valid thread pointer, which requires relocations
3056 * completed in stage 2, and on which stage 3 is permitted to depend.
3057 * This is done as a separate stage, with symbolic lookup as a barrier,
3058 * so that loads of the thread pointer and &errno can be pure/const and
3059 * thereby hoistable. */
3060
3061 void __dls2b(size_t *sp, size_t *auxv, size_t *aux)
3062 {
3063 /* Setup early thread pointer in builtin_tls for ldso/libc itself to
3064 * use during dynamic linking. If possible it will also serve as the
3065 * thread pointer at runtime. */
3066 search_vec(auxv, &__hwcap, AT_HWCAP);
3067 libc.auxv = auxv;
3068 libc.tls_size = sizeof builtin_tls;
3069 libc.tls_align = tls_align;
3070 if (__init_tp(__copy_tls((void *)builtin_tls)) < 0) {
3071 a_crash();
3072 }
3073 __pthread_self()->stack = (void *)(sp + 1);
3074 struct symdef dls3_def = find_sym(&ldso, "__dls3", 0);
3075 if (DL_FDPIC) ((stage3_func)&ldso.funcdescs[dls3_def.sym-ldso.syms])(sp, auxv, aux);
3076 else ((stage3_func)laddr(&ldso, dls3_def.sym->st_value))(sp, auxv, aux);
3077 }
3078
3079 /* Stage 3 of the dynamic linker is called with the dynamic linker/libc
3080 * fully functional. Its job is to load (if not already loaded) and
3081 * process dependencies and relocations for the main application and
3082 * transfer control to its entry point. */
3083
3084 void __dls3(size_t *sp, size_t *auxv, size_t *aux)
3085 {
3086 static struct dso app, vdso;
3087 size_t i;
3088 char *env_preload=0;
3089 char *replace_argv0=0;
3090 size_t vdso_base;
3091 int argc = *sp;
3092 char **argv = (void *)(sp+1);
3093 char **argv_orig = argv;
3094 char **envp = argv+argc+1;
3095
3096 /* Find aux vector just past environ[] and use it to initialize
3097 * global data that may be needed before we can make syscalls. */
3098 __environ = envp;
3099 search_vec(auxv, &__sysinfo, AT_SYSINFO);
3100 __pthread_self()->sysinfo = __sysinfo;
3101 libc.secure = ((aux[0]&0x7800)!=0x7800 || aux[AT_UID]!=aux[AT_EUID]
3102 || aux[AT_GID]!=aux[AT_EGID] || aux[AT_SECURE]);
3103
3104 /* Only trust user/env if kernel says we're not suid/sgid */
3105 if (!libc.secure) {
3106 env_path = getenv("LD_LIBRARY_PATH");
3107 env_preload = getenv("LD_PRELOAD");
3108 }
3109
3110 /* Activate error handler function */
3111 error = error_impl;
3112
3113 #ifdef OHOS_ENABLE_PARAMETER
3114 InitParameterClient();
3115 #endif
3116 // We may abort while linking other libs, so install the signal handler before this stage starts.
3117 #ifdef DFX_SIGNAL_LIBC
3118 DFX_InstallSignalHandler();
3119 #endif
3120 #if defined (ENABLE_MUSL_LOG) && !defined(__LITEOS__)
3121 InitHilogSocketFd();
3122 #endif
3123 __init_fdsan();
3124 InitDeviceApiVersion(); // do nothing when no define OHOS_ENABLE_PARAMETER
3125 InitTimeZoneParam();
3126 /* If the main program was already loaded by the kernel,
3127 * AT_PHDR will point to some location other than the dynamic
3128 * linker's program headers. */
3129 if (aux[AT_PHDR] != (size_t)ldso.phdr) {
3130 size_t interp_off = 0;
3131 size_t tls_image = 0;
3132 /* Find load address of the main program, via AT_PHDR vs PT_PHDR. */
3133 Phdr *phdr = app.phdr = (void *)aux[AT_PHDR];
3134 app.phnum = aux[AT_PHNUM];
3135 app.phentsize = aux[AT_PHENT];
3136 for (i = aux[AT_PHNUM]; i; i--, phdr = (void *)((char *)phdr + aux[AT_PHENT])) {
3137 if (phdr->p_type == PT_PHDR)
3138 app.base = (void *)(aux[AT_PHDR] - phdr->p_vaddr);
3139 else if (phdr->p_type == PT_INTERP)
3140 interp_off = (size_t)phdr->p_vaddr;
3141 else if (phdr->p_type == PT_TLS) {
3142 tls_image = phdr->p_vaddr;
3143 app.tls.len = phdr->p_filesz;
3144 app.tls.size = phdr->p_memsz;
3145 app.tls.align = phdr->p_align;
3146 }
3147 }
3148 if (DL_FDPIC) app.loadmap = app_loadmap;
3149 if (app.tls.size) app.tls.image = laddr(&app, tls_image);
3150 if (interp_off) ldso.name = laddr(&app, interp_off);
3151 if ((aux[0] & (1UL<<AT_EXECFN))
3152 && strncmp((char *)aux[AT_EXECFN], "/proc/", 6))
3153 app.name = (char *)aux[AT_EXECFN];
3154 else
3155 app.name = argv[0];
3156 kernel_mapped_dso(&app);
3157 } else {
3158 int fd;
3159 char *ldname = argv[0];
3160 size_t l = strlen(ldname);
3161 if (l >= 3 && !strcmp(ldname+l-3, "ldd")) ldd_mode = 1;
3162 argv++;
3163 while (argv[0] && argv[0][0]=='-' && argv[0][1]=='-') {
3164 char *opt = argv[0]+2;
3165 *argv++ = (void *)-1;
3166 if (!*opt) {
3167 break;
3168 } else if (!memcmp(opt, "list", 5)) {
3169 ldd_mode = 1;
3170 } else if (!memcmp(opt, "library-path", 12)) {
3171 if (opt[12]=='=') env_path = opt+13;
3172 else if (opt[12]) *argv = 0;
3173 else if (*argv) env_path = *argv++;
3174 } else if (!memcmp(opt, "preload", 7)) {
3175 if (opt[7]=='=') env_preload = opt+8;
3176 else if (opt[7]) *argv = 0;
3177 else if (*argv) env_preload = *argv++;
3178 } else if (!memcmp(opt, "argv0", 5)) {
3179 if (opt[5]=='=') replace_argv0 = opt+6;
3180 else if (opt[5]) *argv = 0;
3181 else if (*argv) replace_argv0 = *argv++;
3182 } else {
3183 argv[0] = 0;
3184 }
3185 }
3186 argv[-1] = (void *)(argc - (argv-argv_orig));
3187 if (!argv[0]) {
3188 dprintf(2, "musl libc (" LDSO_ARCH ")\n"
3189 "Version %s\n"
3190 "Dynamic Program Loader\n"
3191 "Usage: %s [options] [--] pathname%s\n",
3192 __libc_version, ldname,
3193 ldd_mode ? "" : " [args]");
3194 _exit(1);
3195 }
3196 fd = open(argv[0], O_RDONLY);
3197 if (fd < 0) {
3198 dprintf(2, "%s: cannot load %s: %s\n", ldname, argv[0], strerror(errno));
3199 _exit(1);
3200 }
3201 Ehdr *ehdr = map_library(fd, &app, NULL);
3202 if (!ehdr) {
3203 dprintf(2, "%s: %s: Not a valid dynamic program\n", ldname, argv[0]);
3204 _exit(1);
3205 }
3206 close(fd);
3207 ldso.name = ldname;
3208 app.name = argv[0];
3209 aux[AT_ENTRY] = (size_t)laddr(&app, ehdr->e_entry);
3210 /* Find the name that would have been used for the dynamic
3211 * linker had ldd not taken its place. */
3212 if (ldd_mode) {
3213 for (i=0; i<app.phnum; i++) {
3214 if (app.phdr[i].p_type == PT_INTERP)
3215 ldso.name = laddr(&app, app.phdr[i].p_vaddr);
3216 }
3217 dprintf(1, "\t%s (%p)\n", ldso.name, ldso.base);
3218 }
3219 }
3220 if (app.tls.size) {
3221 libc.tls_head = tls_tail = &app.tls;
3222 app.tls_id = tls_cnt = 1;
3223 #ifdef TLS_ABOVE_TP
3224 app.tls.offset = GAP_ABOVE_TP;
3225 app.tls.offset += (-GAP_ABOVE_TP + (uintptr_t)app.tls.image)
3226 & (app.tls.align-1);
3227 tls_offset = app.tls.offset + app.tls.size;
3228 #else
3229 tls_offset = app.tls.offset = app.tls.size
3230 + ( -((uintptr_t)app.tls.image + app.tls.size)
3231 & (app.tls.align-1) );
3232 #endif
3233 tls_align = MAXP2(tls_align, app.tls.align);
3234 }
3235 decode_dyn(&app);
3236 if (DL_FDPIC) {
3237 makefuncdescs(&app);
3238 if (!app.loadmap) {
3239 app.loadmap = (void *)&app_dummy_loadmap;
3240 app.loadmap->nsegs = 1;
3241 app.loadmap->segs[0].addr = (size_t)app.map;
3242 app.loadmap->segs[0].p_vaddr = (size_t)app.map
3243 - (size_t)app.base;
3244 app.loadmap->segs[0].p_memsz = app.map_len;
3245 }
3246 argv[-3] = (void *)app.loadmap;
3247 }
3248 app.is_global = true;
3249
3250 /* Initial dso chain consists only of the app. */
3251 head = tail = syms_tail = &app;
3252
3253 /* Donate unused parts of app and library mapping to malloc */
3254 reclaim_gaps(&app);
3255 reclaim_gaps(&ldso);
3256
3257 find_and_set_bss_name(&app);
3258 find_and_set_bss_name(&ldso);
3259
3260 /* Load preload/needed libraries, add symbols to global namespace. */
3261 ldso.deps = (struct dso **)no_deps;
3262 /* Init g_is_asan */
3263 g_is_asan = false;
3264 LD_LOGD("__dls3 ldso.name:%{public}s.", ldso.name);
3265 /* Determine from the ldso name whether Asan is enabled */
3266 if (strstr(ldso.name, "-asan")) {
3267 g_is_asan = true;
3268 LD_LOGD("__dls3 g_is_asan is true.");
3269 }
3270 /* Init all namespaces from the config file. There is always a default namespace. */
3271 init_namespace(&app);
3272
3273 char dfx_preload[] = "libdfx_signalhandler.z.so";
3274 #ifdef LOAD_ORDER_RANDOMIZATION
3275 struct loadtasks *tasks = create_loadtasks();
3276 if (!tasks) {
3277 _exit(1);
3278 }
3279 load_preload(dfx_preload, get_default_ns(), tasks);
3280 if (env_preload) {
3281 load_preload(env_preload, get_default_ns(), tasks);
3282 }
3283 for (struct dso *q = head; q; q = q->next) {
3284 q->is_global = true;
3285 q->is_preload = true;
3286 }
3287 preload_deps(&app, tasks);
3288 unmap_preloaded_sections(tasks);
3289 shuffle_loadtasks(tasks);
3290 run_loadtasks(tasks, NULL);
3291 free_loadtasks(tasks);
3292 assign_tls(app.next);
3293 #else
3294 load_preload(dfx_preload, get_default_ns());
3295 if (env_preload) load_preload(env_preload, get_default_ns());
3296 for (struct dso *q = head; q; q = q->next) {
3297 q->is_global = true;
3298 q->is_preload = true;
3299 }
3300 load_deps(&app, NULL);
3301 #endif
3302
3303 /* Set is_reloc_head_so_dep to true for all direct and indirect dependent sos of app, including app itself. */
3304 for (struct dso *p = head; p; p = p->next) {
3305 p->is_reloc_head_so_dep = true;
3306 add_syms(p);
3307 }
3308
3309 /* Attach to vdso, if provided by the kernel, last so that it does
3310 * not become part of the global namespace. */
3311 if (search_vec(auxv, &vdso_base, AT_SYSINFO_EHDR) && vdso_base) {
3312 Ehdr *ehdr = (void *)vdso_base;
3313 Phdr *phdr = vdso.phdr = (void *)(vdso_base + ehdr->e_phoff);
3314 vdso.phnum = ehdr->e_phnum;
3315 vdso.phentsize = ehdr->e_phentsize;
3316 for (i=ehdr->e_phnum; i; i--, phdr=(void *)((char *)phdr + ehdr->e_phentsize)) {
3317 if (phdr->p_type == PT_DYNAMIC)
3318 vdso.dynv = (void *)(vdso_base + phdr->p_offset);
3319 if (phdr->p_type == PT_LOAD)
3320 vdso.base = (void *)(vdso_base - phdr->p_vaddr + phdr->p_offset);
3321 }
3322 vdso.name = "";
3323 vdso.shortname = "linux-gate.so.1";
3324 vdso.relocated = 1;
3325 vdso.deps = (struct dso **)no_deps;
3326 decode_dyn(&vdso);
3327 vdso.prev = tail;
3328 tail->next = &vdso;
3329 tail = &vdso;
3330 vdso.namespace = get_default_ns();
3331 ns_add_dso(vdso.namespace, &vdso);
3332 }
3333
3334 for (i=0; app.dynv[i]; i+=2) {
3335 if (!DT_DEBUG_INDIRECT && app.dynv[i]==DT_DEBUG)
3336 app.dynv[i+1] = (size_t)&debug;
3337 if (DT_DEBUG_INDIRECT && app.dynv[i]==DT_DEBUG_INDIRECT) {
3338 size_t *ptr = (size_t *) app.dynv[i+1];
3339 *ptr = (size_t)&debug;
3340 }
3341 if (app.dynv[i]==DT_DEBUG_INDIRECT_REL) {
3342 size_t *ptr = (size_t *)((size_t)&app.dynv[i] + app.dynv[i+1]);
3343 *ptr = (size_t)&debug;
3344 }
3345 }
3346
3347 /* This must be done before final relocations, since it calls
3348 * malloc, which may be provided by the application. Calling any
3349 * application code prior to the jump to its entry point is not
3350 * valid in our model and does not work with FDPIC, where there
3351 * are additional relocation-like fixups that only the entry point
3352 * code can see to perform. */
3353 main_ctor_queue = queue_ctors(&app);
3354
3355 /* Initial TLS must also be allocated before final relocations
3356 * might result in calloc being a call to application code. */
3357 update_tls_size();
3358 void *initial_tls = builtin_tls;
3359 if (libc.tls_size > sizeof builtin_tls || tls_align > MIN_TLS_ALIGN) {
3360 initial_tls = calloc(libc.tls_size, 1);
3361 if (!initial_tls) {
3362 dprintf(2, "%s: Error getting %zu bytes thread-local storage: %m\n",
3363 argv[0], libc.tls_size);
3364 _exit(127);
3365 }
3366 }
3367 static_tls_cnt = tls_cnt;
3368
3369 /* The main program must be relocated LAST since it may contain
3370 * copy relocations which depend on libraries' relocations. */
3371 reloc_all(app.next, NULL);
3372 reloc_all(&app, NULL);
3373 for (struct dso *q = head; q; q = q->next) {
3374 q->is_reloc_head_so_dep = false;
3375 }
3376
3377 /* Actual copying to new TLS needs to happen after relocations,
3378 * since the TLS images might have contained relocated addresses. */
3379 if (initial_tls != builtin_tls) {
3380 pthread_t self = __pthread_self();
3381 pthread_t td = __copy_tls(initial_tls);
3382 if (__init_tp(td) < 0) {
3383 a_crash();
3384 }
3385 td->tsd = self->tsd;
3386 // Record stack here for unwinding in gwp-asan
3387 td->stack = self->stack;
3388 } else {
3389 size_t tmp_tls_size = libc.tls_size;
3390 pthread_t self = __pthread_self();
3391 /* Temporarily set the tls size to the full size of
3392 * builtin_tls so that __copy_tls will use the same layout
3393 * as it did before. Then check, just to be safe. */
3394 libc.tls_size = sizeof builtin_tls;
3395 if (__copy_tls((void*)builtin_tls) != self) a_crash();
3396 libc.tls_size = tmp_tls_size;
3397 }
3398
3399 if (init_cfi_shadow(head, &ldso, &app, &vdso) == CFI_FAILED) {
3400 error("[%s] init_cfi_shadow failed: %m", __FUNCTION__);
3401 }
3402
3403 if (ldso_fail) _exit(127);
3404 if (ldd_mode) _exit(0);
3405
3406 /* Determine if malloc was interposed by a replacement implementation
3407 * so that calloc and the memalign family can harden against the
3408 * possibility of incomplete replacement. */
3409 if (find_sym(head, "malloc", 1).dso != &ldso)
3410 __malloc_replaced = 1;
3411 if (find_sym(head, "aligned_alloc", 1).dso != &ldso)
3412 __aligned_alloc_replaced = 1;
3413
3414 /* Switch to runtime mode: any further failures in the dynamic
3415 * linker are a reportable failure rather than a fatal startup
3416 * error. */
3417 runtime = 1;
3418
3419 sync_with_debugger();
3420
3421 if (replace_argv0) argv[0] = replace_argv0;
3422
3423 #ifdef USE_GWP_ASAN
3424 init_gwp_asan_by_libc(false);
3425 #endif
3426
3427 errno = 0;
3428
3429 CRTJMP((void *)aux[AT_ENTRY], argv - 1);
3430 for(;;);
3431 }
3432
3433 static void prepare_lazy(struct dso *p)
3434 {
3435 size_t dyn[DYN_CNT], n, flags1=0;
3436 decode_vec(p->dynv, dyn, DYN_CNT);
3437 search_vec(p->dynv, &flags1, DT_FLAGS_1);
3438 if (dyn[DT_BIND_NOW] || (dyn[DT_FLAGS] & DF_BIND_NOW) || (flags1 & DF_1_NOW))
3439 return;
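/* Conservative over-estimate of the number of slots needed for deferred
 * relocations; each deferred relocation is recorded below as 3 size_t values. */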
3440 n = dyn[DT_RELSZ]/2 + dyn[DT_RELASZ]/3 + dyn[DT_PLTRELSZ]/2 + 1;
3441 if (NEED_MIPS_GOT_RELOCS) {
3442 size_t j=0; search_vec(p->dynv, &j, DT_MIPS_GOTSYM);
3443 size_t i=0; search_vec(p->dynv, &i, DT_MIPS_SYMTABNO);
3444 n += i-j;
3445 }
3446 p->lazy = calloc(n, 3*sizeof(size_t));
3447 if (!p->lazy) {
3448 error("Error preparing lazy relocation for %s: %m", p->name);
3449 longjmp(*rtld_fail, 1);
3450 }
3451 p->lazy_next = lazy_head;
3452 lazy_head = p;
3453 }
3454
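/* dlopen_post: bump the dlopen reference count of the opened so (and, once its
 * BFS dependency list is built, of every dependency), propagate RTLD_NODELETE
 * to the dependencies, and with HANDLE_RANDOMIZATION return a randomized handle
 * instead of the raw dso pointer. */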
3455 static void *dlopen_post(struct dso* p, int mode) {
3456 if (p == NULL) {
3457 return p;
3458 }
3459 bool is_dlclose_debug = false;
3460 if (is_dlclose_debug_enable()) {
3461 is_dlclose_debug = true;
3462 }
3463 p->nr_dlopen++;
3464 if (is_dlclose_debug) {
3465 LD_LOGE("[dlclose]: %{public}s nr_dlopen++ when dlopen %{public}s, nr_dlopen:%{public}d ",
3466 p->name, p->name, p->nr_dlopen);
3467 }
3468 if (p->bfs_built) {
3469 for (int i = 0; p->deps[i]; i++) {
3470 p->deps[i]->nr_dlopen++;
3471 if (is_dlclose_debug) {
3472 LD_LOGE("[dlclose]: %{public}s nr_dlopen++ when dlopen %{public}s, nr_dlopen:%{public}d",
3473 p->deps[i]->name, p->name, p->deps[i]->nr_dlopen);
3474 }
3475 if (mode & RTLD_NODELETE) {
3476 p->deps[i]->flags |= DSO_FLAGS_NODELETE;
3477 }
3478 }
3479 }
3480
3481 #ifdef HANDLE_RANDOMIZATION
3482 void *handle = assign_valid_handle(p);
3483 if (handle == NULL) {
3484 LD_LOGE("dlopen_post: generate random handle failed");
3485 do_dlclose(p, 0);
3486 }
3487
3488 return handle;
3489 #endif
3490
3491 return p;
3492 }
3493
3494 static char *dlopen_permitted_list[] =
3495 {
3496 "default",
3497 "ndk",
3498 };
3499
3500 #define PERMITIED_TARGET "nweb_ns"
3501 static bool in_permitted_list(char *caller, char *target)
3502 {
3503 for (int i = 0; i < sizeof(dlopen_permitted_list)/sizeof(char*); i++) {
3504 if (strcmp(dlopen_permitted_list[i], caller) == 0) {
3505 return true;
3506 }
3507 }
3508
3509 if (strcmp(PERMITIED_TARGET, target) == 0) {
3510 return true;
3511 }
3512
3513 return false;
3514 }
3515
3516 static bool is_permitted(const void *caller_addr, char *target)
3517 {
3518 struct dso *caller;
3519 ns_t *ns;
3520 caller = (struct dso *)addr2dso((size_t)caller_addr);
3521 if ((caller == NULL) || (caller->namespace == NULL)) {
3522 LD_LOGE("caller ns get error");
3523 return false;
3524 }
3525
3526 ns = caller->namespace;
3527 if (in_permitted_list(ns->ns_name, target) == false) {
3528 LD_LOGE("caller ns: %{public}s have no permission, target is %{public}s", ns->ns_name, target);
3529 return false;
3530 }
3531
3532 return true;
3533 }
3534
3535 /* Namespace management functions.
3536 * Some limitations come from the sanitizer:
3537 * the sanitizer requires these interfaces to be exposed.
3538 * Be careful when calling __builtin_return_address in these interfaces, because the sanitizer may hook and call them.
3539 */
3540 #ifdef IS_ASAN
3541 static const char *redir_paths[] = {
3542 LIB,
3543 CHIP_PROD_ETC,
3544 NULL
3545 };
3546 #endif
3547
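/* dlopen_impl: shared backend for dlopen/dlopen_ns/dlopen_ext.
 * Rough flow: optionally redirect to the asan library path, resolve the target
 * namespace (explicit name, else the caller's, else the default), load the entry
 * so and its dependencies (with randomized load order when enabled), relocate,
 * map the new images into the CFI shadow, notify the debugger, and finally run
 * constructors after releasing the loader lock. */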
3548 void *dlopen_impl(
3549 const char *file, int mode, const char *namespace, const void *caller_addr, const dl_extinfo *extinfo)
3550 {
3551 struct dso *volatile p, *orig_tail, *orig_syms_tail, *orig_lazy_head, *next;
3552 struct tls_module *orig_tls_tail;
3553 size_t orig_tls_cnt, orig_tls_offset, orig_tls_align;
3554 size_t i;
3555 int cs;
3556 jmp_buf jb;
3557 struct dso **volatile ctor_queue = 0;
3558 ns_t *ns;
3559 struct dso *caller;
3560 bool reserved_address = false;
3561 bool reserved_address_recursive = false;
3562 struct reserved_address_params reserved_params = {0};
3563 struct dlopen_time_info dlopen_cost = {0};
3564 struct timespec time_start, time_end, total_start, total_end;
3565 struct dso *current_so = NULL;
3566 clock_gettime(CLOCK_MONOTONIC, &total_start);
3567 #ifdef LOAD_ORDER_RANDOMIZATION
3568 struct loadtasks *tasks = NULL;
3569 struct loadtask *task = NULL;
3570 struct loadtasks **volatile tasks_ptr = (struct loadtasks **volatile)&tasks;
3571 bool is_task_appended = false;
3572 #endif
3573 #ifdef IS_ASAN
3574 char asan_file[PATH_MAX] = {0};
3575 #endif
3576
3577 if (!file) {
3578 LD_LOGD("dlopen_impl file is null, return head.");
3579 return dlopen_post(head, mode);
3580 }
3581
3582 #ifdef IS_ASAN
3583 if (g_is_asan) {
3584 for (int i=0; redir_paths[i] != NULL; i++) {
3585 char *place = strstr(file, redir_paths[i]);
3586 if (place && asan_file) {
3587 int ret = snprintf(asan_file, sizeof asan_file, "%.*s/asan%s", (int)(place - file), file, place);
3588 if (ret > 0 && access(asan_file, F_OK) == 0) {
3589 LD_LOGI("dlopen_impl redirect to asan library.");
3590 file = asan_file;
3591 break;
3592 }
3593 }
3594 }
3595 }
3596 #endif
3597
3598 if (extinfo) {
3599 reserved_address_recursive = extinfo->flag & DL_EXT_RESERVED_ADDRESS_RECURSIVE;
3600 if (extinfo->flag & DL_EXT_RESERVED_ADDRESS) {
3601 reserved_address = true;
3602 reserved_params.start_addr = extinfo->reserved_addr;
3603 reserved_params.reserved_size = extinfo->reserved_size;
3604 reserved_params.must_use_reserved = true;
3605 reserved_params.reserved_address_recursive = reserved_address_recursive;
3606 } else if (extinfo->flag & DL_EXT_RESERVED_ADDRESS_HINT) {
3607 reserved_address = true;
3608 reserved_params.start_addr = extinfo->reserved_addr;
3609 reserved_params.reserved_size = extinfo->reserved_size;
3610 reserved_params.must_use_reserved = false;
3611 reserved_params.reserved_address_recursive = reserved_address_recursive;
3612 }
3613 }
3614
3615 pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
3616 pthread_rwlock_wrlock(&lock);
3617 __inhibit_ptc();
3618 trace_marker_reset();
3619 trace_marker_begin(HITRACE_TAG_MUSL, "dlopen: ", file);
3620
3621 /* When the requested namespace does not exist, use the caller's namespace,
3622 * and when the caller does not exist, use the default namespace. */
3623 caller = (struct dso *)addr2dso((size_t)caller_addr);
3624 ns = find_ns_by_name(namespace);
3625 if (!ns) ns = ((caller && caller->namespace) ? caller->namespace : get_default_ns());
3626
3627 p = 0;
3628 if (shutting_down) {
3629 error("Cannot dlopen while program is exiting.");
3630 goto end;
3631 }
3632 orig_tls_tail = tls_tail;
3633 orig_tls_cnt = tls_cnt;
3634 orig_tls_offset = tls_offset;
3635 orig_tls_align = tls_align;
3636 orig_lazy_head = lazy_head;
3637 orig_syms_tail = syms_tail;
3638 orig_tail = tail;
3639 noload = mode & RTLD_NOLOAD;
3640
3641 rtld_fail = &jb;
3642 if (setjmp(*rtld_fail)) {
3643 /* Clean up anything new that was (partially) loaded */
3644 revert_syms(orig_syms_tail);
3645 for (p = orig_tail->next; p; p = next) {
3646 next = p->next;
3647 while (p->td_index) {
3648 void *tmp = p->td_index->next;
3649 free(p->td_index);
3650 p->td_index = tmp;
3651 }
3652 free(p->funcdescs);
3653 free(p->rpath);
3654 if (p->deps) {
3655 for (int i = 0; i < p->ndeps_direct; i++) {
3656 remove_dso_parent(p->deps[i], p);
3657 }
3658 }
3659 free(p->deps);
3660 dlclose_ns(p);
3661 unmap_library(p);
3662 if (p->parents) {
3663 free(p->parents);
3664 }
3665 free_reloc_can_search_dso(p);
3666 }
3667 for (p=orig_tail->next; p; p=next) {
3668 next = p->next;
3669 free(p);
3670 }
3671 free(ctor_queue);
3672 ctor_queue = 0;
3673 if (!orig_tls_tail) libc.tls_head = 0;
3674 tls_tail = orig_tls_tail;
3675 if (tls_tail) tls_tail->next = 0;
3676 tls_cnt = orig_tls_cnt;
3677 tls_offset = orig_tls_offset;
3678 tls_align = orig_tls_align;
3679 lazy_head = orig_lazy_head;
3680 tail = orig_tail;
3681 tail->next = 0;
3682 p = 0;
3683 goto end;
3684 } else {
3685 #ifdef LOAD_ORDER_RANDOMIZATION
3686 tasks = create_loadtasks();
3687 if (!tasks) {
3688 LD_LOGE("dlopen_impl create loadtasks failed");
3689 goto end;
3690 }
3691 task = create_loadtask(file, head, ns, true);
3692 if (!task) {
3693 LD_LOGE("dlopen_impl create loadtask failed");
3694 goto end;
3695 }
3696 trace_marker_begin(HITRACE_TAG_MUSL, "loading: entry so", file);
3697 clock_gettime(CLOCK_MONOTONIC, &time_start);
3698 if (!load_library_header(task)) {
3699 error(noload ?
3700 "Library %s is not already loaded" :
3701 "Error loading shared library %s: %m",
3702 file);
3703 LD_LOGE("dlopen_impl load library header failed for %{public}s", task->name);
3704 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3705 goto end;
3706 }
3707 if (reserved_address) {
3708 reserved_params.target = task->p;
3709 }
3710 }
3711 if (!task->p) {
3712 LD_LOGE("dlopen_impl load library failed for %{public}s", task->name);
3713 error(noload ?
3714 "Library %s is not already loaded" :
3715 "Error loading shared library %s: %m",
3716 file);
3717 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3718 goto end;
3719 }
3720 clock_gettime(CLOCK_MONOTONIC, &time_end);
3721 dlopen_cost.entry_header_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3722 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3723 if (!task->isloaded) {
3724 is_task_appended = append_loadtasks(tasks, task);
3725 }
3726 clock_gettime(CLOCK_MONOTONIC, &time_start);
3727 preload_deps(task->p, tasks);
3728 clock_gettime(CLOCK_MONOTONIC, &time_end);
3729 dlopen_cost.deps_header_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3730 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3731 unmap_preloaded_sections(tasks);
3732 if (!reserved_address_recursive) {
3733 shuffle_loadtasks(tasks);
3734 }
3735 clock_gettime(CLOCK_MONOTONIC, &time_start);
3736 run_loadtasks(tasks, reserved_address ? &reserved_params : NULL);
3737 clock_gettime(CLOCK_MONOTONIC, &time_end);
3738 dlopen_cost.map_so_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3739 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3740 p = task->p;
3741 if (!task->isloaded) {
3742 assign_tls(p);
3743 }
3744 if (!is_task_appended) {
3745 free_task(task);
3746 task = NULL;
3747 }
3748 free_loadtasks(tasks);
3749 tasks = NULL;
3750 #else
3751 trace_marker_begin(HITRACE_TAG_MUSL, "loading: entry so", file);
3752 p = load_library(file, head, ns, true, reserved_address ? &reserved_params : NULL);
3753 }
3754
3755 if (!p) {
3756 error(noload ?
3757 "Library %s is not already loaded" :
3758 "Error loading shared library %s: %m",
3759 file);
3760 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3761 goto end;
3762 }
3763 /* First load handling */
3764 load_deps(p, reserved_address && reserved_address_recursive ? &reserved_params : NULL);
3765 #endif
3766 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3767 extend_bfs_deps(p, 0);
3768 pthread_mutex_lock(&init_fini_lock);
3769 int constructed = p->constructed;
3770 pthread_mutex_unlock(&init_fini_lock);
3771 if (!constructed) ctor_queue = queue_ctors(p);
3772 if (!p->relocated && (mode & RTLD_LAZY)) {
3773 prepare_lazy(p);
3774 for (i = 0; p->deps[i]; i++)
3775 if (!p->deps[i]->relocated)
3776 prepare_lazy(p->deps[i]);
3777 }
3778 if (!p->relocated || (mode & RTLD_GLOBAL)) {
3779 /* Make new symbols global, at least temporarily, so we can do
3780 * relocations. If not RTLD_GLOBAL, this is reverted below. */
3781 add_syms(p);
3782 /* Set is_reloc_head_so_dep to true for all direct and indirect dependent sos of p, including p itself. */
3783 p->is_reloc_head_so_dep = true;
3784 for (i = 0; p->deps[i]; i++) {
3785 p->deps[i]->is_reloc_head_so_dep = true;
3786 add_syms(p->deps[i]);
3787 }
3788 }
3789 struct dso *reloc_head_so = p;
3790 trace_marker_begin(HITRACE_TAG_MUSL, "linking: entry so", p->name);
3791 clock_gettime(CLOCK_MONOTONIC, &time_start);
3792 if (!p->relocated) {
3793 reloc_all(p, extinfo);
3794 }
3795 clock_gettime(CLOCK_MONOTONIC, &time_end);
3796 dlopen_cost.reloc_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3797 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3798 trace_marker_end(HITRACE_TAG_MUSL);
3799 reloc_head_so->is_reloc_head_so_dep = false;
3800 for (size_t i = 0; reloc_head_so->deps[i]; i++) {
3801 reloc_head_so->deps[i]->is_reloc_head_so_dep = false;
3802 }
3803
3804 /* If RTLD_GLOBAL was not specified, undo any new additions
3805 * to the global symbol table. This is a nop if the library was
3806 * previously loaded and already global. */
3807 if (!(mode & RTLD_GLOBAL))
3808 revert_syms(orig_syms_tail);
3809
3810 /* Processing of deferred lazy relocations must not happen until
3811 * the new libraries are committed; otherwise we could end up with
3812 * relocations resolved to symbol definitions that get removed. */
3813 redo_lazy_relocs();
3814 clock_gettime(CLOCK_MONOTONIC, &time_start);
3815 if (map_dso_to_cfi_shadow(p) == CFI_FAILED) {
3816 error("[%s] map_dso_to_cfi_shadow failed: %m", __FUNCTION__);
3817 longjmp(*rtld_fail, 1);
3818 }
3819 clock_gettime(CLOCK_MONOTONIC, &time_end);
3820 dlopen_cost.map_cfi_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3821 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3822
3823 if (mode & RTLD_NODELETE) {
3824 p->flags |= DSO_FLAGS_NODELETE;
3825 }
3826
3827 update_tls_size();
3828 if (tls_cnt != orig_tls_cnt)
3829 install_new_tls();
3830
3831 if (orig_tail != tail) {
3832 notify_addition_to_debugger(orig_tail->next);
3833 }
3834
3835 orig_tail = tail;
3836 current_so = p;
3837 p = dlopen_post(p, mode);
3838 end:
3839 #ifdef LOAD_ORDER_RANDOMIZATION
3840 if (!is_task_appended) {
3841 free_task(task);
3842 }
3843 free_loadtasks(*tasks_ptr);
3844 #endif
3845 __release_ptc();
3846 clock_gettime(CLOCK_MONOTONIC, &time_start);
3847 pthread_rwlock_unlock(&lock);
3848 if (ctor_queue) {
3849 do_init_fini(ctor_queue);
3850 free(ctor_queue);
3851 }
3852 clock_gettime(CLOCK_MONOTONIC, &time_end);
3853 dlopen_cost.init_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3854 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3855 pthread_setcancelstate(cs, 0);
3856 trace_marker_end(HITRACE_TAG_MUSL); // "dlopen: " trace end.
3857 clock_gettime(CLOCK_MONOTONIC, &total_end);
3858 dlopen_cost.total_time = (total_end.tv_sec - total_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3859 + (total_end.tv_nsec - total_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3860 if ((dlopen_cost.total_time > DLOPEN_TIME_THRESHOLD || is_dlopen_debug_enable()) && current_so) {
3861 LD_LOGE("dlopen so: %{public}s "
3862 #ifdef DLOPEN_TIME_LOG
3863 "total_time: %{public}d ms, "
3864 "entry_header_time: %{public}d ms, "
3865 "deps_header_time: %{public}d ms, "
3866 "map_so_time: %{public}d ms, "
3867 "reloc_time: %{public}d ms, "
3868 "map_cfi_time: %{public}d ms, "
3869 #endif
3870 "init_time: %{public}d ms",
3871 current_so->name,
3872 #ifdef DLOPEN_TIME_LOG
3873 dlopen_cost.total_time,
3874 dlopen_cost.entry_header_time,
3875 dlopen_cost.deps_header_time,
3876 dlopen_cost.map_so_time,
3877 dlopen_cost.reloc_time,
3878 dlopen_cost.map_cfi_time,
3879 #endif
3880 dlopen_cost.init_time);
3881 }
3882 return p;
3883 }
3884
3885 void *dlopen(const char *file, int mode)
3886 {
3887 const void *caller_addr = __builtin_return_address(0);
3888 musl_log_reset();
3889 ld_log_reset();
3890 LD_LOGI("dlopen file:%{public}s, mode:%{public}x ,caller_addr:%{public}p .", file, mode, caller_addr);
3891 return dlopen_impl(file, mode, NULL, caller_addr, NULL);
3892 }
3893
3894 void dlns_init(Dl_namespace *dlns, const char *name)
3895 {
3896 if (!dlns) {
3897 return;
3898 }
3899 if (!name) {
3900 dlns->name[0] = 0;
3901 return;
3902 }
3903
3904 const void *caller_addr = __builtin_return_address(0);
3905 if (is_permitted(caller_addr, (char *)name) == false) {
3906 return;
3907 }
3908
3909 snprintf(dlns->name, sizeof dlns->name, "%s", name);
3910 LD_LOGI("dlns_init dlns->name:%{public}s .", dlns->name);
3911 }
3912
3913 int dlns_get(const char *name, Dl_namespace *dlns)
3914 {
3915 if (!dlns) {
3916 LD_LOGE("dlns_get dlns is null.");
3917 return EINVAL;
3918 }
3919 int ret = 0;
3920 ns_t *ns = NULL;
3921 pthread_rwlock_rdlock(&lock);
3922 if (!name) {
3923 struct dso *caller;
3924 const void *caller_addr = __builtin_return_address(0);
3925 caller = (struct dso *)addr2dso((size_t)caller_addr);
3926 ns = ((caller && caller->namespace) ? caller->namespace : get_default_ns());
3927 (void)snprintf(dlns->name, sizeof dlns->name, "%s", ns->ns_name);
3928 LD_LOGI("dlns_get name is null, current dlns dlns->name:%{public}s.", dlns->name);
3929 } else {
3930 ns = find_ns_by_name(name);
3931 if (ns) {
3932 (void)snprintf(dlns->name, sizeof dlns->name, "%s", ns->ns_name);
3933 LD_LOGI("dlns_get found ns, current dlns dlns->name:%{public}s.", dlns->name);
3934 } else {
3935 LD_LOGI("dlns_get not found ns! name:%{public}s.", name);
3936 ret = ENOKEY;
3937 }
3938 }
3939 pthread_rwlock_unlock(&lock);
3940 return ret;
3941 }
3942
3943 void *dlopen_ns(Dl_namespace *dlns, const char *file, int mode)
3944 {
3945 const void *caller_addr = __builtin_return_address(0);
3946 if (is_permitted(caller_addr, dlns->name) == false) {
3947 return NULL;
3948 }
3949
3950 musl_log_reset();
3951 ld_log_reset();
3952 LD_LOGI("dlopen_ns file:%{public}s, mode:%{public}x , caller_addr:%{public}p , dlns->name:%{public}s.",
3953 file,
3954 mode,
3955 caller_addr,
3956 dlns ? dlns->name : "NULL");
3957 return dlopen_impl(file, mode, dlns->name, caller_addr, NULL);
3958 }
3959
3960 void *dlopen_ns_ext(Dl_namespace *dlns, const char *file, int mode, const dl_extinfo *extinfo)
3961 {
3962 const void *caller_addr = __builtin_return_address(0);
3963 if (is_permitted(caller_addr, dlns->name) == false) {
3964 return NULL;
3965 }
3966
3967 musl_log_reset();
3968 ld_log_reset();
3969 LD_LOGI("dlopen_ns_ext file:%{public}s, mode:%{public}x , caller_addr:%{public}p , "
3970 "dlns->name:%{public}s. , extinfo->flag:%{public}x",
3971 file,
3972 mode,
3973 caller_addr,
3974 dlns->name,
3975 extinfo ? extinfo->flag : 0);
3976 return dlopen_impl(file, mode, dlns->name, caller_addr, extinfo);
3977 }
3978
3979 int dlns_create2(Dl_namespace *dlns, const char *lib_path, int flags)
3980 {
3981 if (!dlns) {
3982 LD_LOGE("dlns_create2 dlns is null.");
3983 return EINVAL;
3984 }
3985 ns_t *ns;
3986
3987 pthread_rwlock_wrlock(&lock);
3988 const void *caller_addr = __builtin_return_address(0);
3989 if (is_permitted(caller_addr, dlns->name) == false) {
3990 pthread_rwlock_unlock(&lock);
3991 return EPERM;
3992 }
3993
3994 ns = find_ns_by_name(dlns->name);
3995 if (ns) {
3996 LD_LOGE("dlns_create2 ns is exist.");
3997 pthread_rwlock_unlock(&lock);
3998 return EEXIST;
3999 }
4000 ns = ns_alloc();
4001 if (!ns) {
4002 LD_LOGE("dlns_create2 no memery.");
4003 pthread_rwlock_unlock(&lock);
4004 return ENOMEM;
4005 }
4006 ns_set_name(ns, dlns->name);
4007 ns_set_flag(ns, flags);
4008 ns_add_dso(ns, get_default_ns()->ns_dsos->dsos[0]); /* add main app to this namespace*/
4009 nslist_add_ns(ns); /* add ns to list*/
4010 ns_set_lib_paths(ns, lib_path);
4011
4012 if ((flags & CREATE_INHERIT_DEFAULT) != 0) {
4013 ns_add_inherit(ns, get_default_ns(), NULL);
4014 }
4015
4016 if ((flags & CREATE_INHERIT_CURRENT) != 0) {
4017 struct dso *caller;
4018 caller_addr = __builtin_return_address(0);
4019 caller = (struct dso *)addr2dso((size_t)caller_addr);
4020 if (caller && caller->namespace) {
4021 ns_add_inherit(ns, caller->namespace, NULL);
4022 }
4023 }
4024
4025 LD_LOGI("dlns_create2:"
4026 "ns_name: %{public}s ,"
4027 "separated:%{public}d ,"
4028 "lib_paths:%{public}s ",
4029 ns->ns_name, ns->separated, ns->lib_paths);
4030 pthread_rwlock_unlock(&lock);
4031
4032 return 0;
4033 }
4034
4035 int dlns_create(Dl_namespace *dlns, const char *lib_path)
4036 {
4037 LD_LOGI("dlns_create lib_paths:%{public}s", lib_path);
4038 return dlns_create2(dlns, lib_path, CREATE_INHERIT_DEFAULT);
4039 }
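/* Illustrative usage of the namespace API (the namespace name, library path and
 * library name below are placeholders, not real system values):
 *
 *   Dl_namespace dlns;
 *   dlns_init(&dlns, "example_ns");
 *   if (dlns_create(&dlns, "/path/to/libs") == 0) {
 *       void *h = dlopen_ns(&dlns, "libexample.so", RTLD_NOW);
 *       ...
 *   }
 */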
4040
4041 int dlns_inherit(Dl_namespace *dlns, Dl_namespace *inherited, const char *shared_libs)
4042 {
4043 if (!dlns || !inherited) {
4044 LD_LOGE("dlns_inherit dlns or inherited is null.");
4045 return EINVAL;
4046 }
4047
4048 pthread_rwlock_wrlock(&lock);
4049 const void *caller_addr = __builtin_return_address(0);
4050 if (is_permitted(caller_addr, dlns->name) == false) {
4051 pthread_rwlock_unlock(&lock);
4052 return EPERM;
4053 }
4054
4055 ns_t* ns = find_ns_by_name(dlns->name);
4056 ns_t* ns_inherited = find_ns_by_name(inherited->name);
4057 if (!ns || !ns_inherited) {
4058 LD_LOGE("dlns_inherit ns or ns_inherited is not found.");
4059 pthread_rwlock_unlock(&lock);
4060 return ENOKEY;
4061 }
4062 ns_add_inherit(ns, ns_inherited, shared_libs);
4063 pthread_rwlock_unlock(&lock);
4064
4065 return 0;
4066 }
4067
4068 static void dlclose_ns(struct dso *p)
4069 {
4070 if (!p) return;
4071 ns_t * ns = p->namespace;
4072 if (!ns || !ns->ns_dsos) return;
4073 for (size_t i = 0; i < ns->ns_dsos->num; i++) {
4074 if (p == ns->ns_dsos->dsos[i]) {
4075 for (size_t j = i + 1; j < ns->ns_dsos->num; j++) {
4076 ns->ns_dsos->dsos[j - 1] = ns->ns_dsos->dsos[j];
4077 }
4078 ns->ns_dsos->num--;
4079 return;
4080 }
4081 }
4082 }
4083
4084 hidden int __dl_invalid_handle(void *h)
4085 {
4086 struct dso *p;
4087 for (p=head; p; p=p->next) if (h==p) return 0;
4088 error("Invalid library handle %p", (void *)h);
4089 return 1;
4090 }
4091
4092 void *addr2dso(size_t a)
4093 {
4094 struct dso *p;
4095 for (p=head; p; p=p->next) {
4096 if (a < (size_t)p->map || a - (size_t)p->map >= p->map_len) continue;
4097 Phdr *ph = p->phdr;
4098 size_t phcnt = p->phnum;
4099 size_t entsz = p->phentsize;
4100 size_t base = (size_t)p->base;
4101 for (; phcnt--; ph=(void *)((char *)ph+entsz)) {
4102 if (ph->p_type != PT_LOAD) continue;
4103 if (a-base-ph->p_vaddr < ph->p_memsz)
4104 return p;
4105 }
4106 if (a-(size_t)p->map < p->map_len)
4107 return 0;
4108 }
4109 return 0;
4110 }
4111
4112 static void *do_dlsym(struct dso *p, const char *s, const char *v, void *ra)
4113 {
4114 int use_deps = 0;
4115 bool ra2dso = false;
4116 ns_t *ns = NULL;
4117 struct dso *caller = NULL;
4118 if (p == head || p == RTLD_DEFAULT) {
4119 p = head;
4120 ra2dso = true;
4121 } else if (p == RTLD_NEXT) {
4122 p = addr2dso((size_t)ra);
4123 if (!p) p=head;
4124 p = p->next;
4125 ra2dso = true;
4126 #ifndef HANDLE_RANDOMIZATION
4127 } else if (__dl_invalid_handle(p)) {
4128 return 0;
4129 #endif
4130 } else {
4131 use_deps = 1;
4132 ns = p->namespace;
4133 }
4134 if (ra2dso) {
4135 caller = (struct dso *)addr2dso((size_t)ra);
4136 if (caller && caller->namespace) {
4137 ns = caller->namespace;
4138 }
4139 }
4140 trace_marker_begin(HITRACE_TAG_MUSL, "dlsym: ", (s == NULL ? "(NULL)" : s));
4141 struct verinfo verinfo = { .s = s, .v = v, .use_vna_hash = false };
4142 struct symdef def = use_deps ? find_sym_by_deps(p, &verinfo, 0, ns) :
4143 find_sym2(p, &verinfo, 0, use_deps, ns);
4144 trace_marker_end(HITRACE_TAG_MUSL);
4145 if (!def.sym) {
4146 LD_LOGW("do_dlsym failed: symbol not found. so=%{public}s s=%{public}s v=%{public}s",
4147 (p == NULL ? "NULL" : p->name), s, v);
4148 error("do_dlsym failed: Symbol not found: %s, version: %s so=%s",
4149 s, strlen(v) > 0 ? v : "null", (p == NULL ? "NULL" : p->name));
4150 return 0;
4151 }
4152 if ((def.sym->st_info&0xf) == STT_TLS)
4153 return __tls_get_addr((tls_mod_off_t []){def.dso->tls_id, def.sym->st_value-DTP_OFFSET});
4154 if (DL_FDPIC && (def.sym->st_info&0xf) == STT_FUNC)
4155 return def.dso->funcdescs + (def.sym - def.dso->syms);
4156 return laddr(def.dso, def.sym->st_value);
4157 }
4158
4159 extern int invalidate_exit_funcs(struct dso *p);
4160
4161 static int so_can_unload(struct dso *p, int check_flag)
4162 {
4163 if ((check_flag & UNLOAD_COMMON_CHECK) != 0) {
4164 if (__dl_invalid_handle(p)) {
4165 LD_LOGE("[dlclose]: invalid handle %{public}p", p);
4166 error("[dlclose]: Handle is invalid.");
4167 return 0;
4168 }
4169
4170 if (!p->by_dlopen) {
4171 LD_LOGD("[dlclose]: skip unload %{public}s because it's not loaded by dlopen", p->name);
4172 return 0;
4173 }
4174
4175 /* dso is marked as RTLD_NODELETE library, do nothing here. */
4176 if ((p->flags & DSO_FLAGS_NODELETE) != 0) {
4177 LD_LOGD("[dlclose]: skip unload %{public}s because flags is RTLD_NODELETE", p->name);
4178 return 0;
4179 }
4180 }
4181
4182 if ((check_flag & UNLOAD_NR_DLOPEN_CHECK) != 0) {
4183 if (p->nr_dlopen > 0) {
4184 LD_LOGD("[dlclose]: skip unload %{public}s because nr_dlopen=%{public}d > 0", p->name, p->nr_dlopen);
4185 return 0;
4186 }
4187 }
4188
4189 return 1;
4190 }
4191
4192 static int dlclose_post(struct dso *p)
4193 {
4194 if (p == NULL) {
4195 return -1;
4196 }
4197 #ifdef ENABLE_HWASAN
4198 if (libc.unload_hook) {
4199 libc.unload_hook((unsigned long int)p->base, p->phdr, p->phnum);
4200 }
4201 #endif
4202 unmap_dso_from_cfi_shadow(p);
4203 unmap_library(p);
4204 if (p->parents) {
4205 free(p->parents);
4206 }
4207 free_reloc_can_search_dso(p);
4208 if (p->tls.size == 0) {
4209 free(p);
4210 }
4211
4212 ++subcnt;
4213 return 0;
4214 }
4215
4216 static int dlclose_impl(struct dso *p)
4217 {
4218 struct dso *d;
4219
4220 trace_marker_reset();
4221 trace_marker_begin(HITRACE_TAG_MUSL, "dlclose", p->name);
4222
4223 /* remove dso symbols from global list */
4224 if (p->syms_next) {
4225 for (d = head; d->syms_next != p; d = d->syms_next)
4226 ; /* NOP */
4227 d->syms_next = p->syms_next;
4228 } else if (p == syms_tail) {
4229 for (d = head; d->syms_next != p; d = d->syms_next)
4230 ; /* NOP */
4231 d->syms_next = NULL;
4232 syms_tail = d;
4233 }
4234
4235 /* remove dso from lazy list if needed */
4236 if (p == lazy_head) {
4237 lazy_head = p->lazy_next;
4238 } else if (p->lazy_next) {
4239 for (d = lazy_head; d->lazy_next != p; d = d->lazy_next)
4240 ; /* NOP */
4241 d->lazy_next = p->lazy_next;
4242 }
4243
4244 pthread_mutex_lock(&init_fini_lock);
4245 /* remove dso from fini list */
4246 if (p == fini_head) {
4247 fini_head = p->fini_next;
4248 } else if (p->fini_next) {
4249 for (d = fini_head; d->fini_next != p; d = d->fini_next)
4250 ; /* NOP */
4251 d->fini_next = p->fini_next;
4252 }
4253 pthread_mutex_unlock(&init_fini_lock);
4254
4255 /* empty tls image */
4256 if (p->tls.size != 0) {
4257 p->tls.image = NULL;
4258 }
4259
4260 /* remove dso from global dso list */
4261 if (p == tail) {
4262 tail = p->prev;
4263 tail->next = NULL;
4264 } else {
4265 p->next->prev = p->prev;
4266 p->prev->next = p->next;
4267 }
4268
4269 /* remove dso from namespace */
4270 dlclose_ns(p);
4271
4272 /* remove the handle associated with this dso, if any */
4273 void* handle = find_handle_by_dso(p);
4274 if (handle) {
4275 remove_handle_node(handle);
4276 }
4277
4278 /* after destruct, invalidate atexit funcs which belong to this dso */
4279 #if (defined(FEATURE_ATEXIT_CB_PROTECT))
4280 invalidate_exit_funcs(p);
4281 #endif
4282
4283 notify_remove_to_debugger(p);
4284
4285 if (p->lazy != NULL)
4286 free(p->lazy);
4287 if (p->deps != no_deps)
4288 free(p->deps);
4289
4290 if (p->deps_all_built) {
4291 free(p->deps_all);
4292 }
4293
4294 trace_marker_end(HITRACE_TAG_MUSL);
4295
4296 return 0;
4297 }
4298
4299 static int do_dlclose(struct dso *p, bool check_deps_all)
4300 {
4301 struct dso_entry *ef = NULL;
4302 struct dso_entry *ef_tmp = NULL;
4303 size_t n;
4304 int unload_check_result;
4305 TAILQ_HEAD(unload_queue, dso_entry) unload_queue;
4306 TAILQ_HEAD(need_unload_queue, dso_entry) need_unload_queue;
4307 unload_check_result = so_can_unload(p, UNLOAD_COMMON_CHECK);
4308 if (unload_check_result != 1) {
4309 return unload_check_result;
4310 }
4311 // Unconditionally subtract 1 here because dlopen_post unconditionally adds 1.
4312 if (p->nr_dlopen > 0) {
4313 --(p->nr_dlopen);
4314 } else {
4315 LD_LOGE("[dlclose]: number of dlopen and dlclose of %{public}s doesn't match when dlclose %{public}s",
4316 p->name, p->name);
4317 return 0;
4318 }
4319
4320 if (p->bfs_built) {
4321 for (int i = 0; p->deps[i]; i++) {
4322 if (p->deps[i]->nr_dlopen > 0) {
4323 p->deps[i]->nr_dlopen--;
4324 } else {
4325 LD_LOGE("[dlclose]: number of dlopen and dlclose of %{public}s doesn't match when dlclose %{public}s",
4326 p->deps[i]->name, p->name);
4327 return 0;
4328 }
4329 }
4330 } else {
4331 /* This part is used for thread local object destructors:
4332 * - nr_dlopen increases for all deps (including self) when a thread local object destructor is added.
4333 * - nr_dlopen decreases for all deps (including self) when a thread local object destructor is called.
4334 */
4335 if (check_deps_all && p->deps_all_built) {
4336 for (int i = 0; p->deps_all[i]; i++) {
4337 if (p->deps_all[i]->nr_dlopen > 0) {
4338 p->deps_all[i]->nr_dlopen--;
4339 } else {
4340 LD_LOGE("[dlclose]: number of dlopen and dlclose of %{public}s doesn't match when dlclose %{public}s",
4341 p->deps_all[i]->name, p->name);
4342 return 0;
4343 }
4344 }
4345 }
4346 }
4347
4348 unload_check_result = so_can_unload(p, UNLOAD_NR_DLOPEN_CHECK);
4349 if (unload_check_result != 1) {
4350 return unload_check_result;
4351 }
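/* Breadth-first walk over the direct-dependency graph starting at p: each dso
 * taken from unload_queue is added to need_unload_queue (once), and a dependency
 * is enqueued only when its parent count drops to zero and it passes all unload
 * checks. need_unload_queue ends up holding every dso this dlclose can unload. */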
4352 TAILQ_INIT(&unload_queue);
4353 TAILQ_INIT(&need_unload_queue);
4354 struct dso_entry *start_entry = (struct dso_entry *)malloc(sizeof(struct dso_entry));
4355 start_entry->dso = p;
4356 TAILQ_INSERT_TAIL(&unload_queue, start_entry, entries);
4357
4358 while (!TAILQ_EMPTY(&unload_queue)) {
4359 struct dso_entry *ecur = TAILQ_FIRST(&unload_queue);
4360 struct dso *cur = ecur->dso;
4361 TAILQ_REMOVE(&unload_queue, ecur, entries);
4362 bool already_in_need_unload_queue = false;
4363 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4364 if (ef->dso == cur) {
4365 already_in_need_unload_queue = true;
4366 break;
4367 }
4368 }
4369 if (already_in_need_unload_queue) {
4370 continue;
4371 }
4372 TAILQ_INSERT_TAIL(&need_unload_queue, ecur, entries);
4373 for (int i = 0; i < cur->ndeps_direct; i++) {
4374 remove_dso_parent(cur->deps[i], cur);
4375 if ((cur->deps[i]->parents_count == 0) && (so_can_unload(cur->deps[i], UNLOAD_ALL_CHECK) == 1)) {
4376 bool already_in_unload_queue = false;
4377 TAILQ_FOREACH(ef, &unload_queue, entries) {
4378 if (ef->dso == cur->deps[i]) {
4379 already_in_unload_queue = true;
4380 break;
4381 }
4382 }
4383 if (already_in_unload_queue) {
4384 continue;
4385 }
4386
4387 struct dso_entry *edeps = (struct dso_entry *)malloc(sizeof(struct dso_entry));
4388 edeps->dso = cur->deps[i];
4389 TAILQ_INSERT_TAIL(&unload_queue, edeps, entries);
4390 }
4391 } /* for */
4392 } /* while */
4393
4394 if (is_dlclose_debug_enable()) {
4395 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4396 LD_LOGE("[dlclose]: unload %{public}s succeed when dlclose %{public}s", ef->dso->name, p->name);
4397 }
4398 for (size_t deps_num = 0; p->deps[deps_num]; deps_num++) {
4399 bool ready_to_unload = false;
4400 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4401 if (ef->dso == p->deps[deps_num]) {
4402 ready_to_unload = true;
4403 break;
4404 }
4405 }
4406 if (!ready_to_unload) {
4407 LD_LOGE("[dlclose]: unload %{public}s failed when dlclose %{public}s,"
4408 "nr_dlopen:%{public}d, by_dlopen:%{public}d, parents_count:%{public}d",
4409 p->deps[deps_num]->name, p->name, p->deps[deps_num]->nr_dlopen,
4410 p->deps[deps_num]->by_dlopen, p->deps[deps_num]->parents_count);
4411 }
4412 }
4413 }
4414
4415 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4416 dlclose_impl(ef->dso);
4417 }
4418
4419 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4420 /* call destructors if needed */
4421 pthread_mutex_lock(&init_fini_lock);
4422 int constructed = ef->dso->constructed;
4423 pthread_mutex_unlock(&init_fini_lock);
4424
4425 if (constructed) {
4426 size_t dyn[DYN_CNT];
4427 decode_vec(ef->dso->dynv, dyn, DYN_CNT);
4428 if (dyn[0] & (1<<DT_FINI_ARRAY)) {
4429 n = dyn[DT_FINI_ARRAYSZ] / sizeof(size_t);
4430 size_t *fn = (size_t *)laddr(ef->dso, dyn[DT_FINI_ARRAY]) + n;
4431 trace_marker_begin(HITRACE_TAG_MUSL, "calling destructors:", ef->dso->name);
4432
4433 pthread_rwlock_unlock(&lock);
4434 while (n--)
4435 ((void (*)(void))*--fn)();
4436 pthread_rwlock_wrlock(&lock);
4437
4438 trace_marker_end(HITRACE_TAG_MUSL);
4439 }
4440 pthread_mutex_lock(&init_fini_lock);
4441 ef->dso->constructed = 0;
4442 pthread_mutex_unlock(&init_fini_lock);
4443 }
4444 }
4445 // Unload all sos at the end because a weak symbol may cause a later-unloaded so to access a previously unloaded so's functions.
4446 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4447 dlclose_post(ef->dso);
4448 }
4449 // Free dso_entry.
4450 TAILQ_FOREACH_SAFE(ef, &need_unload_queue, entries, ef_tmp) {
4451 if (ef) {
4452 free(ef);
4453 }
4454 }
4455
4456 return 0;
4457 }
4458
4459 hidden int __dlclose(void *p)
4460 {
4461 pthread_mutex_lock(&dlclose_lock);
4462 int rc;
4463 pthread_rwlock_wrlock(&lock);
4464 if (shutting_down) {
4465 error("Cannot dlclose while program is exiting.");
4466 pthread_rwlock_unlock(&lock);
4467 pthread_mutex_unlock(&dlclose_lock);
4468 return -1;
4469 }
4470 #ifdef HANDLE_RANDOMIZATION
4471 struct dso *dso = find_dso_by_handle(p);
4472 if (dso == NULL) {
4473 errno = EINVAL;
4474 error("Handle is invalid.");
4475 LD_LOGE("Handle is not find.");
4476 pthread_rwlock_unlock(&lock);
4477 pthread_mutex_unlock(&dlclose_lock);
4478 return -1;
4479 }
4480 rc = do_dlclose(dso, 0);
4481 #else
4482 rc = do_dlclose(p, 0);
4483 #endif
4484 pthread_rwlock_unlock(&lock);
4485 pthread_mutex_unlock(&dlclose_lock);
4486 return rc;
4487 }
4488
4489 static inline int sym_is_matched(const Sym* sym, size_t addr_offset_so) {
4490 return sym->st_value &&
4491 ((sym->st_info&0xf) != STT_TLS) &&
4492 (addr_offset_so >= sym->st_value) &&
4493 (addr_offset_so < sym->st_value + sym->st_size);
4494 }
4495
4496 static inline Sym* find_addr_by_elf(size_t addr_offset_so, struct dso *p) {
4497 uint32_t nsym = p->hashtab[1];
4498 Sym *sym = p->syms;
4499 for (; nsym; nsym--, sym++) {
4500 if (sym_is_matched(sym, addr_offset_so)) {
4501 return sym;
4502 }
4503 }
4504
4505 return NULL;
4506 }
4507
4508 static inline Sym* find_addr_by_gnu(size_t addr_offset_so, struct dso *p) {
4509
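/* DT_GNU_HASH layout (in 32-bit words): [nbuckets, symoffset, bloom_size,
 * bloom_shift], followed by bloom_size bloom words (one size_t each), then
 * nbuckets bucket entries, then the chain array. The bucket array therefore
 * starts at ghashtab + 4 + bloom_size * sizeof(size_t) / 4, as computed below. */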
4510 size_t i, nsym, first_hash_sym_index;
4511 uint32_t *hashval;
4512 Sym *sym_tab = p->syms;
4513 uint32_t *buckets = p->ghashtab + 4 + (p->ghashtab[2] * sizeof(size_t) / 4);
4514 // buckets[0] points to the first defined symbol; all symbols before it are undefined.
4515 first_hash_sym_index = buckets[0];
4516 Sym *sym = &sym_tab[first_hash_sym_index];
4517
4518 // Find the largest symbol index stored in the buckets (the start of the last hash chain).
4519 for (i = nsym = 0; i < p->ghashtab[0]; i++) {
4520 if (buckets[i] > nsym)
4521 nsym = buckets[i];
4522 }
4523
4524 for (i = first_hash_sym_index; i < nsym; i++) {
4525 if (sym_is_matched(sym, addr_offset_so)) {
4526 return sym;
4527 }
4528 sym++;
4529 }
4530
4531 // Start traversing the hash list from the position pointed to by the last bucket.
4532 if (nsym) {
4533 hashval = buckets + p->ghashtab[0] + (nsym - p->ghashtab[1]);
4534 do {
4535 nsym++;
4536 if (sym_is_matched(sym, addr_offset_so)) {
4537 return sym;
4538 }
4539 sym++;
4540 }
4541 while (!(*hashval++ & 1));
4542 }
4543
4544 return NULL;
4545 }
4546
4547
4548 int dladdr(const void *addr_arg, Dl_info *info)
4549 {
4550 size_t addr = (size_t)addr_arg;
4551 struct dso *p;
4552 Sym *match_sym = NULL;
4553 char *strings;
4554
4555 pthread_rwlock_rdlock(&lock);
4556 p = addr2dso(addr);
4557 pthread_rwlock_unlock(&lock);
4558
4559 if (!p) return 0;
4560
4561 strings = p->strings;
4562 size_t addr_offset_so = addr - (size_t)p->base;
4563
4564 info->dli_fname = p->name;
4565 info->dli_fbase = p->map;
4566
4567 if (p->ghashtab) {
4568 match_sym = find_addr_by_gnu(addr_offset_so, p);
4569
4570 } else {
4571 match_sym = find_addr_by_elf(addr_offset_so, p);
4572 }
4573
4574 if (!match_sym) {
4575 info->dli_sname = 0;
4576 info->dli_saddr = 0;
4577 return 1;
4578 }
4579 info->dli_sname = strings + match_sym->st_name;
4580 info->dli_saddr = (void *)laddr(p, match_sym->st_value);
4581 return 1;
4582 }
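/* Illustrative use of dladdr (example code only, not part of the loader):
 *
 *   Dl_info info;
 *   if (dladdr((void *)&dladdr, &info) && info.dli_sname)
 *       printf("%s from %s\n", info.dli_sname, info.dli_fname);
 */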
4583
4584 hidden void *__dlsym(void *restrict p, const char *restrict s, void *restrict ra)
4585 {
4586 void *res;
4587 pthread_rwlock_rdlock(&lock);
4588 #ifdef HANDLE_RANDOMIZATION
4589 if ((p != RTLD_DEFAULT) && (p != RTLD_NEXT)) {
4590 struct dso *dso = find_dso_by_handle(p);
4591 if (dso == NULL) {
4592 pthread_rwlock_unlock(&lock);
4593 return 0;
4594 }
4595 res = do_dlsym(dso, s, "", ra);
4596 } else {
4597 res = do_dlsym(p, s, "", ra);
4598 }
4599 #else
4600 res = do_dlsym(p, s, "", ra);
4601 #endif
4602 pthread_rwlock_unlock(&lock);
4603 return res;
4604 }
4605
4606 hidden void *__dlvsym(void *restrict p, const char *restrict s, const char *restrict v, void *restrict ra)
4607 {
4608 void *res;
4609 pthread_rwlock_rdlock(&lock);
4610 #ifdef HANDLE_RANDOMIZATION
4611 if ((p != RTLD_DEFAULT) && (p != RTLD_NEXT)) {
4612 struct dso *dso = find_dso_by_handle(p);
4613 if (dso == NULL) {
4614 pthread_rwlock_unlock(&lock);
4615 return 0;
4616 }
4617 res = do_dlsym(dso, s, v, ra);
4618 } else {
4619 res = do_dlsym(p, s, v, ra);
4620 }
4621 #else
4622 res = do_dlsym(p, s, v, ra);
4623 #endif
4624 pthread_rwlock_unlock(&lock);
4625 return res;
4626 }
4627
4628 hidden void *__dlsym_redir_time64(void *restrict p, const char *restrict s, void *restrict ra)
4629 {
4630 #if _REDIR_TIME64
4631 const char *suffix, *suffix2 = "";
4632 char redir[36];
4633
4634 /* Map the symbol name to a time64 version of itself according to the
4635 * pattern used for naming the redirected time64 symbols. */
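/* For example, "mktime" would be tried as "__mktime64" and "localtime_r" as
 * "__localtime64_r"; the replacement below only happens if the redirected
 * name is actually present in libc. */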
4636 size_t l = strnlen(s, sizeof redir);
4637 if (l<4 || l==sizeof redir) goto no_redir;
4638 if (s[l-2]=='_' && s[l-1]=='r') {
4639 l -= 2;
4640 suffix2 = s+l;
4641 }
4642 if (l<4) goto no_redir;
4643 if (!strcmp(s+l-4, "time")) suffix = "64";
4644 else suffix = "_time64";
4645
4646 /* Use the presence of the remapped symbol name in libc to determine
4647 * whether it's one that requires time64 redirection; replace if so. */
4648 snprintf(redir, sizeof redir, "__%.*s%s%s", (int)l, s, suffix, suffix2);
4649 if (find_sym(&ldso, redir, 1).sym) s = redir;
4650 no_redir:
4651 #endif
4652 return __dlsym(p, s, ra);
4653 }
4654
4655 int dl_iterate_phdr(int(*callback)(struct dl_phdr_info *info, size_t size, void *data), void *data)
4656 {
4657 pthread_mutex_lock(&dlclose_lock);
4658 struct dso *current;
4659 struct dl_phdr_info info;
4660 int ret = 0;
4661 for(current = head; current;) {
4662 info.dlpi_addr = (uintptr_t)current->base;
4663 info.dlpi_name = current->name;
4664 info.dlpi_phdr = current->phdr;
4665 info.dlpi_phnum = current->phnum;
4666 info.dlpi_adds = gencnt;
4667 info.dlpi_subs = subcnt;
4668 info.dlpi_tls_modid = current->tls_id;
4669 info.dlpi_tls_data = !current->tls_id ? 0 :
4670 __tls_get_addr((tls_mod_off_t[]){current->tls_id,0});
4671
4672 // FIXME: add dl_phdr_lock for unwind callback
4673 pthread_mutex_lock(&dl_phdr_lock);
4674 ret = (callback)(&info, sizeof (info), data);
4675 pthread_mutex_unlock(&dl_phdr_lock);
4676
4677 if (ret != 0) break;
4678 pthread_rwlock_rdlock(&lock);
4679 current = current->next;
4680 pthread_rwlock_unlock(&lock);
4681 }
4682 pthread_mutex_unlock(&dlclose_lock);
4683 return ret;
4684 }
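/* Illustrative dl_iterate_phdr callback (example code only):
 *
 *   static int count_modules(struct dl_phdr_info *info, size_t size, void *data) {
 *       (*(int *)data)++;   // e.g. count loaded modules
 *       return 0;           // returning 0 keeps the iteration going
 *   }
 *   int n = 0;
 *   dl_iterate_phdr(count_modules, &n);
 */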
4685
4686 static void error_impl(const char *fmt, ...)
4687 {
4688 va_list ap;
4689 va_start(ap, fmt);
4690 if (!runtime) {
4691 vdprintf(2, fmt, ap);
4692 dprintf(2, "\n");
4693 ldso_fail = 1;
4694 va_end(ap);
4695 return;
4696 }
4697 __dl_vseterr(fmt, ap);
4698 va_end(ap);
4699 }
4700
4701 static void error_noop(const char *fmt, ...)
4702 {
4703 }
4704
4705 int dlns_set_namespace_lib_path(const char * name, const char * lib_path)
4706 {
4707 if (!name || !lib_path) {
4708 LD_LOGE("dlns_set_namespace_lib_path name or lib_path is null.");
4709 return EINVAL;
4710 }
4711
4712 pthread_rwlock_wrlock(&lock);
4713 const void *caller_addr = __builtin_return_address(0);
4714 if (is_permitted(caller_addr, (char *)name) == false) {
4715 pthread_rwlock_unlock(&lock);
4716 return EPERM;
4717 }
4718
4719 ns_t* ns = find_ns_by_name(name);
4720 if (!ns) {
4721 pthread_rwlock_unlock(&lock);
4722 LD_LOGE("dlns_set_namespace_lib_path fail, input ns name : [%{public}s] is not found.", name);
4723 return ENOKEY;
4724 }
4725
4726 ns_set_lib_paths(ns, lib_path);
4727 pthread_rwlock_unlock(&lock);
4728 return 0;
4729 }
4730
4731 int dlns_set_namespace_separated(const char * name, const bool separated)
4732 {
4733 if (!name) {
4734 LD_LOGE("dlns_set_namespace_separated name is null.");
4735 return EINVAL;
4736 }
4737
4738 pthread_rwlock_wrlock(&lock);
4739 const void *caller_addr = __builtin_return_address(0);
4740 if (is_permitted(caller_addr, (char *)name) == false) {
4741 pthread_rwlock_unlock(&lock);
4742 return EPERM;
4743 }
4744
4745 ns_t* ns = find_ns_by_name(name);
4746 if (!ns) {
4747 pthread_rwlock_unlock(&lock);
4748 LD_LOGE("dlns_set_namespace_separated fail, input ns name : [%{public}s] is not found.", name);
4749 return ENOKEY;
4750 }
4751
4752 ns_set_separated(ns, separated);
4753 pthread_rwlock_unlock(&lock);
4754 return 0;
4755 }
4756
4757 int dlns_set_namespace_permitted_paths(const char * name, const char * permitted_paths)
4758 {
4759 if (!name || !permitted_paths) {
4760 LD_LOGE("dlns_set_namespace_permitted_paths name or permitted_paths is null.");
4761 return EINVAL;
4762 }
4763
4764 pthread_rwlock_wrlock(&lock);
4765 const void *caller_addr = __builtin_return_address(0);
4766 if (is_permitted(caller_addr, (char *)name) == false) {
4767 pthread_rwlock_unlock(&lock);
4768 return EPERM;
4769 }
4770
4771 ns_t* ns = find_ns_by_name(name);
4772 if (!ns) {
4773 pthread_rwlock_unlock(&lock);
4774 LD_LOGE("dlns_set_namespace_permitted_paths fail, input ns name : [%{public}s] is not found.", name);
4775 return ENOKEY;
4776 }
4777
4778 ns_set_permitted_paths(ns, permitted_paths);
4779 pthread_rwlock_unlock(&lock);
4780 return 0;
4781 }
4782
4783 int dlns_set_namespace_allowed_libs(const char * name, const char * allowed_libs)
4784 {
4785 if (!name || !allowed_libs) {
4786 LD_LOGE("dlns_set_namespace_allowed_libs name or allowed_libs is null.");
4787 return EINVAL;
4788 }
4789
4790 pthread_rwlock_wrlock(&lock);
4791 const void *caller_addr = __builtin_return_address(0);
4792 if (is_permitted(caller_addr, (char *)name) == false) {
4793 pthread_rwlock_unlock(&lock);
4794 return EPERM;
4795 }
4796
4797 ns_t* ns = find_ns_by_name(name);
4798 if (!ns) {
4799 pthread_rwlock_unlock(&lock);
4800 LD_LOGE("dlns_set_namespace_allowed_libs fail, input ns name : [%{public}s] is not found.", name);
4801 return ENOKEY;
4802 }
4803
4804 ns_set_allowed_libs(ns, allowed_libs);
4805 pthread_rwlock_unlock(&lock);
4806 return 0;
4807 }
4808
4809 int handle_asan_path_open(int fd, const char *name, ns_t *namespace, char *buf, size_t buf_size)
4810 {
4811 LD_LOGD("handle_asan_path_open fd:%{public}d, name:%{public}s , namespace:%{public}s .",
4812 fd,
4813 name,
4814 namespace ? namespace->ns_name : "NULL");
4815 int fd_tmp = fd;
4816 if (fd == -1 && (namespace->asan_lib_paths || namespace->lib_paths)) {
4817 if (namespace->lib_paths && namespace->asan_lib_paths) {
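/* +2: room for the ':' separator and the terminating NUL. */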
4818 size_t newlen = strlen(namespace->asan_lib_paths) + strlen(namespace->lib_paths) + 2;
4819 char *new_lib_paths = malloc(newlen);
4820 memset(new_lib_paths, 0, newlen);
4821 strcpy(new_lib_paths, namespace->asan_lib_paths);
4822 strcat(new_lib_paths, ":");
4823 strcat(new_lib_paths, namespace->lib_paths);
4824 fd_tmp = path_open(name, new_lib_paths, buf, buf_size);
4825 LD_LOGD("handle_asan_path_open path_open new_lib_paths:%{public}s ,fd: %{public}d.", new_lib_paths, fd_tmp);
4826 free(new_lib_paths);
4827 } else if (namespace->asan_lib_paths) {
4828 fd_tmp = path_open(name, namespace->asan_lib_paths, buf, buf_size);
4829 LD_LOGD("handle_asan_path_open path_open asan_lib_paths:%{public}s ,fd: %{public}d.",
4830 namespace->asan_lib_paths,
4831 fd_tmp);
4832 } else {
4833 fd_tmp = path_open(name, namespace->lib_paths, buf, buf_size);
4834 LD_LOGD(
4835 "handle_asan_path_open path_open lib_paths:%{public}s ,fd: %{public}d.", namespace->lib_paths, fd_tmp);
4836 }
4837 }
4838 return fd_tmp;
4839 }
4840
4841 void* dlopen_ext(const char *file, int mode, const dl_extinfo *extinfo)
4842 {
4843 const void *caller_addr = __builtin_return_address(0);
4844 musl_log_reset();
4845 ld_log_reset();
4846 if (extinfo != NULL) {
4847 if ((extinfo->flag & ~(DL_EXT_VALID_FLAG_BITS)) != 0) {
4848 LD_LOGE("Error dlopen_ext %{public}s: invalid flag %{public}x", file, extinfo->flag);
4849 return NULL;
4850 }
4851 }
4852 LD_LOGI("dlopen_ext file:%{public}s, mode:%{public}x , caller_addr:%{public}p , extinfo->flag:%{public}x",
4853 file,
4854 mode,
4855 caller_addr,
4856 extinfo ? extinfo->flag : 0);
4857 return dlopen_impl(file, mode, NULL, caller_addr, extinfo);
4858 }
4859
4860 #ifdef LOAD_ORDER_RANDOMIZATION
4861 static void open_library_by_path(const char *name, const char *s, struct loadtask *task, struct zip_info *z_info)
4862 {
4863 char *buf = task->buf;
4864 size_t buf_size = sizeof task->buf;
4865 size_t l;
4866 for (;;) {
4867 s += strspn(s, ":\n");
4868 l = strcspn(s, ":\n");
4869 if (l-1 >= INT_MAX) return;
4870 if (snprintf(buf, buf_size, "%.*s/%s", (int)l, s, name) < buf_size) {
4871 char *separator = strstr(buf, ZIP_FILE_PATH_SEPARATOR);
4872 if (separator != NULL) {
4873 int res = open_uncompressed_library_in_zipfile(buf, z_info, separator);
4874 if (res == 0) {
4875 task->fd = z_info->fd;
4876 task->file_offset = z_info->file_offset;
4877 break;
4878 } else {
4879 memset(z_info->path_buf, 0, sizeof(z_info->path_buf));
4880 }
4881 } else {
4882 if ((task->fd = open(buf, O_RDONLY|O_CLOEXEC))>=0) break;
4883 }
4884 }
4885 s += l;
4886 }
4887 return;
4888 }
4889
4890 static void handle_asan_path_open_by_task(int fd, const char *name, ns_t *namespace, struct loadtask *task,
4891 struct zip_info *z_info)
4892 {
4893 LD_LOGD("handle_asan_path_open_by_task fd:%{public}d, name:%{public}s , namespace:%{public}s .",
4894 fd,
4895 name,
4896 namespace ? namespace->ns_name : "NULL");
4897 if (fd == -1 && (namespace->asan_lib_paths || namespace->lib_paths)) {
4898 if (namespace->lib_paths && namespace->asan_lib_paths) {
4899 size_t newlen = strlen(namespace->asan_lib_paths) + strlen(namespace->lib_paths) + 2;
4900 char *new_lib_paths = malloc(newlen);
4901 memset(new_lib_paths, 0, newlen);
4902 strcpy(new_lib_paths, namespace->asan_lib_paths);
4903 strcat(new_lib_paths, ":");
4904 strcat(new_lib_paths, namespace->lib_paths);
4905 open_library_by_path(name, new_lib_paths, task, z_info);
4906 LD_LOGD("handle_asan_path_open_by_task open_library_by_path new_lib_paths:%{public}s ,fd: %{public}d.",
4907 new_lib_paths,
4908 task->fd);
4909 free(new_lib_paths);
4910 } else if (namespace->asan_lib_paths) {
4911 open_library_by_path(name, namespace->asan_lib_paths, task, z_info);
4912 LD_LOGD("handle_asan_path_open_by_task open_library_by_path asan_lib_paths:%{public}s ,fd: %{public}d.",
4913 namespace->asan_lib_paths,
4914 task->fd);
4915 } else {
4916 open_library_by_path(name, namespace->lib_paths, task, z_info);
4917 LD_LOGD("handle_asan_path_open_by_task open_library_by_path lib_paths:%{public}s ,fd: %{public}d.",
4918 namespace->lib_paths,
4919 task->fd);
4920 }
4921 }
4922 return;
4923 }
4924
4925 /* Get the offset of an uncompressed library inside a zip file, so that offset can be used to mmap the library directly. */
4926 int open_uncompressed_library_in_zipfile(const char *path, struct zip_info *z_info, char *separator)
4927 {
4928 struct local_file_header zip_file_header;
4929 struct central_dir_entry c_dir_entry;
4930 struct zip_end_locator end_locator;
4931
4932 /* Use "'!/' to split the path into zipfile path and library path in zipfile.
4933 * For example:
4934 * - path: x/xx/xxx.zip!/x/xx/xxx.so
4935 * - zipfile path: x/xx/xxx.zip
4936 * - library path in zipfile: x/xx/xxx.so */
4937 if (strlcpy(z_info->path_buf, path, PATH_BUF_SIZE) >= PATH_BUF_SIZE) {
4938 LD_LOGE("Open uncompressed library: input path %{public}s is too long.", path);
4939 return -1;
4940 }
4941 z_info->path_buf[separator - path] = '\0';
4942 z_info->file_path_index = separator - path + 2;
4943 char *zip_file_path = z_info->path_buf;
4944 char *lib_path = &z_info->path_buf[z_info->file_path_index];
4945 if (zip_file_path == NULL || lib_path == NULL) {
4946 LD_LOGE("Open uncompressed library: get zip and lib path failed.");
4947 return -1;
4948 }
4949 LD_LOGD("Open uncompressed library: input path: %{public}s, zip file path: %{public}s, library path: %{public}s.",
4950 path, zip_file_path, lib_path);
4951
4952 // Get zip file length
4953 FILE *zip_file = fopen(zip_file_path, "re");
4954 if (zip_file == NULL) {
4955 LD_LOGE("Open uncompressed library: fopen %{public}s failed.", zip_file_path);
4956 return -1;
4957 }
4958 if (fseek(zip_file, 0, SEEK_END) != 0) {
4959 LD_LOGE("Open uncompressed library: fseek SEEK_END failed.");
4960 fclose(zip_file);
4961 return -1;
4962 }
4963 int64_t zip_file_len = ftell(zip_file);
4964 if (zip_file_len == -1) {
4965 LD_LOGE("Open uncompressed library: get zip file length failed.");
4966 fclose(zip_file);
4967 return -1;
4968 }
4969
4970 // Read end of central directory record.
4971 size_t end_locator_len = sizeof(end_locator);
4972 size_t end_locator_pos = zip_file_len - end_locator_len;
4973 if (fseek(zip_file, end_locator_pos, SEEK_SET) != 0) {
4974 LD_LOGE("Open uncompressed library: fseek end locator position failed.");
4975 fclose(zip_file);
4976 return -1;
4977 }
4978 if (fread(&end_locator, sizeof(end_locator), 1, zip_file) != 1 || end_locator.signature != EOCD_SIGNATURE) {
4979 LD_LOGE("Open uncompressed library: fread end locator failed.");
4980 fclose(zip_file);
4981 return -1;
4982 }
4983
4984 char file_name[PATH_BUF_SIZE];
4985 uint64_t current_dir_pos = end_locator.offset;
4986 for (uint16_t i = 0; i < end_locator.total_entries; i++) {
4987 // Read central dir entry.
4988 if (fseek(zip_file, current_dir_pos, SEEK_SET) != 0) {
4989 LD_LOGE("Open uncompressed library: fseek current centra dir entry position failed.");
4990 fclose(zip_file);
4991 return -1;
4992 }
4993 if (fread(&c_dir_entry, sizeof(c_dir_entry), 1, zip_file) != 1 || c_dir_entry.signature != CENTRAL_SIGNATURE) {
4994 LD_LOGE("Open uncompressed library: fread centra dir entry failed.");
4995 fclose(zip_file);
4996 return -1;
4997 }
4998
4999 if (fread(file_name, c_dir_entry.name_size, 1, zip_file) != 1) {
5000 LD_LOGE("Open uncompressed library: fread file name failed.");
5001 fclose(zip_file);
5002 return -1;
5003 }
5004 if (strcmp(file_name, lib_path) == 0) {
5005 // Read local file header.
5006 if (fseek(zip_file, c_dir_entry.local_header_offset, SEEK_SET) != 0) {
5007 LD_LOGE("Open uncompressed library: fseek local file header failed.");
5008 fclose(zip_file);
5009 return -1;
5010 }
5011 if (fread(&zip_file_header, sizeof(zip_file_header), 1, zip_file) != 1) {
5012 LD_LOGE("Open uncompressed library: fread local file header failed.");
5013 fclose(zip_file);
5014 return -1;
5015 }
5016 if (zip_file_header.signature != LOCAL_FILE_HEADER_SIGNATURE) {
5017 LD_LOGE("Open uncompressed library: read local file header signature error.");
5018 fclose(zip_file);
5019 return -1;
5020 }
5021
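/* The payload of a stored (uncompressed) entry begins immediately after the
 * fixed-size local file header plus its variable-length name and extra fields. */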
5022 z_info->file_offset = c_dir_entry.local_header_offset + sizeof(zip_file_header) +
5023 zip_file_header.name_size + zip_file_header.extra_size;
5024 if (zip_file_header.compression_method != COMPRESS_STORED || z_info->file_offset % PAGE_SIZE != 0) {
5025 LD_LOGE("Open uncompressed library: open %{public}s in %{public}s failed because of misalignment or saved with compression."
5026 "compress method %{public}d, file offset %{public}lu",
5027 lib_path, zip_file_path, zip_file_header.compression_method, z_info->file_offset);
5028 fclose(zip_file);
5029 return -2;
5030 }
5031 z_info->found = true;
5032 break;
5033 }
5034
5035 memset(file_name, 0, sizeof(file_name));
5036 current_dir_pos += sizeof(c_dir_entry);
5037 current_dir_pos += c_dir_entry.name_size + c_dir_entry.extra_size + c_dir_entry.comment_size;
5038 }
5039 if (!z_info->found) {
5040 LD_LOGE("Open uncompressed library: %{public}s was not found in %{public}s.", lib_path, zip_file_path);
5041 fclose(zip_file);
5042 return -3;
5043 }
5044 z_info->fd = fileno(zip_file);
5045
5046 return 0;
5047 }
5048
5049 static bool task_check_xpm(struct loadtask *task)
5050 {
5051 size_t mapLen = sizeof(Ehdr);
5052 void *map = mmap(0, mapLen, PROT_READ, MAP_PRIVATE | MAP_XPM, task->fd, task->file_offset);
5053 if (map == MAP_FAILED) {
5054 LD_LOGE("Xpm check failed for %{public}s, errno for mmap is: %{public}d", task->name, errno);
5055 return false;
5056 }
5057 munmap(map, mapLen);
5058 return true;
5059 }
5060
5061 static bool map_library_header(struct loadtask *task)
5062 {
5063 off_t off_start;
5064 Phdr *ph;
5065 size_t i;
5066 size_t str_size;
5067 off_t str_table;
5068 if (!task_check_xpm(task)) {
5069 return false;
5070 }
5071
5072 ssize_t l = pread(task->fd, task->ehdr_buf, sizeof task->ehdr_buf, task->file_offset);
5073 task->eh = task->ehdr_buf;
5074 if (l < 0) {
5075 LD_LOGE("Error mapping header %{public}s: failed to read fd errno: %{public}d", task->name, errno);
5076 return false;
5077 }
5078 if (l < sizeof(Ehdr) || (task->eh->e_type != ET_DYN && task->eh->e_type != ET_EXEC)) {
5079 LD_LOGE("Error mapping header %{public}s: invaliled Ehdr l=%{public}d e_type=%{public}hu",
5080 task->name, (int)l, task->eh->e_type);
5081 goto noexec;
5082 }
5083 task->phsize = task->eh->e_phentsize * task->eh->e_phnum;
5084 if (task->phsize > sizeof task->ehdr_buf - sizeof(Ehdr)) {
5085 task->allocated_buf = malloc(task->phsize);
5086 if (!task->allocated_buf) {
5087 LD_LOGE("Error mapping header %{public}s: failed to alloc memory errno: %{public}d", task->name, errno);
5088 return false;
5089 }
5090 l = pread(task->fd, task->allocated_buf, task->phsize, task->eh->e_phoff + task->file_offset);
5091 if (l < 0) {
5092 LD_LOGE("Error mapping header %{public}s: failed to pread errno: %{public}d", task->name, errno);
5093 goto error;
5094 }
5095 if (l != task->phsize) {
5096 LD_LOGE("Error mapping header %{public}s: unmatched phsize errno: %{public}d", task->name, errno);
5097 goto noexec;
5098 }
5099 ph = task->ph0 = task->allocated_buf;
5100 } else if (task->eh->e_phoff + task->phsize > l) {
5101 l = pread(task->fd, task->ehdr_buf + 1, task->phsize, task->eh->e_phoff + task->file_offset);
5102 if (l < 0) {
5103 LD_LOGE("Error mapping header %{public}s: failed to pread errno: %{public}d", task->name, errno);
5104 goto error;
5105 }
5106 if (l != task->phsize) {
5107 LD_LOGE("Error mapping header %{public}s: unmatched phsize", task->name);
5108 goto noexec;
5109 }
5110 ph = task->ph0 = (void *)(task->ehdr_buf + 1);
5111 } else {
5112 ph = task->ph0 = (void *)((char *)task->ehdr_buf + task->eh->e_phoff);
5113 }
5114
5115 for (i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
5116 if (ph->p_type == PT_DYNAMIC) {
5117 task->dyn = ph->p_vaddr;
5118 } else if (ph->p_type == PT_TLS) {
5119 task->tls_image = ph->p_vaddr;
5120 task->tls.align = ph->p_align;
5121 task->tls.len = ph->p_filesz;
5122 task->tls.size = ph->p_memsz;
5123 }
5124
5125 if (ph->p_type != PT_DYNAMIC) {
5126 continue;
5127 }
5128 // map the dynamic segment and the string table of the library
5129 off_start = ph->p_offset;
5130 off_start &= -PAGE_SIZE;
5131 task->dyn_map_len = ph->p_memsz + (ph->p_offset - off_start);
5132 /* The default value of file_offset is 0.
5133 * file_offset may be greater than 0 when the library is opened from a zip file,
5134 * in which case it is guaranteed to be PAGE_SIZE aligned. */
5135 task->dyn_map = mmap(0, task->dyn_map_len, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
5136 if (task->dyn_map == MAP_FAILED) {
5137 LD_LOGE("Error mapping header %{public}s: failed to map dynamic section errno: %{public}d", task->name, errno);
5138 goto error;
5139 }
5140 task->dyn_addr = (size_t *)((unsigned char *)task->dyn_map + (ph->p_offset - off_start));
5141 size_t dyn_tmp;
5142 if (search_vec(task->dyn_addr, &dyn_tmp, DT_STRTAB)) {
5143 str_table = dyn_tmp;
5144 } else {
5145 LD_LOGE("Error mapping header %{public}s: DT_STRTAB not found", task->name);
5146 goto error;
5147 }
5148 if (search_vec(task->dyn_addr, &dyn_tmp, DT_STRSZ)) {
5149 str_size = dyn_tmp;
5150 } else {
5151 LD_LOGE("Error mapping header %{public}s: DT_STRSZ not found", task->name);
5152 goto error;
5153 }
5154 }
5155
5156 task->shsize = task->eh->e_shentsize * task->eh->e_shnum;
5157 off_start = task->eh->e_shoff;
5158 off_start &= -PAGE_SIZE;
5159 task->shsize += task->eh->e_shoff - off_start;
5160 task->shdr_allocated_buf = mmap(0, task->shsize, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
5161 if (task->shdr_allocated_buf == MAP_FAILED) {
5162 LD_LOGE("Error mapping section header %{public}s: failed to map shdr_allocated_buf errno: %{public}d",
5163 task->name, errno);
5164 goto error;
5165 }
5166 Shdr *sh = (Shdr *)((char *)task->shdr_allocated_buf + task->eh->e_shoff - off_start);
5167 for (i = task->eh->e_shnum; i; i--, sh = (void *)((char *)sh + task->eh->e_shentsize)) {
5168 if (sh->sh_type != SHT_STRTAB || sh->sh_addr != str_table || sh->sh_size != str_size) {
5169 continue;
5170 }
5171 off_start = sh->sh_offset;
5172 off_start &= -PAGE_SIZE;
5173 task->str_map_len = sh->sh_size + (sh->sh_offset - off_start);
5174 task->str_map = mmap(0, task->str_map_len, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
5175 if (task->str_map == MAP_FAILED) {
5176 LD_LOGE("Error mapping section header %{public}s: failed to map string section errno: %{public}d",
5177 task->name, errno);
5178 goto error;
5179 }
5180 task->str_addr = (char *)task->str_map + sh->sh_offset - off_start;
5181 break;
5182 }
5183 if (!task->dyn) {
5184 LD_LOGE("Error mapping header %{public}s: dynamic section not found", task->name);
5185 goto noexec;
5186 }
5187 if (task->shdr_allocated_buf != MAP_FAILED) {
5188 munmap(task->shdr_allocated_buf, task->shsize);
5189 task->shdr_allocated_buf = MAP_FAILED;
5190 }
5191 return true;
5192 noexec:
5193 errno = ENOEXEC;
5194 error:
5195 free(task->allocated_buf);
5196 task->allocated_buf = NULL;
5197 if (task->shdr_allocated_buf != MAP_FAILED) {
5198 munmap(task->shdr_allocated_buf, task->shsize);
5199 task->shdr_allocated_buf = MAP_FAILED;
5200 }
5201 return false;
5202 }
5203
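/* Full mapping stage. A first pass over the program headers records the
 * PT_GNU_RELRO range, the PT_GNU_STACK size hint and the min/max PT_LOAD
 * addresses; the covering range is then reserved (optionally inside the
 * caller-supplied reserved region, otherwise inside an aligned anonymous
 * scratch reservation) and each PT_LOAD is mapped over it with its own
 * protections, with the zero-fill tail mapped anonymously and named as .bss. */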
5204 static bool task_map_library(struct loadtask *task, struct reserved_address_params *reserved_params)
5205 {
5206 size_t addr_min = SIZE_MAX, addr_max = 0, map_len;
5207 size_t this_min, this_max;
5208 size_t nsegs = 0;
5209 off_t off_start;
5210 Phdr *ph = task->ph0;
5211 unsigned prot;
5212 #if defined(BTI_SUPPORT) && (!defined(__LITEOS__))
5213 unsigned ext_prot = 0;
5214 #endif
5215 unsigned char *map = MAP_FAILED, *base;
5216 size_t i;
5217 int map_flags = MAP_PRIVATE;
5218 size_t start_addr;
5219 size_t start_alignment = PAGE_SIZE;
5220 bool hugepage_enabled = false;
5221
5222 for (i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
5223 if (ph->p_type == PT_GNU_RELRO) {
5224 task->p->relro_start = ph->p_vaddr & -PAGE_SIZE;
5225 task->p->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
5226 } else if (ph->p_type == PT_GNU_STACK) {
5227 if (!runtime && ph->p_memsz > __default_stacksize) {
5228 __default_stacksize =
5229 ph->p_memsz < DEFAULT_STACK_MAX ?
5230 ph->p_memsz : DEFAULT_STACK_MAX;
5231 }
5232 }
5233 #if defined(BTI_SUPPORT) && (!defined(__LITEOS__))
5234 /* Security enhancement: parse extra PROT in ELF.
5235 * Currently only for BTI protection. */
5236 if (ph->p_type == PT_GNU_PROPERTY || ph->p_type == PT_NOTE) {
5237 ext_prot |= parse_extra_prot_fd(task->fd, ph);
5238 }
5239 #endif
5240 if (ph->p_type != PT_LOAD) {
5241 continue;
5242 }
5243 nsegs++;
5244 if (ph->p_vaddr < addr_min) {
5245 addr_min = ph->p_vaddr;
5246 off_start = ph->p_offset;
5247 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
5248 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
5249 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
5250 #if defined(BTI_SUPPORT) && (!defined(__LITEOS__))
5251 if (ph->p_flags & PF_X) {
5252 prot |= ext_prot;
5253 }
5254 #endif
5255 }
5256 if (ph->p_vaddr + ph->p_memsz > addr_max) {
5257 addr_max = ph->p_vaddr + ph->p_memsz;
5258 }
5259 }
5260 if (!task->dyn) {
5261 LD_LOGE("Error mapping library: !task->dyn dynamic section not found task->name=%{public}s", task->name);
5262 goto noexec;
5263 }
5264 if (DL_FDPIC && !(task->eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
5265 task->p->loadmap = calloc(1, sizeof(struct fdpic_loadmap) + nsegs * sizeof(struct fdpic_loadseg));
5266 if (!task->p->loadmap) {
5267 LD_LOGE("Error mapping library: calloc failed errno=%{public}d nsegs=%{public}zu", errno, nsegs);
5268 goto error;
5269 }
5270 task->p->loadmap->nsegs = nsegs;
5271 for (ph = task->ph0, i = 0; i < nsegs; ph = (void *)((char *)ph + task->eh->e_phentsize)) {
5272 if (ph->p_type != PT_LOAD) {
5273 continue;
5274 }
5275 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
5276 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
5277 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
5278 #if defined(BTI_SUPPORT) && (!defined(__LITEOS__))
5279 if (ph->p_flags & PF_X) {
5280 prot |= ext_prot;
5281 }
5282 #endif
5283 map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE - 1),
5284 prot, MAP_PRIVATE,
5285 task->fd, (ph->p_offset & -PAGE_SIZE) + task->file_offset);
5286 if (map == MAP_FAILED) {
5287 unmap_library(task->p);
5288 LD_LOGE("Error mapping library: PT_LOAD mmap failed task->name=%{public}s errno=%{public}d map_len=%{public}lu",
5289 task->name, errno, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE - 1));
5290 goto error;
5291 }
5292 task->p->loadmap->segs[i].addr = (size_t)map +
5293 (ph->p_vaddr & PAGE_SIZE - 1);
5294 task->p->loadmap->segs[i].p_vaddr = ph->p_vaddr;
5295 task->p->loadmap->segs[i].p_memsz = ph->p_memsz;
5296 i++;
5297 if (prot & PROT_WRITE) {
5298 size_t brk = (ph->p_vaddr & PAGE_SIZE - 1) + ph->p_filesz;
5299 size_t pgbrk = (brk + PAGE_SIZE - 1) & -PAGE_SIZE;
5300 size_t pgend = (brk + ph->p_memsz - ph->p_filesz + PAGE_SIZE - 1) & -PAGE_SIZE;
5301 if (pgend > pgbrk && mmap_fixed(map + pgbrk,
5302 pgend - pgbrk, prot,
5303 MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
5304 -1, off_start) == MAP_FAILED) {
5305 LD_LOGE("Error mapping library: PROT_WRITE mmap_fixed failed errno=%{public}d", errno);
5306 goto error;
}
5307 memset(map + brk, 0, pgbrk - brk);
5308 }
5309 }
5310 map = (void *)task->p->loadmap->segs[0].addr;
5311 map_len = 0;
5312 goto done_mapping;
5313 }
5314 addr_max += PAGE_SIZE - 1;
5315 addr_max &= -PAGE_SIZE;
5316 addr_min &= -PAGE_SIZE;
5317 off_start &= -PAGE_SIZE;
5318 map_len = addr_max - addr_min + off_start;
5319 start_addr = addr_min;
5320
5321 hugepage_enabled = get_transparent_hugepages_supported();
5322 if (hugepage_enabled) {
5323 size_t maximum_alignment = phdr_table_get_maxinum_alignment(task->ph0, task->eh->e_phnum);
5324
5325 start_alignment = maximum_alignment == KPMD_SIZE ? KPMD_SIZE : PAGE_SIZE;
5326 }
5327
5328 if (reserved_params) {
5329 if (map_len > reserved_params->reserved_size) {
5330 if (reserved_params->must_use_reserved) {
5331 LD_LOGE("Error mapping library: map len is larger than reserved address task->name=%{public}s", task->name);
5332 goto error;
5333 }
5334 } else {
5335 start_addr = ((size_t)reserved_params->start_addr - 1 + PAGE_SIZE) & -PAGE_SIZE;
5336 map_flags |= MAP_FIXED;
5337 }
5338 }
5339
5340 /* we will find a mapping_align aligned address as the start of dso
5341 * so we need a tmp_map_len as map_len + mapping_align to make sure
5342 * we have enough space to shift the dso to the correct location. */
5343 size_t mapping_align = start_alignment > LIBRARY_ALIGNMENT ? start_alignment : LIBRARY_ALIGNMENT;
5344 size_t tmp_map_len = ALIGN(map_len, mapping_align) + mapping_align - PAGE_SIZE;
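/* Illustrative example (assumed values, 4 KiB pages, LIBRARY_ALIGNMENT taken as
 * 0x10000 here purely for the arithmetic): with map_len = 0x5000 this gives
 * mapping_align = 0x10000 and tmp_map_len = ALIGN(0x5000, 0x10000) + 0x10000 -
 * 0x1000 = 0x1f000, so any address inside the scratch reservation can be
 * rounded up to the next 0x10000 boundary and still leave map_len bytes. */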
5345
5346 /* map the whole load segments with PROT_READ first for security consideration. */
5347 prot = PROT_READ;
5348
5349 /* if reserved_params exists, we should use start_addr as the preferred address for the mmap operation */
5350 if (reserved_params) {
5351 map = DL_NOMMU_SUPPORT
5352 ? mmap((void *)start_addr, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
5353 : mmap((void *)start_addr, map_len, prot, map_flags, task->fd, off_start + task->file_offset);
5354 if (map == MAP_FAILED) {
5355 LD_LOGE("Error mapping library: reserved_params mmap failed errno=%{public}d DL_NOMMU_SUPPORT=%{public}d"
5356 " task->fd=%{public}d task->name=%{public}s map_len=%{public}lu",
5357 errno, DL_NOMMU_SUPPORT, task->fd, task->name, map_len);
5358 goto error;
5359 }
5360 if (reserved_params && map_len < reserved_params->reserved_size) {
5361 reserved_params->reserved_size -= (map_len + (start_addr - (size_t)reserved_params->start_addr));
5362 reserved_params->start_addr = (void *)((uint8_t *)map + map_len);
5363 }
5364 /* if reserved_params does not exist, we should use real_map as the preferred address for the mmap operation */
5365 } else {
5366 /* use tmp_map_len to mmap enough space for the dso with anonymous mapping */
5367 unsigned char *temp_map = mmap((void *)NULL, tmp_map_len, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
5368 if (temp_map == MAP_FAILED) {
5369 LD_LOGE("Error mapping library: !reserved_params mmap failed errno=%{public}d tmp_map_len=%{public}lu",
5370 errno, tmp_map_len);
5371 goto error;
5372 }
5373
5374 /* find the mapping_align aligned address */
5375 unsigned char *real_map = (unsigned char*)ALIGN((uintptr_t)temp_map, mapping_align);
5376
5377 map = DL_NOMMU_SUPPORT
5378 ? mmap(real_map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
5379 /* use map_len to mmap correct space for the dso with file mapping */
5380 : mmap(real_map, map_len, prot, map_flags | MAP_FIXED, task->fd, off_start + task->file_offset);
5381 if (map == MAP_FAILED || map != real_map) {
5382 LD_LOGE("Error mapping library: !reserved_params mmap failed errno=%{public}d DL_NOMMU_SUPPORT=%{public}d"
5383 "task->fd=%{public}d task->name=%{public}s map_len=%{public}lu",
5384 errno, DL_NOMMU_SUPPORT, task->fd, task->name, map_len);
5385 goto error;
5386 }
5387
5388 /* Free unused memory.
5389 * |--------------------------tmp_map_len--------------------------|
5390 * ^ ^ ^ ^
5391 * |---unused_part_1---|---------map_len-------|---unused_part_2---|
5392 * temp_map real_map(aligned) temp_map_end
5393 */
5394 unsigned char *temp_map_end = temp_map + tmp_map_len;
5395 size_t unused_part_1 = real_map - temp_map;
5396 size_t unused_part_2 = temp_map_end - (real_map + map_len);
5397 if (unused_part_1 > 0) {
5398 int res1 = munmap(temp_map, unused_part_1);
5399 if (res1 == -1) {
5400 LD_LOGE("munmap unused part 1 failed, errno:%{public}d", errno);
5401 }
5402 }
5403
5404 if (unused_part_2 > 0) {
5405 int res2 = munmap(real_map + map_len, unused_part_2);
5406 if (res2 == -1) {
5407 LD_LOGE("munmap unused part 2 failed, errno:%{public}d", errno);
5408 }
5409 }
5410 }
5411 task->p->map = map;
5412 task->p->map_len = map_len;
5413 /* If the loaded file is not relocatable and the requested address is
5414 * not available, then the load operation must fail. */
5415 if (task->eh->e_type != ET_DYN && addr_min && map != (void *)addr_min) {
5416 LD_LOGE("Error mapping library: ET_DYN task->name=%{public}s", task->name);
5417 errno = EBUSY;
5418 goto error;
5419 }
5420 base = map - addr_min;
5421 task->p->phdr = 0;
5422 task->p->phnum = 0;
5423 for (ph = task->ph0, i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
5424 if (ph->p_type == PT_OHOS_RANDOMDATA) {
5425 fill_random_data((void *)(ph->p_vaddr + base), ph->p_memsz);
5426 continue;
5427 }
5428 if (ph->p_type != PT_LOAD) {
5429 continue;
5430 }
5431 /* Check if the programs headers are in this load segment, and
5432 * if so, record the address for use by dl_iterate_phdr. */
5433 if (!task->p->phdr && task->eh->e_phoff >= ph->p_offset
5434 && task->eh->e_phoff + task->phsize <= ph->p_offset + ph->p_filesz) {
5435 task->p->phdr = (void *)(base + ph->p_vaddr + (task->eh->e_phoff - ph->p_offset));
5436 task->p->phnum = task->eh->e_phnum;
5437 task->p->phentsize = task->eh->e_phentsize;
5438 }
5439 this_min = ph->p_vaddr & -PAGE_SIZE;
5440 this_max = ph->p_vaddr + ph->p_memsz + PAGE_SIZE - 1 & -PAGE_SIZE;
5441 off_start = ph->p_offset & -PAGE_SIZE;
5442 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
5443 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
5444 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
5445 #if defined(BTI_SUPPORT) && (!defined(__LITEOS__))
5446 if (ph->p_flags & PF_X) {
5447 prot |= ext_prot;
5448 }
5449 #endif
5450 /* Reuse the existing mapping for the lowest-address LOAD */
5451 if (mmap_fixed(
5452 base + this_min,
5453 this_max - this_min,
5454 prot, MAP_PRIVATE | MAP_FIXED,
5455 task->fd,
5456 off_start + task->file_offset) == MAP_FAILED) {
5457 LD_LOGE("Error mapping library: mmap fix failed task->name=%{public}s errno=%{public}d", task->name, errno);
5458 goto error;
5459 }
5460 if ((ph->p_flags & PF_X) && (ph->p_align == KPMD_SIZE) && hugepage_enabled)
5461 madvise(base + this_min, this_max - this_min, MADV_HUGEPAGE);
5462 if (ph->p_memsz > ph->p_filesz && (ph->p_flags & PF_W)) {
5463 size_t brk = (size_t)base + ph->p_vaddr + ph->p_filesz;
5464 size_t pgbrk = brk + PAGE_SIZE - 1 & -PAGE_SIZE;
5465 size_t zeromap_size = (size_t)base + this_max - pgbrk;
5466 memset((void *)brk, 0, pgbrk - brk & PAGE_SIZE - 1);
5467 if (pgbrk - (size_t)base < this_max && mmap_fixed(
5468 (void *)pgbrk,
5469 zeromap_size,
5470 prot,
5471 MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
5472 -1,
5473 0) == MAP_FAILED) {
5474 LD_LOGE("Error mapping library: PF_W mmap fix failed errno=%{public}d task->name=%{public}s zeromap_size=%{public}lu",
5475 errno, task->name, zeromap_size);
5476 goto error;
5477 }
5478 set_bss_vma_name(task->p->name, (void *)pgbrk, zeromap_size);
5479 }
5480 }
5481 for (i = 0; ((size_t *)(base + task->dyn))[i]; i += NEXT_DYNAMIC_INDEX) {
5482 if (((size_t *)(base + task->dyn))[i] == DT_TEXTREL) {
5483 if (mprotect(map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC) && errno != ENOSYS) {
5484 LD_LOGE("Error mapping library: mprotect failed task->name=%{public}s errno=%{public}d", task->name, errno);
5485 goto error;
5486 }
5487 break;
5488 }
5489 }
5490 done_mapping:
5491 task->p->base = base;
5492 task->p->dynv = laddr(task->p, task->dyn);
5493 if (task->p->tls.size) {
5494 task->p->tls.image = laddr(task->p, task->tls_image);
5495 }
5496 free(task->allocated_buf);
5497 task->allocated_buf = NULL;
5498 return true;
5499 noexec:
5500 errno = ENOEXEC;
5501 error:
5502 if (map != MAP_FAILED) {
5503 unmap_library(task->p);
5504 }
5505 free(task->allocated_buf);
5506 task->allocated_buf = NULL;
5507 return false;
5508 }
5509
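/* Resolve an already-open fd back to its canonical path by readlink() on
 * /proc/self/fd/N, so namespace accessibility checks can be applied to the
 * real location of a library found via DT_RPATH. */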
5510 static bool resolve_fd_to_realpath(struct loadtask *task)
5511 {
5512 char proc_self_fd[32];
5513 static char resolved_path[PATH_MAX];
5514
5515 int ret = snprintf(proc_self_fd, sizeof(proc_self_fd), "/proc/self/fd/%d", task->fd);
5516 if (ret < 0 || ret >= sizeof(proc_self_fd)) {
5517 return false;
5518 }
5519 ssize_t len = readlink(proc_self_fd, resolved_path, sizeof(resolved_path) - 1);
5520 if (len < 0) {
5521 return false;
5522 }
5523 resolved_path[len] = '\0';
5524 strncpy(task->buf, resolved_path, PATH_MAX);
5525
5526 return true;
5527 }
5528
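/* Find a library and map its headers. Reserved libc-internal names are
 * redirected to the running ldso; explicit paths may point into a zip archive
 * (ZIP_FILE_PATH_SEPARATOR); bare names are searched through the namespace's
 * env_paths, the requesting DSOs' DT_RPATH/DT_RUNPATH and the namespace
 * lib_paths, with a fallback to inherited namespaces. Objects already loaded
 * are reused, matched by name or by (st_dev, st_ino, file_offset), before a
 * new struct dso (including its TLS/DTV reservation) is allocated. */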
5529 static bool load_library_header(struct loadtask *task)
5530 {
5531 const char *name = task->name;
5532 struct dso *needed_by = task->needed_by;
5533 ns_t *namespace = task->namespace;
5534 bool check_inherited = task->check_inherited;
5535 struct zip_info z_info;
5536
5537 bool map = false;
5538 struct stat st;
5539 size_t alloc_size;
5540 int n_th = 0;
5541 int is_self = 0;
5542
5543 if (!*name) {
5544 errno = EINVAL;
5545 return false;
5546 }
5547
5548 /* Catch and block attempts to reload the implementation itself */
5549 if (name[NAME_INDEX_ZERO] == 'l' && name[NAME_INDEX_ONE] == 'i' && name[NAME_INDEX_TWO] == 'b') {
5550 static const char reserved[] =
5551 "c.pthread.rt.m.dl.util.xnet.";
5552 const char *rp, *next;
5553 for (rp = reserved; *rp; rp = next) {
5554 next = strchr(rp, '.') + 1;
5555 if (strncmp(name + NAME_INDEX_THREE, rp, next - rp) == 0) {
5556 break;
5557 }
5558 }
5559 if (*rp) {
5560 if (ldd_mode) {
5561 /* Track which names have been resolved
5562 * and only report each one once. */
5563 static unsigned reported;
5564 unsigned mask = 1U << (rp - reserved);
5565 if (!(reported & mask)) {
5566 reported |= mask;
5567 dprintf(1, "\t%s => %s (%p)\n",
5568 name, ldso.name,
5569 ldso.base);
5570 }
5571 }
5572 is_self = 1;
5573 }
5574 }
5575 if (!strcmp(name, ldso.name)) {
5576 is_self = 1;
5577 }
5578 if (is_self) {
5579 if (!ldso.prev) {
5580 tail->next = &ldso;
5581 ldso.prev = tail;
5582 tail = &ldso;
5583 ldso.namespace = namespace;
5584 ns_add_dso(namespace, &ldso);
5585 }
5586 task->isloaded = true;
5587 task->p = &ldso;
5588 return true;
5589 }
5590 if (strchr(name, '/')) {
5591 char *separator = strstr(name, ZIP_FILE_PATH_SEPARATOR);
5592 if (separator != NULL) {
5593 int res = open_uncompressed_library_in_zipfile(name, &z_info, separator);
5594 if (!res) {
5595 task->pathname = name;
5596 if (!is_accessible(namespace, task->pathname, g_is_asan, check_inherited)) {
5597 LD_LOGE("Open uncompressed library: check ns accessible failed, pathname %{public}s namespace %{public}s.",
5598 task->pathname, namespace ? namespace->ns_name : "NULL");
5599 task->fd = -1;
5600 } else {
5601 task->fd = z_info.fd;
5602 task->file_offset = z_info.file_offset;
5603 }
5604 } else {
5605 LD_LOGE("Open uncompressed library in zip file failed, name:%{public}s res:%{public}d", name, res);
5606 return false;
5607 }
5608 } else {
5609 task->pathname = name;
5610 if (!is_accessible(namespace, task->pathname, g_is_asan, check_inherited)) {
5611 LD_LOGE("Open absolute_path library: check ns accessible failed, pathname %{public}s namespace %{public}s.",
5612 task->pathname, namespace ? namespace->ns_name : "NULL");
5613 task->fd = -1;
5614 } else {
5615 task->fd = open(name, O_RDONLY | O_CLOEXEC);
5616 }
5617 }
5618 } else {
5619 /* Search for the name to see if it's already loaded */
5620 /* Search in namespace */
5621 task->p = find_library_by_name(name, namespace, check_inherited);
5622 if (task->p) {
5623 task->isloaded = true;
5624 LD_LOGD("find_library_by_name(name=%{public}s ns=%{public}s) already loaded by %{public}s in %{public}s namespace ",
5625 name, namespace->ns_name, task->p->name, task->p->namespace->ns_name);
5626 return true;
5627 }
5628 if (strlen(name) > NAME_MAX) {
5629 LD_LOGE("load_library name length is larger than NAME_MAX:%{public}s.", name);
5630 return false;
5631 }
5632 task->fd = -1;
5633 if (namespace->env_paths) {
5634 open_library_by_path(name, namespace->env_paths, task, &z_info);
5635 }
5636 for (task->p = needed_by; task->fd == -1 && task->p; task->p = task->p->needed_by) {
5637 if (fixup_rpath(task->p, task->buf, sizeof task->buf) < 0) {
5638 task->fd = INVALID_FD_INHIBIT_FURTHER_SEARCH; /* Inhibit further search. */
5639 }
5640 if (task->p->rpath) {
5641 open_library_by_path(name, task->p->rpath, task, &z_info);
5642 if (task->fd != -1 && resolve_fd_to_realpath(task)) {
5643 if (!is_accessible(namespace, task->buf, g_is_asan, check_inherited)) {
5644 LD_LOGE("Open library: check ns accessible failed, name %{public}s namespace %{public}s.",
5645 name, namespace ? namespace->ns_name : "NULL");
5646 close(task->fd);
5647 task->fd = -1;
5648 }
5649 }
5650 }
5651 }
5652 if (g_is_asan) {
5653 handle_asan_path_open_by_task(task->fd, name, namespace, task, &z_info);
5654 LD_LOGD("load_library handle_asan_path_open_by_task fd:%{public}d.", task->fd);
5655 } else {
5656 if (task->fd == -1 && namespace->lib_paths) {
5657 open_library_by_path(name, namespace->lib_paths, task, &z_info);
5658 LD_LOGD("load_library no asan lib_paths path_open fd:%{public}d.", task->fd);
5659 }
5660 }
5661 task->pathname = task->buf;
5662 }
5663 if (task->fd < 0) {
5664 if (!check_inherited || !namespace->ns_inherits) {
5665 LD_LOGE("Error loading header %{public}s, namespace %{public}s has no inherits, errno=%{public}d",
5666 task->name, namespace->ns_name, errno);
5667 return false;
5668 }
5669 /* Load lib in inherited namespace. Do not check inherited again.*/
5670 for (size_t i = 0; i < namespace->ns_inherits->num; i++) {
5671 ns_inherit *inherit = namespace->ns_inherits->inherits[i];
5672 if (strchr(name, '/') == 0 && !is_sharable(inherit, name)) {
5673 continue;
5674 }
5675 task->namespace = inherit->inherited_ns;
5676 task->check_inherited = false;
5677 if (load_library_header(task)) {
5678 return true;
5679 }
5680 }
5681 LD_LOGE("Error loading header: can't find library %{public}s in namespace: %{public}s",
5682 task->name, namespace->ns_name);
5683 return false;
5684 }
5685
5686 if (fstat(task->fd, &st) < 0) {
5687 LD_LOGE("Error loading header %{public}s: failed to get file state errno=%{public}d", task->name, errno);
5688 close(task->fd);
5689 task->fd = -1;
5690 return false;
5691 }
5692 /* Search in namespace */
5693 task->p = find_library_by_fstat(&st, namespace, check_inherited, task->file_offset);
5694 if (task->p) {
5695 /* If this library was previously loaded with a
5696 * pathname but a search found the same inode,
5697 * setup its shortname so it can be found by name. */
5698 if (!task->p->shortname && task->pathname != name) {
5699 task->p->shortname = strrchr(task->p->name, '/') + 1;
5700 }
5701 close(task->fd);
5702 task->fd = -1;
5703 task->isloaded = true;
5704 LD_LOGD("find_library_by_fstat(name=%{public}s ns=%{public}s) already loaded by %{public}s in %{public}s namespace ",
5705 name, namespace->ns_name, task->p->name, task->p->namespace->ns_name);
5706 return true;
5707 }
5708
5709 map = noload ? 0 : map_library_header(task);
5710 if (!map) {
5711 LD_LOGE("Error loading header %{public}s: failed to map header", task->name);
5712 close(task->fd);
5713 task->fd = -1;
5714 return false;
5715 }
5716
5717 /* Allocate storage for the new DSO. When there is TLS, this
5718 * storage must include a reservation for all pre-existing
5719 * threads to obtain copies of both the new TLS, and an
5720 * extended DTV capable of storing an additional slot for
5721 * the newly-loaded DSO. */
5722 alloc_size = sizeof(struct dso) + strlen(task->pathname) + 1;
5723 if (runtime && task->tls.size) {
5724 size_t per_th = task->tls.size + task->tls.align + sizeof(void *) * (tls_cnt + TLS_CNT_INCREASE);
5725 n_th = libc.threads_minus_1 + 1;
5726 if (n_th > SSIZE_MAX / per_th) {
5727 alloc_size = SIZE_MAX;
5728 } else {
5729 alloc_size += n_th * per_th;
5730 }
5731 }
5732 task->p = calloc(1, alloc_size);
5733 if (!task->p) {
5734 LD_LOGE("Error loading header %{public}s: failed to allocate dso", task->name);
5735 close(task->fd);
5736 task->fd = -1;
5737 return false;
5738 }
5739 task->p->dev = st.st_dev;
5740 task->p->ino = st.st_ino;
5741 task->p->file_offset = task->file_offset;
5742 task->p->needed_by = needed_by;
5743 task->p->name = task->p->buf;
5744 strcpy(task->p->name, task->pathname);
5745 task->p->tls = task->tls;
5746 task->p->dynv = task->dyn_addr;
5747 task->p->strings = task->str_addr;
5748 size_t rpath_offset;
5749 size_t runpath_offset;
5750 if (search_vec(task->p->dynv, &rpath_offset, DT_RPATH))
5751 task->p->rpath_orig = task->p->strings + rpath_offset;
5752 if (search_vec(task->p->dynv, &runpath_offset, DT_RUNPATH))
5753 task->p->rpath_orig = task->p->strings + runpath_offset;
5754
5755 /* Add a shortname only if name arg was not an explicit pathname. */
5756 if (task->pathname != name) {
5757 task->p->shortname = strrchr(task->p->name, '/') + 1;
5758 }
5759
5760 if (task->p->tls.size) {
5761 task->p->tls_id = ++tls_cnt;
5762 task->p->new_dtv = (void *)(-sizeof(size_t) &
5763 (uintptr_t)(task->p->name + strlen(task->p->name) + sizeof(size_t)));
5764 task->p->new_tls = (void *)(task->p->new_dtv + n_th * (tls_cnt + 1));
5765 }
5766
5767 tail->next = task->p;
5768 task->p->prev = tail;
5769 tail = task->p;
5770
5771 /* Add dso to namespace */
5772 task->p->namespace = namespace;
5773 ns_add_dso(namespace, task->p);
5774 return true;
5775 }
5776
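/* Map the full library for a task whose headers were loaded earlier. If the
 * mapped object turns out to be another C library (it exports both
 * __libc_start_main and stdin), it is closed again and the request is
 * redirected to "libc.so" so two libcs never end up in one process. */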
5777 static void task_load_library(struct loadtask *task, struct reserved_address_params *reserved_params)
5778 {
5779 LD_LOGD("load_library loading ns=%{public}s name=%{public}s by_dlopen=%{public}d", task->namespace->ns_name, task->p->name, runtime);
5780 bool map = noload ? 0 : task_map_library(task, reserved_params);
5781 __close(task->fd);
5782 task->fd = -1;
5783 if (!map) {
5784 LD_LOGE("Error loading library %{public}s: failed to map library noload=%{public}d errno=%{public}d",
5785 task->name, noload, errno);
5786 error("Error loading library %s: failed to map library noload=%d errno=%d", task->name, noload, errno);
5787 if (runtime) {
5788 longjmp(*rtld_fail, 1);
5789 }
5790 return;
5791 }
5792
5793 /* Avoid the danger of getting two versions of libc mapped into the
5794 * same process when an absolute pathname was used. The symbols
5795 * checked are chosen to catch both musl and glibc, and to avoid
5796 * false positives from interposition-hack libraries. */
5797 decode_dyn(task->p);
5798 if (find_sym(task->p, "__libc_start_main", 1).sym &&
5799 find_sym(task->p, "stdin", 1).sym) {
5800 do_dlclose(task->p, 0);
5801 task->p = NULL;
5802 free((void*)task->name);
5803 task->name = ld_strdup("libc.so");
5804 task->check_inherited = true;
5805 if (!load_library_header(task)) {
5806 LD_LOGE("Error loading library %{public}s: failed to load libc.so", task->name);
5807 error("Error loading library %s: failed to load libc.so", task->name);
5808 if (runtime) {
5809 longjmp(*rtld_fail, 1);
5810 }
5811 }
5812 return;
5813 }
5814 /* Past this point, if we haven't reached runtime yet, ldso has
5815 * committed either to use the mapped library or to abort execution.
5816 * Unmapping is not possible, so we can safely reclaim gaps. */
5817 if (!runtime) {
5818 reclaim_gaps(task->p);
5819 }
5820 task->p->runtime_loaded = runtime;
5821 if (runtime)
5822 task->p->by_dlopen = 1;
5823
5824 ++gencnt;
5825
5826 if (DL_FDPIC) {
5827 makefuncdescs(task->p);
5828 }
5829
5830 if (ldd_mode) {
5831 dprintf(1, "\t%s => %s (%p)\n", task->name, task->pathname, task->p->base);
5832 }
5833
5834 #ifdef ENABLE_HWASAN
5835 if (libc.load_hook) {
5836 libc.load_hook((long unsigned int)task->p->base, task->p->phdr, task->p->phnum);
5837 }
5838 #endif
5839 }
5840
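/* Build p->deps from its DT_NEEDED entries (plus, for the head object, every
 * preloaded DSO), creating a loadtask for each dependency whose headers are
 * not yet loaded and recording the reverse parent link. */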
5841 static void preload_direct_deps(struct dso *p, ns_t *namespace, struct loadtasks *tasks)
5842 {
5843 size_t i, cnt = 0;
5844 if (p->deps) {
5845 return;
5846 }
5847 /* For head, all preloads are direct pseudo-dependencies.
5848 * Count and include them now to avoid realloc later. */
5849 if (p == head) {
5850 for (struct dso *q = p->next; q; q = q->next) {
5851 cnt++;
5852 }
5853 }
5854 for (i = 0; p->dynv[i]; i += NEXT_DYNAMIC_INDEX) {
5855 if (p->dynv[i] == DT_NEEDED) {
5856 cnt++;
5857 }
5858 }
5859 /* Use builtin buffer for apps with no external deps, to
5860 * preserve property of no runtime failure paths. */
5861 p->deps = (p == head && cnt < MIN_DEPS_COUNT) ? builtin_deps :
5862 calloc(cnt + 1, sizeof *p->deps);
5863 if (!p->deps) {
5864 LD_LOGE("Error loading dependencies for %{public}s", p->name);
5865 error("Error loading dependencies for %s", p->name);
5866 if (runtime) {
5867 longjmp(*rtld_fail, 1);
5868 }
5869 }
5870 cnt = 0;
5871 if (p == head) {
5872 for (struct dso *q = p->next; q; q = q->next) {
5873 p->deps[cnt++] = q;
5874 }
5875 }
5876 for (i = 0; p->dynv[i]; i += NEXT_DYNAMIC_INDEX) {
5877 if (p->dynv[i] != DT_NEEDED) {
5878 continue;
5879 }
5880 const char* dtneed_name = p->strings + p->dynv[i + 1];
5881 LD_LOGD("load_library %{public}s adding DT_NEEDED task %{public}s namespace(%{public}s)", p->name, dtneed_name, namespace->ns_name);
5882 struct loadtask *task = create_loadtask(dtneed_name, p, namespace, true);
5883 if (!task) {
5884 LD_LOGE("Error loading dependencies %{public}s : create load task failed", p->name);
5885 error("Error loading dependencies for %s : create load task failed", p->name);
5886 if (runtime) {
5887 longjmp(*rtld_fail, 1);
5888 }
5889 continue;
5890 }
5891 LD_LOGD("loading shared library %{public}s: (needed by %{public}s)", p->strings + p->dynv[i+1], p->name);
5892 if (!load_library_header(task)) {
5893 free_task(task);
5894 task = NULL;
5895 LD_LOGE("Error loading shared library %{public}s: (needed by %{public}s)",
5896 p->strings + p->dynv[i + 1],
5897 p->name);
5898 error("Error loading shared library %s: %m (needed by %s)",
5899 p->strings + p->dynv[i + 1], p->name);
5900 if (runtime) {
5901 longjmp(*rtld_fail, 1);
5902 }
5903 continue;
5904 }
5905 p->deps[cnt++] = task->p;
5906 if (task->isloaded) {
5907 free_task(task);
5908 task = NULL;
5909 } else {
5910 append_loadtasks(tasks, task);
5911 }
5912 }
5913 p->deps[cnt] = 0;
5914 p->ndeps_direct = cnt;
5915 for (i = 0; i < p->ndeps_direct; i++) {
5916 add_dso_parent(p->deps[i], p);
5917 }
5918 }
5919
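/* Drop the temporary PT_DYNAMIC and string-table mappings created by
 * map_library_header once dependency resolution is finished; the fully mapped
 * image is used from then on. */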
5920 static void unmap_preloaded_sections(struct loadtasks *tasks)
5921 {
5922 struct loadtask *task = NULL;
5923 for (size_t i = 0; i < tasks->length; i++) {
5924 task = get_loadtask(tasks, i);
5925 if (!task) {
5926 continue;
5927 }
5928 if (task->dyn_map_len) {
5929 munmap(task->dyn_map, task->dyn_map_len);
5930 task->dyn_map = NULL;
5931 task->dyn_map_len = 0;
5932 if (task->p) {
5933 task->p->dynv = NULL;
5934 }
5935 }
5936 if (task->str_map_len) {
5937 munmap(task->str_map, task->str_map_len);
5938 task->str_map = NULL;
5939 task->str_map_len = 0;
5940 if (task->p) {
5941 task->p->strings = NULL;
5942 }
5943 }
5944 }
5945 }
5946
5947 static void preload_deps(struct dso *p, struct loadtasks *tasks)
5948 {
5949 if (p->deps) {
5950 return;
5951 }
5952 for (; p; p = p->next) {
5953 preload_direct_deps(p, p->namespace, tasks);
5954 }
5955 }
5956
5957 static void run_loadtasks(struct loadtasks *tasks, struct reserved_address_params *reserved_params)
5958 {
5959 struct loadtask *task = NULL;
5960 bool reserved_address = false;
5961 for (size_t i = 0; i < tasks->length; i++) {
5962 task = get_loadtask(tasks, i);
5963 if (task) {
5964 if (reserved_params) {
5965 reserved_address = reserved_params->reserved_address_recursive || (reserved_params->target == task->p);
5966 }
5967 task_load_library(task, reserved_address ? reserved_params : NULL);
5968 }
5969 }
5970 }
5971
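/* Assign static TLS offsets to every module with a TLS image and chain their
 * TLS descriptors onto libc.tls_head. With TLS_ABOVE_TP offsets grow upward
 * from the thread pointer, otherwise downward, respecting each module's
 * alignment in both cases. */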
5972 UT_STATIC void assign_tls(struct dso *p)
5973 {
5974 while (p) {
5975 if (p->tls.image) {
5976 tls_align = MAXP2(tls_align, p->tls.align);
5977 #ifdef TLS_ABOVE_TP
5978 p->tls.offset = tls_offset + ((p->tls.align - 1) &
5979 (-tls_offset + (uintptr_t)p->tls.image));
5980 tls_offset = p->tls.offset + p->tls.size;
5981 #else
5982 tls_offset += p->tls.size + p->tls.align - 1;
5983 tls_offset -= (tls_offset + (uintptr_t)p->tls.image)
5984 & (p->tls.align - 1);
5985 p->tls.offset = tls_offset;
5986 #endif
5987 if (tls_tail) {
5988 tls_tail->next = &p->tls;
5989 } else {
5990 libc.tls_head = &p->tls;
5991 }
5992 tls_tail = &p->tls;
5993 }
5994
5995 p = p->next;
5996 }
5997 }
5998
5999 UT_STATIC void load_preload(char *s, ns_t *ns, struct loadtasks *tasks)
6000 {
6001 int tmp;
6002 char *z;
6003
6004 struct loadtask *task = NULL;
6005 for (z = s; *z; s = z) {
6006 for (; *s && (isspace(*s) || *s == ':'); s++) {
6007 ;
6008 }
6009 for (z = s; *z && !isspace(*z) && *z != ':'; z++) {
6010 ;
6011 }
6012 tmp = *z;
6013 *z = 0;
6014 task = create_loadtask(s, NULL, ns, true);
6015 if (!task) {
6016 continue;
6017 }
6018 if (load_library_header(task)) {
6019 if (!task->isloaded) {
6020 append_loadtasks(tasks, task);
6021 task = NULL;
6022 }
6023 }
6024 if (task) {
6025 free_task(task);
6026 }
6027 *z = tmp;
6028 }
6029 }
6030 #endif
6031
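/* GNU_RELRO sharing, write side: copy this DSO's relocated RELRO region into
 * the caller-supplied fd, then map that same file range back over the region
 * read-only, so other processes loading the library with DL_EXT_USE_RELRO can
 * share the identical pages instead of keeping private dirty copies. */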
6032 static int serialize_gnu_relro(int fd, struct dso *dso, ssize_t *file_offset)
6033 {
6034 ssize_t count = dso->relro_end - dso->relro_start;
6035 ssize_t offset = 0;
6036 while (count > 0) {
6037 ssize_t write_size = TEMP_FAILURE_RETRY(write(fd, laddr(dso, dso->relro_start + offset), count));
6038 if (-1 == write_size) {
6039 LD_LOGE("Error serializing relro %{public}s: failed to write GNU_RELRO", dso->name);
6040 return -1;
6041 }
6042 offset += write_size;
6043 count -= write_size;
6044 }
6045
6046 ssize_t size = dso->relro_end - dso->relro_start;
6047 void *map = mmap(
6048 laddr(dso, dso->relro_start),
6049 size,
6050 PROT_READ,
6051 MAP_PRIVATE | MAP_FIXED,
6052 fd,
6053 *file_offset);
6054 if (map == MAP_FAILED) {
6055 LD_LOGE("Error serializing relro %{public}s: failed to map GNU_RELRO", dso->name);
6056 return -1;
6057 }
6058 *file_offset += size;
6059 return 0;
6060 }
6061
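/* GNU_RELRO sharing, read side: compare this process's relocated RELRO pages
 * with the serialized copy in fd, page by page, and remap each run of
 * identical pages read-only from the file so those pages can be shared. */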
6062 static int map_gnu_relro(int fd, struct dso *dso, ssize_t *file_offset)
6063 {
6064 ssize_t ext_fd_file_size = 0;
6065 struct stat ext_fd_file_stat;
6066 if (TEMP_FAILURE_RETRY(fstat(fd, &ext_fd_file_stat)) != 0) {
6067 LD_LOGE("Error mapping relro %{public}s: failed to get file state", dso->name);
6068 return -1;
6069 }
6070 ext_fd_file_size = ext_fd_file_stat.st_size;
6071
6072 void *ext_temp_map = MAP_FAILED;
6073 ext_temp_map = mmap(NULL, ext_fd_file_size, PROT_READ, MAP_PRIVATE, fd, 0);
6074 if (ext_temp_map == MAP_FAILED) {
6075 LD_LOGE("Error mapping relro %{public}s: failed to map fd", dso->name);
6076 return -1;
6077 }
6078
6079 char *file_base = (char *)(ext_temp_map) + *file_offset;
6080 char *mem_base = (char *)(laddr(dso, dso->relro_start));
6081 ssize_t start_offset = 0;
6082 ssize_t size = dso->relro_end - dso->relro_start;
6083
6084 if (size > ext_fd_file_size - *file_offset) {
6085 LD_LOGE("Error mapping relro %{public}s: invalid file size", dso->name);
munmap(ext_temp_map, ext_fd_file_size);
6086 return -1;
6087 }
6088 while (start_offset < size) {
6089 // Find start location.
6090 while (start_offset < size) {
6091 if (memcmp(mem_base + start_offset, file_base + start_offset, PAGE_SIZE) == 0) {
6092 break;
6093 }
6094 start_offset += PAGE_SIZE;
6095 }
6096
6097 // Find end location.
6098 ssize_t end_offset = start_offset;
6099 while (end_offset < size) {
6100 if (memcmp(mem_base + end_offset, file_base + end_offset, PAGE_SIZE) != 0) {
6101 break;
6102 }
6103 end_offset += PAGE_SIZE;
6104 }
6105
6106 // Map pages.
6107 ssize_t map_length = end_offset - start_offset;
6108 ssize_t map_offset = *file_offset + start_offset;
6109 if (map_length > 0) {
6110 void *map = mmap(
6111 mem_base + start_offset, map_length, PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, map_offset);
6112 if (map == MAP_FAILED) {
6113 LD_LOGE("Error mapping relro %{public}s: failed to map GNU_RELRO", dso->name);
6114 munmap(ext_temp_map, ext_fd_file_size);
6115 return -1;
6116 }
6117 }
6118
6119 start_offset = end_offset;
6120 }
6121 *file_offset += size;
6122 munmap(ext_temp_map, ext_fd_file_size);
6123 return 0;
6124 }
6125
6126 static void handle_relro_sharing(struct dso *p, const dl_extinfo *extinfo, ssize_t *relro_fd_offset)
6127 {
6128 if (extinfo == NULL) {
6129 return;
6130 }
6131 if (extinfo->flag & DL_EXT_WRITE_RELRO) {
6132 LD_LOGD("Serializing GNU_RELRO %{public}s", p->name);
6133 if (serialize_gnu_relro(extinfo->relro_fd, p, relro_fd_offset) < 0) {
6134 LD_LOGE("Error serializing GNU_RELRO %{public}s", p->name);
6135 error("Error serializing GNU_RELRO");
6136 if (runtime) longjmp(*rtld_fail, 1);
6137 }
6138 } else if (extinfo->flag & DL_EXT_USE_RELRO) {
6139 LD_LOGD("Mapping GNU_RELRO %{public}s", p->name);
6140 if (map_gnu_relro(extinfo->relro_fd, p, relro_fd_offset) < 0) {
6141 LD_LOGE("Error mapping GNU_RELRO %{public}s", p->name);
6142 error("Error mapping GNU_RELRO");
6143 if (runtime) longjmp(*rtld_fail, 1);
6144 }
6145 }
6146 }
6147
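/* Name the anonymous zero-fill (.bss) mapping of a DSO with
 * prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...) so the region shows up in
 * /proc/<pid>/maps as e.g. "[anon:libfoo.so.bss]" instead of unnamed memory. */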
6148 static void set_bss_vma_name(char *path_name, void *addr, size_t zeromap_size)
6149 {
6150 char so_bss_name[ANON_NAME_MAX_LEN];
6151 if (path_name == NULL) {
6152 snprintf(so_bss_name, ANON_NAME_MAX_LEN, ".bss");
6153 } else {
6154 char *t = strrchr(path_name, '/');
6155 if (t) {
6156 snprintf(so_bss_name, ANON_NAME_MAX_LEN, "%s.bss", ++t);
6157 } else {
6158 snprintf(so_bss_name, ANON_NAME_MAX_LEN, "%s.bss", path_name);
6159 }
6160 }
6161
6162 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, zeromap_size, so_bss_name);
6163 }
6164
6165 static void find_and_set_bss_name(struct dso *p)
6166 {
6167 size_t cnt;
6168 Phdr *ph = p->phdr;
6169 for (cnt = p->phnum; cnt--; ph = (void *)((char *)ph + p->phentsize)) {
6170 if (ph->p_type != PT_LOAD) continue;
6171 size_t seg_start = p->base + ph->p_vaddr;
6172 size_t seg_file_end = seg_start + ph->p_filesz + PAGE_SIZE - 1 & -PAGE_SIZE;
6173 size_t seg_max_addr = seg_start + ph->p_memsz + PAGE_SIZE - 1 & -PAGE_SIZE;
6174 size_t zeromap_size = seg_max_addr - seg_file_end;
6175 if (zeromap_size > 0 && (ph->p_flags & PF_W)) {
6176 set_bss_vma_name(p->name, (void *)seg_file_end, zeromap_size);
6177 }
6178 }
6179 }
6180
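/* Debugger interface (r_debug-style protocol): the "debug" structure exposes
 * the list of loaded objects, and _dl_debug_state() is called around every
 * transition (RT_ADD / RT_DELETE / RT_CONSISTENT) so an attached debugger can
 * resynchronize its view of the link map. */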
6181 static void sync_with_debugger(void)
6182 {
6183 debug.ver = 1;
6184 debug.bp = dl_debug_state;
6185 debug.head = NULL;
6186 debug.base = ldso.base;
6187
6188 add_dso_info_to_debug_map(head);
6189
6190 debug.state = RT_CONSISTENT;
6191 _dl_debug_state();
6192 }
6193
6194 static void notify_addition_to_debugger(struct dso *p)
6195 {
6196 debug.state = RT_ADD;
6197 _dl_debug_state();
6198
6199 add_dso_info_to_debug_map(p);
6200
6201 debug.state = RT_CONSISTENT;
6202 _dl_debug_state();
6203 }
6204
6205 static void notify_remove_to_debugger(struct dso *p)
6206 {
6207 debug.state = RT_DELETE;
6208 _dl_debug_state();
6209
6210 remove_dso_info_from_debug_map(p);
6211
6212 debug.state = RT_CONSISTENT;
6213 _dl_debug_state();
6214 }
6215
6216 static void add_dso_info_to_debug_map(struct dso *p)
6217 {
6218 for (struct dso *so = p; so != NULL; so = so->next) {
6219 struct dso_debug_info *debug_info = malloc(sizeof(struct dso_debug_info));
6220 if (debug_info == NULL) {
6221 LD_LOGE("malloc error! dso name: %{public}s.", so->name);
6222 continue;
6223 }
6224 #if DL_FDPIC
6225 debug_info->loadmap = so->loadmap;
6226 #else
6227 debug_info->base = so->base;
6228 #endif
6229 debug_info->name = so->name;
6230 debug_info->dynv = so->dynv;
6231 if (debug.head == NULL) {
6232 debug_info->prev = NULL;
6233 debug_info->next = NULL;
6234 debug.head = debug_tail = debug_info;
6235 } else {
6236 debug_info->prev = debug_tail;
6237 debug_info->next = NULL;
6238 debug_tail->next = debug_info;
6239 debug_tail = debug_info;
6240 }
6241 so->debug_info = debug_info;
6242 }
6243 }
6244
6245 static void remove_dso_info_from_debug_map(struct dso *p)
6246 {
6247 struct dso_debug_info *debug_info = p->debug_info;
6248 if (debug_info == debug_tail) {
6249 debug_tail = debug_tail->prev;
6250 debug_tail->next = NULL;
6251 } else {
6252 debug_info->next->prev = debug_info->prev;
6253 debug_info->prev->next = debug_info->next;
6254 }
6255 free(debug_info);
6256 }
6257
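/* Reference counting for dso handles used on the [cxa_thread] (thread-local
 * destructor) paths: while a handle's count is non-zero, the owning DSO and
 * its dependency closure keep an extra nr_dlopen reference so dlclose cannot
 * unmap them; remove_dso_handle_node drops the reference and performs the
 * deferred do_dlclose when the count reaches zero. */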
6258 typedef struct dso_handle_node {
6259 void *dso_handle; // Used to locate the dso.
6260 uint32_t count;
6261 struct dso* dso;
6262 struct dso_handle_node* next;
6263 } dso_handle_node;
6264
6265 static dso_handle_node* dso_handle_list = NULL;
6266
6267 dso_handle_node* find_dso_handle_node(void *dso_handle)
6268 {
6269 dso_handle_node *cur = dso_handle_list;
6270 while (cur) {
6271 if (cur->dso_handle == dso_handle) {
6272 return cur;
6273 }
6274 cur = cur->next;
6275 }
6276 return NULL;
6277 }
6278
6279 void add_dso_handle_node(void *dso_handle)
6280 {
6281 pthread_rwlock_wrlock(&lock);
6282 if (!dso_handle) {
6283 LD_LOGW("[cxa_thread] add_dso_handle_node return because dso_handle is null.\n");
6284 pthread_rwlock_unlock(&lock);
6285 return;
6286 }
6287
6288 dso_handle_node *node = find_dso_handle_node(dso_handle);
6289 if (node) {
6290 node->count++;
6291 LD_LOGD("[cxa_thread] increase dso node count of %{public}s, count:%{public}d ", node->dso->name, node->count);
6292 pthread_rwlock_unlock(&lock);
6293 return;
6294 }
6295 dso_handle_node *cur = __libc_malloc(sizeof(*cur));
6296 if (!cur) {
6297 pthread_rwlock_unlock(&lock);
6298 LD_LOGE("[cxa_thread] alloc dso_handle_node failed.");
6299 error("[cxa_thread]: alloc dso_handle_node failed.");
6300 return;
6301 }
6302
6303 struct dso* p = addr2dso(dso_handle);
6304 if (!p) {
6305 pthread_rwlock_unlock(&lock);
6306 LD_LOGE("[cxa_thread] can't find dso by dso_handle(%{public}p)", dso_handle);
6307 error("[cxa_thread] can't find dso by dso_handle(%p)", dso_handle);
6308 return;
6309 }
6310
6311 // We don't need to care about an so whose by_dlopen is false because it will never be unloaded.
6312 if (p->by_dlopen) {
6313 p->nr_dlopen++;
6314 LD_LOGD("[cxa_thread] %{public}s nr_dlopen++ when add dso_handle for %{public}s, nr_dlopen:%{public}d",
6315 p->name, p->name, p->nr_dlopen);
6316 if (p->bfs_built) {
6317 for (size_t i = 0; p->deps[i]; i++) {
6318 p->deps[i]->nr_dlopen++;
6319 LD_LOGD("[cxa_thread] %{public}s nr_dlopen++ when add dso_handle for %{public}s, nr_dlopen:%{public}d",
6320 p->deps[i]->name, p->name, p->deps[i]->nr_dlopen);
6321 }
6322 } else {
6323 // Get all the direct and indirect deps.
6324 extend_bfs_deps(p, 1);
6325 for (size_t i = 0; p->deps_all[i]; i++) {
6326 p->deps_all[i]->nr_dlopen++;
6327 LD_LOGD("[cxa_thread] %{public}s nr_dlopen++ when add dso_handle for %{public}s, nr_dlopen:%{public}d",
6328 p->deps_all[i]->name, p->name, p->deps_all[i]->nr_dlopen);
6329 }
6330 }
6331 }
6332 cur->dso = p;
6333 cur->dso_handle = dso_handle;
6334 cur->count = 1;
6335 cur->next = dso_handle_list;
6336 dso_handle_list = cur;
6337 pthread_rwlock_unlock(&lock);
6338
6339 return;
6340 }
6341
6342 void remove_dso_handle_node(void *dso_handle)
6343 {
6344 pthread_rwlock_wrlock(&lock);
6345 if (dso_handle == NULL) {
6346 LD_LOGW("[cxa_thread] remove_dso_handle_node return because dso_handle is null.\n");
6347 pthread_rwlock_unlock(&lock);
6348 return;
6349 }
6350
6351 dso_handle_node *node = find_dso_handle_node(dso_handle);
6352 if (node && node->count) {
6353 LD_LOGD("[cxa_thread] decrease dso node count of %{public}s, count:%{public}d ", node->dso->name, node->count - 1);
6354 if ((--node->count) == 0) {
6355 LD_LOGD("[cxa_thread] call do_dlclose(%{public}s) when count is 0", node->dso->name);
6356 do_dlclose(node->dso, 1);
6357 // Invalidate current node.
6358 node->dso_handle = NULL;
6359 }
6360 pthread_rwlock_unlock(&lock);
6361 return;
6362 } else {
6363 LD_LOGE("[cxa_thread] can't find matched dso handle node by %{public}p, count:%{public}d", dso_handle, node->count);
6364 error("[cxa_thread] can't find matched dso handle node by %p, count:%d", dso_handle, node->count);
6365 }
6366 pthread_rwlock_unlock(&lock);
6367
6368 return;
6369 }
6370