1 #define _GNU_SOURCE
2 #define SYSCALL_NO_TLS 1
3
4 #include "dynlink.h"
5
6 #include <stdbool.h>
7 #include <stdlib.h>
8 #include <stdarg.h>
9 #include <stddef.h>
10 #include <string.h>
11 #include <unistd.h>
12 #include <stdint.h>
13 #include <elf.h>
14 #include <sys/mman.h>
15 #include <limits.h>
16 #include <fcntl.h>
17 #include <sys/stat.h>
18 #include <errno.h>
19 #include <link.h>
20 #include <setjmp.h>
21 #include <pthread.h>
22 #include <ctype.h>
23 #include <dlfcn.h>
24 #include <semaphore.h>
25 #include <sys/membarrier.h>
26 #include <sys/time.h>
27 #include <time.h>
28 #include <sys/prctl.h>
29 #include <sys/queue.h>
30
31 #include "cfi.h"
32 #include "dlfcn_ext.h"
33 #include "dynlink_rand.h"
34 #include "ld_log.h"
35 #include "libc.h"
36 #include "musl_fdsan.h"
37 #include "namespace.h"
38 #include "ns_config.h"
39 #include "pthread_impl.h"
40 #include "fork_impl.h"
41 #include "strops.h"
42 #include "trace/trace_marker.h"
43
44 #ifdef IS_ASAN
45 #if defined (__arm__)
46 #define LIB "/lib/"
47 #elif defined (__aarch64__)
48 #define LIB "/lib64/"
49 #else
50 #error "unsupported arch"
51 #endif
52 #endif
53
54 #ifdef OHOS_ENABLE_PARAMETER
55 #include "sys_param.h"
56 #endif
57 #ifdef LOAD_ORDER_RANDOMIZATION
58 #include "zip_archive.h"
59 #endif
60
61 #ifdef USE_ENCAPS
62 #include <sys/ioctl.h>
63
64 #define OH_ENCAPS_MAGIC 'E'
65 #define OH_ENCAPS_SYNC_BASE 0x19
66 #define SYNC_ENCAPS_CMD _IO(OH_ENCAPS_MAGIC, OH_ENCAPS_SYNC_BASE)
67 static int encpas_cost_time = 0;
68 struct timespec encaps_time_start, encaps_time_end;
69 #endif
70
71 static size_t ldso_page_size;
72 #ifndef PAGE_SIZE
73 #define PAGE_SIZE ldso_page_size
74 #endif
75
76 #define malloc __libc_malloc
77 #define calloc __libc_calloc
78 #define realloc __libc_realloc
79 #define free __libc_free
80
81 static void error_impl(const char *, ...);
82 static void error_noop(const char *, ...);
83 static void (*error)(const char *, ...) = error_noop;
84
85 #define MAXP2(a,b) (-(-(a)&-(b)))
86 #define ALIGN(x,y) ((x)+(y)-1 & -(y))
87 #define GNU_HASH_FILTER(ght, ghm, gho) \
88 const size_t *bloomwords = (const void *)(ght + 4); \
89 size_t f = bloomwords[gho & (ght[2] - 1)]; \
90 if (!(f & ghm)) continue; \
91 f >>= (gh >> ght[3]) % (8 * sizeof f); \
92 if (!(f & 1)) continue;
93
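/* Layout of the DT_GNU_HASH table used by GNU_HASH_FILTER and gnu_lookup():
 *   ght[0] = nbuckets, ght[1] = index of the first symbol covered by the table,
 *   ght[2] = number of bloom words (a power of two), ght[3] = bloom shift,
 * followed by the bloom words, the bucket array and the hash-value chain.
 * The filter tests two bits of the bloom word selected by the symbol's GNU
 * hash gh: bit (gh % wordbits) via ghm, and bit ((gh >> ght[3]) % wordbits)
 * via the shift below. If either bit is clear the symbol cannot be present
 * in this DSO, so the bucket/chain walk is skipped entirely. */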
94 #define container_of(p,t,m) ((t*)((char *)(p)-offsetof(t,m)))
95 #define countof(a) ((sizeof (a))/(sizeof (a)[0]))
96 #define DSO_FLAGS_NODELETE 0x1
97
98 #ifdef HANDLE_RANDOMIZATION
99 #define NEXT_DYNAMIC_INDEX 2
100 #define MIN_DEPS_COUNT 2
101 #define NAME_INDEX_ZERO 0
102 #define NAME_INDEX_ONE 1
103 #define NAME_INDEX_TWO 2
104 #define NAME_INDEX_THREE 3
105 #define TLS_CNT_INCREASE 3
106 #define INVALID_FD_INHIBIT_FURTHER_SEARCH (-2)
107 #endif
108
109 #define MAP_XPM 0x40
110 #define PARENTS_BASE_CAPACITY 8
111 #define RELOC_CAN_SEARCH_DSO_BASE_CAPACITY 32
112 #define ANON_NAME_MAX_LEN 70
113
114 #define KPMD_SIZE (1UL << 21)
115 #define HUGEPAGES_SUPPORTED_STR_SIZE (32)
116
117 #ifdef UNIT_TEST_STATIC
118 #define UT_STATIC
119 #else
120 #define UT_STATIC static
121 #endif
122
123 /* Used for dlclose */
124 #define UNLOAD_NR_DLOPEN_CHECK 1
125 #define UNLOAD_COMMON_CHECK 2
126 #define UNLOAD_ALL_CHECK 3
127 struct dso_entry {
128 struct dso *dso;
129 TAILQ_ENTRY(dso_entry) entries;
130 };
131
132 struct debug {
133 int ver;
134 void *head;
135 void (*bp)(void);
136 int state;
137 void *base;
138 };
139
140 struct reserved_address_params {
141 void* start_addr;
142 size_t reserved_size;
143 bool must_use_reserved;
144 bool reserved_address_recursive;
145 #ifdef LOAD_ORDER_RANDOMIZATION
146 struct dso *target;
147 #endif
148 };
149
150 typedef void (*stage3_func)(size_t *, size_t *, size_t *);
151
152 static struct builtin_tls {
153 char c[8];
154 struct pthread pt;
155 void *space[16];
156 } builtin_tls[1];
157 #define MIN_TLS_ALIGN offsetof(struct builtin_tls, pt)
158
159 #define ADDEND_LIMIT 4096
160 static size_t *saved_addends, *apply_addends_to;
161 static bool g_is_asan;
162 static struct dso ldso;
163 static struct dso *head, *tail, *fini_head, *syms_tail, *lazy_head;
164 static struct dso_debug_info *debug_tail = NULL;
165 static char *env_path, *sys_path;
166 static unsigned long long gencnt;
167 static unsigned long long subcnt;
168 static int runtime;
169 static int ldd_mode;
170 static int ldso_fail;
171 static int noload;
172 static int shutting_down;
173 static jmp_buf *rtld_fail;
174 static pthread_rwlock_t lock;
175 static pthread_mutex_t dlclose_lock = { { PTHREAD_MUTEX_RECURSIVE } }; // set mutex type to PTHREAD_MUTEX_RECURSIVE
176 static struct debug debug;
177 static struct tls_module *tls_tail;
178 static size_t tls_cnt, tls_offset, tls_align = MIN_TLS_ALIGN;
179 static size_t static_tls_cnt;
180 static pthread_mutex_t init_fini_lock;
181 static pthread_mutex_t dl_phdr_lock;
182 static pthread_cond_t ctor_cond;
183 static struct dso *builtin_deps[2];
184 static struct dso *const no_deps[1];
185 static struct dso *builtin_ctor_queue[4];
186 static struct dso **main_ctor_queue;
187 static struct fdpic_loadmap *app_loadmap;
188 static struct fdpic_dummy_loadmap app_dummy_loadmap;
189
190 struct debug *_dl_debug_addr = &debug;
191
192 extern weak hidden char __ehdr_start[];
193
194 extern hidden int __malloc_replaced;
195
196 hidden void (*const __init_array_start)(void)=0, (*const __fini_array_start)(void)=0;
197
198 extern hidden void (*const __init_array_end)(void), (*const __fini_array_end)(void);
199
200 #ifdef USE_GWP_ASAN
201 extern bool init_gwp_asan_by_libc(bool force_init);
202 #endif
203
204 weak_alias(__init_array_start, __init_array_end);
205 weak_alias(__fini_array_start, __fini_array_end);
206 #ifdef DFX_SIGNAL_LIBC
207 UT_STATIC void __InstallSignalHandler()
208 {
209 }
210 weak_alias(__InstallSignalHandler, DFX_InstallSignalHandler);
211 #endif
212
213 #ifdef HANDLE_RANDOMIZATION
214 static int do_dlclose(struct dso *p, bool check_deps_all);
215 #endif
216
217 #ifdef LOAD_ORDER_RANDOMIZATION
218 static bool task_check_xpm(struct loadtask *task);
219 static bool map_library_header(struct loadtask *task);
220 static bool task_map_library(struct loadtask *task, struct reserved_address_params *reserved_params);
221 static bool resolve_fd_to_realpath(struct loadtask *task);
222 static bool load_library_header(struct loadtask *task);
223 static void task_load_library(struct loadtask *task, struct reserved_address_params *reserved_params);
224 static void preload_direct_deps(struct dso *p, ns_t *namespace, struct loadtasks *tasks);
225 static void unmap_preloaded_sections(struct loadtasks *tasks);
226 static void preload_deps(struct dso *p, struct loadtasks *tasks);
227 static void run_loadtasks(struct loadtasks *tasks, struct reserved_address_params *reserved_params);
228 UT_STATIC void assign_tls(struct dso *p);
229 UT_STATIC void load_preload(char *s, ns_t *ns, struct loadtasks *tasks);
230 static void open_library_by_path(const char *name, const char *s, struct loadtask *task, struct zip_info *z_info);
231 static void handle_asan_path_open_by_task(int fd, const char *name, ns_t *namespace, struct loadtask *task, struct zip_info *z_info);
232 #endif
233
234 extern int __close(int fd);
235
236 /* Sharing relro */
237 static void handle_relro_sharing(struct dso *p, const dl_extinfo *extinfo, ssize_t *relro_fd_offset);
238
239 /* asan path open */
240 int handle_asan_path_open(int fd, const char *name, ns_t *namespace, char *buf, size_t buf_size);
241
242 static void set_bss_vma_name(char *path_name, void *addr, size_t zeromap_size);
243
244 static void find_and_set_bss_name(struct dso *p);
245
246 /* lldb debug function */
247 static void sync_with_debugger();
248 static void notify_addition_to_debugger(struct dso *p);
249 static void notify_remove_to_debugger(struct dso *p);
250 static void add_dso_info_to_debug_map(struct dso *p);
251 static void remove_dso_info_from_debug_map(struct dso *p);
252
253 /* add namespace function */
254 static void get_sys_path(ns_configor *conf);
255 static void dlclose_ns(struct dso *p);
256 static bool get_app_path(char *path, size_t size)
257 {
258 int l = 0;
259 l = readlink("/proc/self/exe", path, size);
260 if (l < 0 || l >= size) {
261 LD_LOGD("get_app_path readlink failed!");
262 return false;
263 }
264 path[l] = 0;
265 LD_LOGD("get_app_path path:%{public}s.", path);
266 return true;
267 }
268
269 static void init_default_namespace(struct dso *app)
270 {
271 ns_t *default_ns = get_default_ns();
272 memset(default_ns, 0, sizeof *default_ns);
273 ns_set_name(default_ns, NS_DEFAULT_NAME);
274 if (env_path) ns_set_env_paths(default_ns, env_path);
275 ns_set_lib_paths(default_ns, sys_path);
276 ns_set_separated(default_ns, false);
277 app->namespace = default_ns;
278 ns_add_dso(default_ns, app);
279 LD_LOGD("init_default_namespace default_namespace:"
280 "nsname: default ,"
281 "lib_paths:%{public}s ,"
282 "env_path:%{public}s ,"
283 "separated: false.",
284 sys_path, env_path);
285 return;
286 }
287
288 UT_STATIC void set_ns_attrs(ns_t *ns, ns_configor *conf)
289 {
290 if (!ns || !conf) {
291 return;
292 }
293
294 char *lib_paths, *asan_lib_paths, *permitted_paths, *asan_permitted_paths, *allowed_libs;
295
296 ns_set_separated(ns, conf->get_separated(ns->ns_name));
297
298 lib_paths = conf->get_lib_paths(ns->ns_name);
299 if (lib_paths) ns_set_lib_paths(ns, lib_paths);
300
301 asan_lib_paths = conf->get_asan_lib_paths(ns->ns_name);
302 if (asan_lib_paths) ns_set_asan_lib_paths(ns, asan_lib_paths);
303
304 permitted_paths = conf->get_permitted_paths(ns->ns_name);
305 if (permitted_paths) ns_set_permitted_paths(ns, permitted_paths);
306
307 asan_permitted_paths = conf->get_asan_permitted_paths(ns->ns_name);
308 if (asan_permitted_paths) ns_set_asan_permitted_paths(ns, asan_permitted_paths);
309
310 allowed_libs = conf->get_allowed_libs(ns->ns_name);
311 if (allowed_libs) ns_set_allowed_libs(ns, allowed_libs);
312
313 LD_LOGD("set_ns_attrs :"
314 "ns_name: %{public}s ,"
315 "separated:%{public}d ,"
316 "lib_paths:%{public}s ,"
317 "asan_lib_paths:%{public}s ,"
318 "permitted_paths:%{public}s ,"
319 "asan_permitted_paths:%{public}s ,"
320 "allowed_libs: %{public}s .",
321 ns->ns_name, ns->separated, ns->lib_paths, ns->asan_lib_paths, permitted_paths,
322 asan_permitted_paths, allowed_libs);
323 }
324
325 UT_STATIC void set_ns_inherits(ns_t *ns, ns_configor *conf)
326 {
327 if (!ns || !conf) {
328 return;
329 }
330
331 strlist *inherits = conf->get_inherits(ns->ns_name);
332 if (inherits) {
333 for (size_t i = 0; i < inherits->num; i++) {
334 ns_t *inherited_ns = find_ns_by_name(inherits->strs[i]);
335 if (inherited_ns) {
336 char *shared_libs = conf->get_inherit_shared_libs(ns->ns_name, inherited_ns->ns_name);
337 ns_add_inherit(ns, inherited_ns, shared_libs);
338 LD_LOGD("set_ns_inherits :"
339 "ns_name: %{public}s ,"
340 "separated:%{public}d ,"
341 "lib_paths:%{public}s ,"
342 "asan_lib_paths:%{public}s ,",
343 inherited_ns->ns_name, inherited_ns->separated, inherited_ns->lib_paths,
344 inherited_ns->asan_lib_paths);
345 }
346 }
347 strlist_free(inherits);
348 } else {
349 LD_LOGD("set_ns_inherits inherits is NULL!");
350 }
351 }
352
353 static void init_namespace(struct dso *app)
354 {
355 char app_path[PATH_MAX + 1];
356 if (!get_app_path(app_path, sizeof app_path)) {
357 strcpy(app_path, app->name);
358 }
359 char *t = strrchr(app_path, '/');
360 if (t) {
361 *t = 0;
362 } else {
363 app_path[0] = '.';
364 app_path[1] = 0;
365 }
366
367 nslist *nsl = nslist_init();
368 ns_configor *conf = configor_init();
369 char file_path[sizeof "/etc/ld-musl-namespace-" + sizeof (LDSO_ARCH) + sizeof ".ini" + 1] = {0};
370 (void)snprintf(file_path, sizeof file_path, "/etc/ld-musl-namespace-%s.ini", LDSO_ARCH);
371 LD_LOGI("init_namespace file_path:%{public}s", file_path);
372 trace_marker_reset();
373 trace_marker_begin(HITRACE_TAG_MUSL, "parse linker config", file_path);
374 int ret = conf->parse(file_path, app_path);
375 if (ret < 0) {
376 LD_LOGE("init_namespace ini file parse failed!");
377 /* Init_default_namespace is required even if the ini file parsing fails */
378 if (!sys_path) get_sys_path(conf);
379 init_default_namespace(app);
380 configor_free();
381 trace_marker_end(HITRACE_TAG_MUSL);
382 return;
383 }
384
385 /* sys_path needs to be parsed through ini file */
386 if (!sys_path) get_sys_path(conf);
387 init_default_namespace(app);
388
389 /* Init default namespace */
390 ns_t *d_ns = get_default_ns();
391 set_ns_attrs(d_ns, conf);
392
393 /* Init other namespace */
394 if (!nsl) {
395 LD_LOGE("init nslist fail!");
396 configor_free();
397 trace_marker_end(HITRACE_TAG_MUSL);
398 return;
399 }
400 strlist *s_ns = conf->get_namespaces();
401 if (s_ns) {
402 for (size_t i = 0; i < s_ns->num; i++) {
403 ns_t *ns = ns_alloc();
404 ns_set_name(ns, s_ns->strs[i]);
405 set_ns_attrs(ns, conf);
406 ns_add_dso(ns, app);
407 nslist_add_ns(ns);
408 }
409 strlist_free(s_ns);
410 }
411 /* Set inherited namespace */
412 set_ns_inherits(d_ns, conf);
413 for (size_t i = 0; i < nsl->num; i++) {
414 set_ns_inherits(nsl->nss[i], conf);
415 }
416 configor_free();
417 trace_marker_end(HITRACE_TAG_MUSL);
418 return;
419 }
420
421 /* Compute load address for a virtual address in a given dso. */
422 #if DL_FDPIC
423 void *laddr(const struct dso *p, size_t v)
424 {
425 size_t j=0;
426 if (!p->loadmap) return p->base + v;
427 for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);
428 return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
429 }
430 static void *laddr_pg(const struct dso *p, size_t v)
431 {
432 size_t j=0;
433 size_t pgsz = PAGE_SIZE;
434 if (!p->loadmap) return p->base + v;
435 for (j=0; ; j++) {
436 size_t a = p->loadmap->segs[j].p_vaddr;
437 size_t b = a + p->loadmap->segs[j].p_memsz;
438 a &= -pgsz;
439 b += pgsz-1;
440 b &= -pgsz;
441 if (v-a<b-a) break;
442 }
443 return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
444 }
445 static void (*fdbarrier(void *p))()
446 {
447 void (*fd)();
448 __asm__("" : "=r"(fd) : "0"(p));
449 return fd;
450 }
451 #define fpaddr(p, v) fdbarrier((&(struct funcdesc){ \
452 laddr(p, v), (p)->got }))
453 #else
454 #define laddr(p, v) (void *)((p)->base + (v))
455 #define laddr_pg(p, v) laddr(p, v)
456 #define fpaddr(p, v) ((void (*)())laddr(p, v))
457 #endif
458
459 static void decode_vec(size_t *v, size_t *a, size_t cnt)
460 {
461 size_t i;
462 for (i=0; i<cnt; i++) a[i] = 0;
463 for (; v[0]; v+=2) if (v[0]-1<cnt-1) {
464 if (v[0] < 8 * sizeof(long)) {
465 a[0] |= 1UL<<v[0];
466 }
467 a[v[0]] = v[1];
468 }
469 }
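/* decode_vec() flattens the DT_* tag/value pairs of a dynamic section into an
 * array indexed by tag, with a[0] doubling as a bitmask of which small tags
 * were present. A minimal usage sketch (for illustration only, mirroring how
 * the dynamic info is consumed later in this file):
 *
 *   size_t dyn[DYN_CNT];
 *   decode_vec(dynv, dyn, DYN_CNT);
 *   if (dyn[0] & (1UL<<DT_STRTAB))      // DT_STRTAB tag was present
 *       strings = laddr(p, dyn[DT_STRTAB]);
 */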
470
471 static int search_vec(size_t *v, size_t *r, size_t key)
472 {
473 for (; v[0]!=key; v+=2)
474 if (!v[0]) return 0;
475 *r = v[1];
476 return 1;
477 }
478
479 UT_STATIC int check_vna_hash(Verdef *def, int16_t vsym, uint32_t vna_hash)
480 {
481 int matched = 0;
482
483 vsym &= 0x7fff;
484 Verdef *verdef = def;
485 for (;;) {
486 if ((verdef->vd_ndx & 0x7fff) == vsym) {
487 if (vna_hash == verdef->vd_hash) {
488 matched = 1;
489 }
490 break;
491 }
492 if (matched) {
493 break;
494 }
495 if (verdef->vd_next == 0) {
496 break;
497 }
498 verdef = (Verdef *)((char *)verdef + verdef->vd_next);
499 }
500 #if (LD_LOG_LEVEL & LD_LOG_DEBUG)
501 if (!matched) {
502 LD_LOGD("check_vna_hash no matched found. vsym=%{public}d vna_hash=%{public}x", vsym, vna_hash);
503 }
504 #endif
505 return matched;
506 }
507
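/* GNU symbol versioning glue: versym[] holds one 16-bit entry per dynamic
 * symbol. The values 0 (VER_NDX_LOCAL) and 1 (VER_NDX_GLOBAL) mean the symbol
 * is unversioned, larger values index a version definition in the DT_VERDEF
 * list, and the high bit (0x8000) marks a hidden version, which is why the
 * routines below mask indices with 0x7fff. */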
508 UT_STATIC int check_verinfo(Verdef *def, int16_t *versym, uint32_t index, struct verinfo *verinfo, char *strings)
509 {
510 /* if versym or verdef is null, then no version info is needed. */
511 if (!versym || !def) {
512 if (strlen(verinfo->v) == 0) {
513 return 1;
514 } else {
515 LD_LOGD("check_verinfo versym or def is null and verinfo->v exist, s:%{public}s v:%{public}s.",
516 verinfo->s, verinfo->v);
517 return 0;
518 }
519 }
520
521 int16_t vsym = versym[index];
522
523 /* find the verneed symbol. */
524 if (verinfo->use_vna_hash) {
525 if (vsym != VER_NDX_LOCAL && vsym != VER_NDX_GLOBAL) {
526 return check_vna_hash(def, vsym, verinfo->vna_hash);
527 }
528 }
529
530 /* if the version string is empty and vsym is not negative, the library has a default version for this symbol. */
531 if (strlen(verinfo->v) == 0) {
532 if (vsym >= 0) {
533 return 1;
534 } else {
535 LD_LOGD("check_verinfo not default version. vsym:%{public}d s:%{public}s", vsym, verinfo->s);
536 return 0;
537 }
538 }
539
540 /* find the version of symbol. */
541 vsym &= 0x7fff;
542 for (;;) {
543 if (!(def->vd_flags & VER_FLG_BASE) && (def->vd_ndx & 0x7fff) == vsym) {
544 break;
545 }
546 if (def->vd_next == 0) {
547 return 0;
548 }
549 def = (Verdef *)((char *)def + def->vd_next);
550 }
551
552 Verdaux *aux = (Verdaux *)((char *)def + def->vd_aux);
553
554 int ret = !strcmp(verinfo->v, strings + aux->vda_name);
555 #if (LD_LOG_LEVEL & LD_LOG_DEBUG)
556 if (!ret) {
557 LD_LOGD("check_verinfo version not match. s=%{public}s v=%{public}s vsym=%{public}d vda_name=%{public}s",
558 verinfo->s, verinfo->v, vsym, strings + aux->vda_name);
559 }
560 #endif
561 return ret;
562 }
563
564 static struct sym_info_pair sysv_hash(const char *s0)
565 {
566 struct sym_info_pair s_info_p;
567 const unsigned char *s = (void *)s0;
568 uint_fast32_t h = 0;
569 while (*s) {
570 h = 16*h + *s++;
571 h ^= h>>24 & 0xf0;
572 }
573 s_info_p.sym_h = h & 0xfffffff;
574 s_info_p.sym_l = (char *)s - s0;
575 return s_info_p;
576 }
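/* Standard SysV ELF hash (h = h*16 + c with the high nibble folded back in,
 * truncated to 28 bits). The string length is also returned in sym_l so the
 * lookup routines can compare names with memcmp() over that length. */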
577
578 struct sym_info_pair gnu_hash(const char *s0)
579 {
580 struct sym_info_pair s_info_p;
581 const unsigned char *s = (void *)s0;
582 uint_fast32_t h = 5381;
583 for (; *s; s++)
584 h += h*32 + *s;
585 s_info_p.sym_h = h;
586 s_info_p.sym_l = (char *)s - s0;
587 return s_info_p;
588 }
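/* GNU hash: h = h*33 + c starting from 5381 (the "h += h*32 + *s" form above
 * is the same recurrence). For illustration, gnu_hash("") yields 5381 and
 * gnu_hash("a") yields 5381*33 + 'a' = 177670. */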
589
590 static Sym *sysv_lookup(struct verinfo *verinfo, struct sym_info_pair s_info_p, struct dso *dso)
591 {
592 size_t i;
593 uint32_t h = s_info_p.sym_h;
594 Sym *syms = dso->syms;
595 Elf_Symndx *hashtab = dso->hashtab;
596 char *strings = dso->strings;
597 for (i=hashtab[2+h%hashtab[0]]; i; i=hashtab[2+hashtab[0]+i]) {
598 if ((!dso->versym || (dso->versym[i] & 0x7fff) >= 0)
599 && (!memcmp(verinfo->s, strings+syms[i].st_name, s_info_p.sym_l))) {
600 if (!check_verinfo(dso->verdef, dso->versym, i, verinfo, dso->strings)) {
601 continue;
602 }
603
604 return syms+i;
605 }
606
607 }
608 LD_LOGD("sysv_lookup not find the symbol, "
609 "so:%{public}s s:%{public}s v:%{public}s use_vna_hash:%{public}d vna_hash:%{public}x",
610 dso->name, verinfo->s, verinfo->v, verinfo->use_vna_hash, verinfo->vna_hash);
611 return 0;
612 }
613
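/* gnu_lookup() walks a single GNU-hash bucket: buckets[h1 % nbuckets] gives
 * the index of the first candidate symbol, and the parallel hashval chain
 * stores each candidate's hash with the low bit reused as an end-of-chain
 * flag, which is why both hashes are compared with the low bit forced to 1. */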
614 static Sym *gnu_lookup(struct sym_info_pair s_info_p, uint32_t *hashtab, struct dso *dso, struct verinfo *verinfo)
615 {
616 uint32_t h1 = s_info_p.sym_h;
617 uint32_t nbuckets = hashtab[0];
618 uint32_t *buckets = hashtab + 4 + hashtab[2]*(sizeof(size_t)/4);
619 uint32_t i = buckets[h1 % nbuckets];
620
621 if (!i) {
622 LD_LOGD("gnu_lookup symbol not found (bloom filter), so:%{public}s s:%{public}s", dso->name, verinfo->s);
623 return 0;
624 }
625
626 uint32_t *hashval = buckets + nbuckets + (i - hashtab[1]);
627
628 for (h1 |= 1; ; i++) {
629 uint32_t h2 = *hashval++;
630 if ((h1 == (h2|1)) && (!dso->versym || (dso->versym[i] & 0x7fff) >= 0)
631 && !memcmp(verinfo->s, dso->strings + dso->syms[i].st_name, s_info_p.sym_l)) {
632 if (!check_verinfo(dso->verdef, dso->versym, i, verinfo, dso->strings)) {
633 continue;
634 }
635
636 return dso->syms+i;
637 }
638
639 if (h2 & 1) break;
640 }
641
642 LD_LOGD("gnu_lookup symbol not found, "
643 "so:%{public}s s:%{public}s v:%{public}s use_vna_hash:%{public}d vna_hash:%{public}x",
644 dso->name, verinfo->s, verinfo->v, verinfo->use_vna_hash, verinfo->vna_hash);
645 return 0;
646 }
647
648 static bool check_sym_accessible(struct dso *dso, ns_t *ns)
649 {
650 if (!dso || !dso->namespace || !ns) {
651 LD_LOGD("check_sym_accessible invalid parameter!");
652 return false;
653 }
654 if (dso->namespace == ns) {
655 return true;
656 }
657 for (int i = 0; i < dso->parents_count; i++) {
658 if (dso->parents[i]->namespace == ns) {
659 return true;
660 }
661 }
662 LD_LOGD(
663 "check_sym_accessible dso name [%{public}s] ns_name [%{public}s] not accessible!", dso->name, ns->ns_name);
664 return false;
665 }
666
667 static inline bool is_dso_accessible(struct dso *dso, ns_t *ns)
668 {
669 if (dso->namespace == ns) {
670 return true;
671 }
672 for (int i = 0; i < dso->parents_count; i++) {
673 if (dso->parents[i]->namespace == ns) {
674 return true;
675 }
676 }
677 LD_LOGD(
678 "is_dso_accessible dso name [%{public}s] ns_name [%{public}s] not accessible!", dso->name, ns->ns_name);
679 return false;
680 }
681
682 static int find_dso_parent(struct dso *p, struct dso *target)
683 {
684 int index = -1;
685 for (int i = 0; i < p->parents_count; i++) {
686 if (p->parents[i] == target) {
687 index = i;
688 break;
689 }
690 }
691 return index;
692 }
693
694 static void add_dso_parent(struct dso *p, struct dso *parent)
695 {
696 int index = find_dso_parent(p, parent);
697 if (index != -1) {
698 return;
699 }
700 if (p->parents_count + 1 > p->parents_capacity) {
701 if (p->parents_capacity == 0) {
702 p->parents = (struct dso **)malloc(sizeof(struct dso *) * PARENTS_BASE_CAPACITY);
703 if (!p->parents) {
704 return;
705 }
706 p->parents_capacity = PARENTS_BASE_CAPACITY;
707 } else {
708 struct dso ** realloced = (struct dso **)realloc(
709 p->parents, sizeof(struct dso *) * (p->parents_capacity + PARENTS_BASE_CAPACITY));
710 if (!realloced) {
711 return;
712 }
713 p->parents = realloced;
714 p->parents_capacity += PARENTS_BASE_CAPACITY;
715 }
716 }
717 p->parents[p->parents_count] = parent;
718 p->parents_count++;
719 }
720
721 static void remove_dso_parent(struct dso *p, struct dso *parent)
722 {
723 int index = find_dso_parent(p, parent);
724 if (index == -1) {
725 return;
726 }
727 int i;
728 for (i = 0; i < index; i++) {
729 p->parents[i] = p->parents[i];
730 }
731 for (i = index; i < p->parents_count - 1; i++) {
732 p->parents[i] = p->parents[i + 1];
733 }
734 p->parents_count--;
735 }
736
737 static void add_reloc_can_search_dso(struct dso *p, struct dso *can_search_so)
738 {
739 if (p->reloc_can_search_dso_count + 1 > p->reloc_can_search_dso_capacity) {
740 if (p->reloc_can_search_dso_capacity == 0) {
741 p->reloc_can_search_dso_list =
742 (struct dso **)malloc(sizeof(struct dso *) * RELOC_CAN_SEARCH_DSO_BASE_CAPACITY);
743 if (!p->reloc_can_search_dso_list) {
744 return;
745 }
746 p->reloc_can_search_dso_capacity = RELOC_CAN_SEARCH_DSO_BASE_CAPACITY;
747 } else {
748 struct dso ** realloced = (struct dso **)realloc(
749 p->reloc_can_search_dso_list,
750 sizeof(struct dso *) * (p->reloc_can_search_dso_capacity + RELOC_CAN_SEARCH_DSO_BASE_CAPACITY));
751 if (!realloced) {
752 return;
753 }
754 p->reloc_can_search_dso_list = realloced;
755 p->reloc_can_search_dso_capacity += RELOC_CAN_SEARCH_DSO_BASE_CAPACITY;
756 }
757 }
758 p->reloc_can_search_dso_list[p->reloc_can_search_dso_count] = can_search_so;
759 p->reloc_can_search_dso_count++;
760 }
761
762 static void free_reloc_can_search_dso(struct dso *p)
763 {
764 if (p->reloc_can_search_dso_list) {
765 free(p->reloc_can_search_dso_list);
766 p->reloc_can_search_dso_list = NULL;
767 p->reloc_can_search_dso_count = 0;
768 p->reloc_can_search_dso_capacity = 0;
769 }
770 }
771
772 /* The list of DSOs that can be searched during relocation includes:
773  * - DSOs whose is_global flag is true, which are accessible by default.
774  *   Global DSOs are the executable, LD_PRELOAD DSOs and ldso itself.
775  * - Other DSOs, only when is_reloc_head_so_dep is true and their namespace is accessible.
776  *
777  * How is_reloc_head_so_dep is set:
778  * when dlopen-ing A, it is set to true for all direct and indirect
779  * dependencies of A, including A itself. */
780 static void add_can_search_so_list_in_dso(struct dso *dso_relocating, struct dso *start_check_dso) {
781 struct dso *p = start_check_dso;
782 for (; p; p = p->syms_next) {
783 if (p->is_global) {
784 add_reloc_can_search_dso(dso_relocating, p);
785 continue;
786 }
787
788 if (p->is_reloc_head_so_dep) {
789 if (dso_relocating->namespace && check_sym_accessible(p, dso_relocating->namespace)) {
790 add_reloc_can_search_dso(dso_relocating, p);
791 }
792 }
793 }
794
795 return;
796 }
797
798 #define OK_TYPES (1<<STT_NOTYPE | 1<<STT_OBJECT | 1<<STT_FUNC | 1<<STT_COMMON | 1<<STT_TLS)
799 #define OK_BINDS (1<<STB_GLOBAL | 1<<STB_WEAK | 1<<STB_GNU_UNIQUE)
800
801 #ifndef ARCH_SYM_REJECT_UND
802 #define ARCH_SYM_REJECT_UND(s) 0
803 #endif
804
805 #if defined(__GNUC__)
806 __attribute__((always_inline))
807 #endif
808
809 struct symdef find_sym_impl(
810 struct dso *dso, struct verinfo *verinfo, struct sym_info_pair s_info_g, int need_def, ns_t *ns)
811 {
812 Sym *sym;
813 struct sym_info_pair s_info_s = {0, 0};
814 uint32_t *ght;
815 uint32_t gh = s_info_g.sym_h;
816 uint32_t gho = gh / (8 * sizeof(size_t));
817 size_t ghm = 1ul << gh % (8 * sizeof(size_t));
818 struct symdef def = {0};
819 if (ns && !check_sym_accessible(dso, ns))
820 return def;
821
822 if ((ght = dso->ghashtab)) {
823 const size_t *bloomwords = (const void *)(ght + 4);
824 size_t f = bloomwords[gho & (ght[2] - 1)];
825 if (!(f & ghm))
826 return def;
827
828 f >>= (gh >> ght[3]) % (8 * sizeof f);
829 if (!(f & 1))
830 return def;
831
832 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
833 } else {
834 if (!s_info_s.sym_h)
835 s_info_s = sysv_hash(verinfo->s);
836
837 sym = sysv_lookup(verinfo, s_info_s, dso);
838 }
839
840 if (!sym)
841 return def;
842
843 if (!sym->st_shndx)
844 if (need_def || (sym->st_info & 0xf) == STT_TLS || ARCH_SYM_REJECT_UND(sym))
845 return def;
846
847 if (!sym->st_value)
848 if ((sym->st_info & 0xf) != STT_TLS)
849 return def;
850
851 if (!(1 << (sym->st_info & 0xf) & OK_TYPES))
852 return def;
853
854 if (!(1 << (sym->st_info >> 4) & OK_BINDS))
855 return def;
856
857 def.sym = sym;
858 def.dso = dso;
859 return def;
860 }
861
862 static inline struct symdef find_sym2(struct dso *dso, struct verinfo *verinfo, int need_def, int use_deps, ns_t *ns)
863 {
864 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
865 struct sym_info_pair s_info_s = {0, 0};
866 uint32_t gh = s_info_g.sym_h, gho = gh / (8 * sizeof(size_t)), *ght;
867 size_t ghm = 1ul << gh % (8*sizeof(size_t));
868 struct symdef def = {0};
869 struct dso **deps = use_deps ? dso->deps : 0;
870 for (; dso; dso=use_deps ? *deps++ : dso->syms_next) {
871 Sym *sym;
872 // ldso, the app and preload DSOs are global and should be accessible in all existing namespaces
873 if (!dso->is_preload && ns && !check_sym_accessible(dso, ns)) {
874 continue;
875 }
876 if ((ght = dso->ghashtab)) {
877 GNU_HASH_FILTER(ght, ghm, gho)
878 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
879 } else {
880 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
881 sym = sysv_lookup(verinfo, s_info_s, dso);
882 }
883
884 if (!sym) continue;
885 if (!sym->st_shndx)
886 if (need_def || (sym->st_info&0xf) == STT_TLS
887 || ARCH_SYM_REJECT_UND(sym))
888 continue;
889 if (!sym->st_value)
890 if ((sym->st_info&0xf) != STT_TLS)
891 continue;
892 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
893 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
894 def.sym = sym;
895 def.dso = dso;
896 break;
897 }
898 return def;
899 }
900
901 static inline struct symdef find_sym_by_deps(struct dso *dso, struct verinfo *verinfo, int need_def, ns_t *ns)
902 {
903 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
904 struct sym_info_pair s_info_s = {0, 0};
905 uint32_t gh = s_info_g.sym_h, gho = gh / (8 * sizeof(size_t)), *ght;
906 size_t ghm = 1ul << gh % (8 * sizeof(size_t));
907 struct symdef def = {0};
908 struct dso **deps = dso->deps;
909 for (; dso; dso = *deps++) {
910 Sym *sym;
911 if (!is_dso_accessible(dso, ns)) {
912 continue;
913 }
914 if ((ght = dso->ghashtab)) {
915 GNU_HASH_FILTER(ght, ghm, gho)
916 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
917 } else {
918 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
919 sym = sysv_lookup(verinfo, s_info_s, dso);
920 }
921
922 if (!sym) continue;
923 if (!sym->st_shndx)
924 if (need_def || (sym->st_info&0xf) == STT_TLS
925 || ARCH_SYM_REJECT_UND(sym))
926 continue;
927 if (!sym->st_value)
928 if ((sym->st_info&0xf) != STT_TLS)
929 continue;
930 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
931 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
932 def.sym = sym;
933 def.dso = dso;
934 break;
935 }
936 return def;
937 }
938
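/* Like find_sym2(), but restricted to the reloc_can_search_dso_list snapshot
 * built by add_can_search_so_list_in_dso(). For COPY relocations the head DSO
 * itself (index 0 of the list) is skipped, so the symbol resolves to the
 * definition in a library rather than to the copy that is being filled in. */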
939 static inline struct symdef find_sym_by_saved_so_list(
940 int sym_type, struct dso *dso, struct verinfo *verinfo, int need_def, struct dso *dso_relocating)
941 {
942 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
943 struct sym_info_pair s_info_s = {0, 0};
944 uint32_t gh = s_info_g.sym_h, gho = gh / (8 * sizeof(size_t)), *ght;
945 size_t ghm = 1ul << gh % (8 * sizeof(size_t));
946 struct symdef def = {0};
947 // skip head dso.
948 int start_search_index = sym_type==REL_COPY ? 1 : 0;
949 struct dso *dso_searching = 0;
950 for (int i = start_search_index; i < dso_relocating->reloc_can_search_dso_count; i++) {
951 dso_searching = dso_relocating->reloc_can_search_dso_list[i];
952 Sym *sym;
953 if ((ght = dso_searching->ghashtab)) {
954 GNU_HASH_FILTER(ght, ghm, gho)
955 sym = gnu_lookup(s_info_g, ght, dso_searching, verinfo);
956 } else {
957 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
958 sym = sysv_lookup(verinfo, s_info_s, dso_searching);
959 }
960 if (!sym) continue;
961 if (!sym->st_shndx)
962 if (need_def || (sym->st_info&0xf) == STT_TLS
963 || ARCH_SYM_REJECT_UND(sym))
964 continue;
965 if (!sym->st_value)
966 if ((sym->st_info&0xf) != STT_TLS)
967 continue;
968 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
969 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
970 def.sym = sym;
971 def.dso = dso_searching;
972 break;
973 }
974 return def;
975 }
976
977 static struct symdef find_sym(struct dso *dso, const char *s, int need_def)
978 {
979 struct verinfo verinfo = { .s = s, .v = "", .use_vna_hash = false };
980 return find_sym2(dso, &verinfo, need_def, 0, NULL);
981 }
982
983 static bool get_vna_hash(struct dso *dso, int sym_index, uint32_t *vna_hash)
984 {
985 if (!dso->versym || !dso->verneed) {
986 return false;
987 }
988
989 uint16_t vsym = dso->versym[sym_index];
990 if (vsym == VER_NDX_LOCAL || vsym == VER_NDX_GLOBAL) {
991 return false;
992 }
993
994 bool result = false;
995 Verneed *verneed = dso->verneed;
996 Vernaux *vernaux;
997 vsym &= 0x7fff;
998
999 for (;;) {
1000 vernaux = (Vernaux *)((char *)verneed + verneed->vn_aux);
1001
1002 for (size_t cnt = 0; cnt < verneed->vn_cnt; cnt++) {
1003 if ((vernaux->vna_other & 0x7fff) == vsym) {
1004 result = true;
1005 *vna_hash = vernaux->vna_hash;
1006 break;
1007 }
1008
1009 vernaux = (Vernaux *)((char *)vernaux + vernaux->vna_next);
1010 }
1011
1012 if (result) {
1013 break;
1014 }
1015
1016 if (verneed->vn_next == 0) {
1017 break;
1018 }
1019
1020 verneed = (Verneed *)((char *)verneed + verneed->vn_next);
1021 }
1022 return result;
1023 }
1024
1025 static void get_verinfo(struct dso *dso, int sym_index, struct verinfo *vinfo)
1026 {
1027 char *strings = dso->strings;
1028 // try to get version number from .gnu.version
1029 int16_t vsym = dso->versym[sym_index];
1030 Verdef *verdef = dso->verdef;
1031 vsym &= 0x7fff;
1032 if (!verdef) {
1033 return;
1034 }
1035 int version_found = 0;
1036 for (;;) {
1037 if (!verdef) {
1038 break;
1039 }
1040 if (!(verdef->vd_flags & VER_FLG_BASE) && (verdef->vd_ndx & 0x7fff) == vsym) {
1041 version_found = 1;
1042 break;
1043 }
1044 if (verdef->vd_next == 0) {
1045 break;
1046 }
1047 verdef = (Verdef *)((char *)verdef + verdef->vd_next);
1048 }
1049 if (version_found) {
1050 Verdaux *aux = (Verdaux *)((char *)verdef + verdef->vd_aux);
1051 if (aux && aux->vda_name && strings && (dso->strings + aux->vda_name)) {
1052 vinfo->v = dso->strings + aux->vda_name;
1053 }
1054 }
1055 }
1056
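/* do_relocs() processes one relocation table. stride is 2 for REL entries
 * (r_offset, r_info), whose addend is read from the relocated address, and 3
 * for RELA entries (r_offset, r_info, r_addend) with an explicit addend. A
 * one-entry cache (cache_sym_index/cache_dso/cache_sym) short-circuits the
 * symbol search when consecutive relocations reference the same symbol. */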
1057 static void do_relocs(struct dso *dso, size_t *rel, size_t rel_size, size_t stride)
1058 {
1059 unsigned char *base = dso->base;
1060 Sym *syms = dso->syms;
1061 char *strings = dso->strings;
1062 Sym *sym;
1063 const char *name;
1064 void *ctx;
1065 int type;
1066 int sym_index;
1067 struct symdef def;
1068 size_t *reloc_addr;
1069 size_t sym_val;
1070 size_t tls_val;
1071 size_t addend;
1072 int skip_relative = 0, reuse_addends = 0, save_slot = 0;
1073
1074 if (dso == &ldso) {
1075 /* Only ldso's REL table needs addend saving/reuse. */
1076 if (rel == apply_addends_to)
1077 reuse_addends = 1;
1078 skip_relative = 1;
1079 }
1080
1081 for (; rel_size; rel+=stride, rel_size-=stride*sizeof(size_t)) {
1082 if (skip_relative && IS_RELATIVE(rel[1], dso->syms)) continue;
1083 type = R_TYPE(rel[1]);
1084 if (type == REL_NONE) continue;
1085 reloc_addr = laddr(dso, rel[0]);
1086
1087 if (stride > 2) {
1088 addend = rel[2];
1089 } else if (type==REL_GOT || type==REL_PLT|| type==REL_COPY) {
1090 addend = 0;
1091 } else if (reuse_addends) {
1092 /* Save original addend in stage 2 where the dso
1093 * chain consists of just ldso; otherwise read back
1094 * saved addend since the inline one was clobbered. */
1095 if (head==&ldso)
1096 saved_addends[save_slot] = *reloc_addr;
1097 addend = saved_addends[save_slot++];
1098 } else {
1099 addend = *reloc_addr;
1100 }
1101
1102 sym_index = R_SYM(rel[1]);
1103 if (sym_index) {
1104 sym = syms + sym_index;
1105 name = strings + sym->st_name;
1106 ctx = type==REL_COPY ? head->syms_next : head;
1107 struct verinfo vinfo = { .s = name, .v = ""};
1108
1109 vinfo.use_vna_hash = get_vna_hash(dso, sym_index, &vinfo.vna_hash);
1110 if (!vinfo.use_vna_hash && dso->versym && (dso->versym[sym_index] & 0x7fff) >= 0) {
1111 get_verinfo(dso, sym_index, &vinfo);
1112 }
1113 if (dso->cache_sym_index == sym_index) {
1114 def = (struct symdef){ .dso = dso->cache_dso, .sym = dso->cache_sym };
1115 } else {
1116 def = (sym->st_info>>4) == STB_LOCAL
1117 ? (struct symdef){ .dso = dso, .sym = sym }
1118 : dso != &ldso ? find_sym_by_saved_so_list(type, ctx, &vinfo, type==REL_PLT, dso)
1119 : find_sym2(ctx, &vinfo, type==REL_PLT, 0, dso->namespace);
1120 dso->cache_sym_index = sym_index;
1121 dso->cache_dso = def.dso;
1122 dso->cache_sym = def.sym;
1123 }
1124
1125 if (!def.sym && (sym->st_shndx != SHN_UNDEF
1126 || sym->st_info>>4 != STB_WEAK)) {
1127 if (dso->lazy && (type==REL_PLT || type==REL_GOT)) {
1128 dso->lazy[3*dso->lazy_cnt+0] = rel[0];
1129 dso->lazy[3*dso->lazy_cnt+1] = rel[1];
1130 dso->lazy[3*dso->lazy_cnt+2] = addend;
1131 dso->lazy_cnt++;
1132 continue;
1133 }
1134 LD_LOGE("relocating failed: symbol not found. "
1135 "dso=%{public}s s=%{public}s use_vna_hash=%{public}d vna_hash=%{public}x",
1136 dso->name, name, vinfo.use_vna_hash, vinfo.vna_hash);
1137 error("Error relocating %s: %s: symbol not found",
1138 dso->name, name);
1139 if (runtime) longjmp(*rtld_fail, 1);
1140 continue;
1141 }
1142 } else {
1143 sym = 0;
1144 def.sym = 0;
1145 def.dso = dso;
1146 }
1147
1148 sym_val = def.sym ? (size_t)laddr(def.dso, def.sym->st_value) : 0;
1149 tls_val = def.sym ? def.sym->st_value : 0;
1150
1151 if ((type == REL_TPOFF || type == REL_TPOFF_NEG)
1152 && def.dso->tls_id > static_tls_cnt) {
1153 error("Error relocating %s: %s: initial-exec TLS "
1154 "resolves to dynamic definition in %s",
1155 dso->name, name, def.dso->name);
1156 longjmp(*rtld_fail, 1);
1157 }
1158
1159 switch(type) {
1160 case REL_OFFSET:
1161 addend -= (size_t)reloc_addr;
1162 case REL_SYMBOLIC:
1163 case REL_GOT:
1164 case REL_PLT:
1165 *reloc_addr = sym_val + addend;
1166 break;
1167 case REL_USYMBOLIC:
1168 memcpy(reloc_addr, &(size_t){sym_val + addend}, sizeof(size_t));
1169 break;
1170 case REL_RELATIVE:
1171 *reloc_addr = (size_t)base + addend;
1172 break;
1173 case REL_SYM_OR_REL:
1174 if (sym) *reloc_addr = sym_val + addend;
1175 else *reloc_addr = (size_t)base + addend;
1176 break;
1177 case REL_COPY:
1178 memcpy(reloc_addr, (void *)sym_val, sym->st_size);
1179 break;
1180 case REL_OFFSET32:
1181 *(uint32_t *)reloc_addr = sym_val + addend
1182 - (size_t)reloc_addr;
1183 break;
1184 case REL_FUNCDESC:
1185 *reloc_addr = def.sym ? (size_t)(def.dso->funcdescs
1186 + (def.sym - def.dso->syms)) : 0;
1187 break;
1188 case REL_FUNCDESC_VAL:
1189 if ((sym->st_info&0xf) == STT_SECTION) *reloc_addr += sym_val;
1190 else *reloc_addr = sym_val;
1191 reloc_addr[1] = def.sym ? (size_t)def.dso->got : 0;
1192 break;
1193 case REL_DTPMOD:
1194 *reloc_addr = def.dso->tls_id;
1195 break;
1196 case REL_DTPOFF:
1197 *reloc_addr = tls_val + addend - DTP_OFFSET;
1198 break;
1199 #ifdef TLS_ABOVE_TP
1200 case REL_TPOFF:
1201 *reloc_addr = tls_val + def.dso->tls.offset + TPOFF_K + addend;
1202 break;
1203 #else
1204 case REL_TPOFF:
1205 *reloc_addr = tls_val - def.dso->tls.offset + addend;
1206 break;
1207 case REL_TPOFF_NEG:
1208 *reloc_addr = def.dso->tls.offset - tls_val + addend;
1209 break;
1210 #endif
1211 case REL_TLSDESC:
1212 if (stride<3) addend = reloc_addr[!TLSDESC_BACKWARDS];
1213 if (def.dso->tls_id > static_tls_cnt) {
1214 struct td_index *new = malloc(sizeof *new);
1215 if (!new) {
1216 error(
1217 "Error relocating %s: cannot allocate TLSDESC for %s",
1218 dso->name, sym ? name : "(local)" );
1219 longjmp(*rtld_fail, 1);
1220 }
1221 new->next = dso->td_index;
1222 dso->td_index = new;
1223 new->args[0] = def.dso->tls_id;
1224 new->args[1] = tls_val + addend - DTP_OFFSET;
1225 reloc_addr[0] = (size_t)__tlsdesc_dynamic;
1226 reloc_addr[1] = (size_t)new;
1227 } else {
1228 reloc_addr[0] = (size_t)__tlsdesc_static;
1229 #ifdef TLS_ABOVE_TP
1230 reloc_addr[1] = tls_val + def.dso->tls.offset
1231 + TPOFF_K + addend;
1232 #else
1233 reloc_addr[1] = tls_val - def.dso->tls.offset
1234 + addend;
1235 #endif
1236 }
1237 /* Some archs (32-bit ARM at least) invert the order of
1238 * the descriptor members. Fix them up here. */
1239 if (TLSDESC_BACKWARDS) {
1240 size_t tmp = reloc_addr[0];
1241 reloc_addr[0] = reloc_addr[1];
1242 reloc_addr[1] = tmp;
1243 }
1244 break;
1245 default:
1246 error("Error relocating %s: unsupported relocation type %d",
1247 dso->name, type);
1248 if (runtime) longjmp(*rtld_fail, 1);
1249 continue;
1250 }
1251 }
1252 }
1253
1254 static void redo_lazy_relocs()
1255 {
1256 struct dso *p = lazy_head, *next;
1257 lazy_head = 0;
1258 for (; p; p=next) {
1259 next = p->lazy_next;
1260 size_t size = p->lazy_cnt*3*sizeof(size_t);
1261 p->lazy_cnt = 0;
1262 do_relocs(p, p->lazy, size, 3);
1263 if (p->lazy_cnt) {
1264 p->lazy_next = lazy_head;
1265 lazy_head = p;
1266 } else {
1267 free(p->lazy);
1268 p->lazy = 0;
1269 p->lazy_next = 0;
1270 }
1271 }
1272 }
1273
1274 /* A huge hack: to make up for the wastefulness of shared libraries
1275 * needing at least a page of dirty memory even if they have no global
1276 * data, we reclaim the gaps at the beginning and end of writable maps
1277 * and "donate" them to the heap. */
1278
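/* For illustration: with 4K pages, a writable PT_LOAD with p_vaddr 0x3e48 and
 * p_memsz 0x200 would donate [0x3000,0x3e48) and [0x4048,0x5000) to malloc,
 * except for any part overlapping the PT_GNU_RELRO range, which reclaim()
 * clips out below. */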
1279 static void reclaim(struct dso *dso, size_t start, size_t end)
1280 {
1281 if (start >= dso->relro_start && start < dso->relro_end) start = dso->relro_end;
1282 if (end >= dso->relro_start && end < dso->relro_end) end = dso->relro_start;
1283 if (start >= end) return;
1284 char *base = laddr_pg(dso, start);
1285 __malloc_donate(base, base+(end-start));
1286 }
1287
1288 static void reclaim_gaps(struct dso *dso)
1289 {
1290 Phdr *ph = dso->phdr;
1291 size_t phcnt = dso->phnum;
1292
1293 for (; phcnt--; ph=(void *)((char *)ph+dso->phentsize)) {
1294 if (ph->p_type!=PT_LOAD) continue;
1295 if ((ph->p_flags&(PF_R|PF_W))!=(PF_R|PF_W)) continue;
1296 reclaim(dso, ph->p_vaddr & -PAGE_SIZE, ph->p_vaddr);
1297 reclaim(dso, ph->p_vaddr+ph->p_memsz,
1298 ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE);
1299 }
1300 }
1301
1302 static ssize_t read_loop(int fd, void *p, size_t n)
1303 {
1304 for (size_t i=0; i<n; ) {
1305 ssize_t l = read(fd, (char *)p+i, n-i);
1306 if (l<0) {
1307 if (errno==EINTR) continue;
1308 else return -1;
1309 }
1310 if (l==0) return i;
1311 i += l;
1312 }
1313 return n;
1314 }
1315
1316 UT_STATIC void *mmap_fixed(void *p, size_t n, int prot, int flags, int fd, off_t off)
1317 {
1318 static int no_map_fixed;
1319 char *q;
1320 if (!n) return p;
1321 if (!no_map_fixed) {
1322 q = mmap(p, n, prot, flags|MAP_FIXED, fd, off);
1323 if (!DL_NOMMU_SUPPORT || q != MAP_FAILED || errno != EINVAL)
1324 return q;
1325 no_map_fixed = 1;
1326 }
1327 /* Fallbacks for MAP_FIXED failure on NOMMU kernels. */
1328 if (flags & MAP_ANONYMOUS) {
1329 memset(p, 0, n);
1330 return p;
1331 }
1332 ssize_t r;
1333 if (lseek(fd, off, SEEK_SET) < 0) return MAP_FAILED;
1334 for (q=p; n; q+=r, off+=r, n-=r) {
1335 r = read(fd, q, n);
1336 if (r < 0 && errno != EINTR) return MAP_FAILED;
1337 if (!r) {
1338 memset(q, 0, n);
1339 break;
1340 }
1341 }
1342 return p;
1343 }
1344
1345 UT_STATIC void unmap_library(struct dso *dso)
1346 {
1347 if (dso->loadmap) {
1348 size_t i;
1349 for (i=0; i<dso->loadmap->nsegs; i++) {
1350 if (!dso->loadmap->segs[i].p_memsz)
1351 continue;
1352 if (!is_dlclose_debug_enable()) {
1353 munmap((void *)dso->loadmap->segs[i].addr,
1354 dso->loadmap->segs[i].p_memsz);
1355 } else {
1356 (void)mprotect((void *)dso->loadmap->segs[i].addr,
1357 dso->loadmap->segs[i].p_memsz, PROT_NONE);
1358 }
1359 }
1360 free(dso->loadmap);
1361 } else if (dso->map && dso->map_len) {
1362 if (!is_dlclose_debug_enable()) {
1363 munmap(dso->map, dso->map_len);
1364 } else {
1365 mprotect(dso->map, dso->map_len, PROT_NONE);
1366 }
1367 }
1368 }
1369
1370 UT_STATIC bool get_random(void *buf, size_t buflen)
1371 {
1372 int ret;
1373 int fd = open("/dev/urandom", O_RDONLY);
1374 if (fd < 0) {
1375 return false;
1376 }
1377
1378 ret = read(fd, buf, buflen);
1379 if (ret < 0) {
1380 close(fd);
1381 return false;
1382 }
1383
1384 close(fd);
1385 return true;
1386 }
1387
1388 UT_STATIC void fill_random_data(void *buf, size_t buflen)
1389 {
1390 uint64_t x;
1391 int i;
1392 int pos = 0;
1393 struct timespec ts;
1394 /* Try to use urandom to get the random number first */
1395 if (!get_random(buf, buflen)) {
1396 /* Can't get random number from /dev/urandom, generate from addr based on ASLR and time */
1397 for (i = 1; i <= (buflen / sizeof(x)); i++) {
1398 (void)clock_gettime(CLOCK_REALTIME, &ts);
1399 x = (((uint64_t)get_random) << 32) ^ (uint64_t)fill_random_data ^ ts.tv_nsec;
1400 memcpy((char *)buf + pos, &x, sizeof(x));
1401 pos += sizeof(x);
1402 }
1403 }
1404 return;
1405 }
1406
1407 static bool get_transparent_hugepages_supported(void)
1408 {
1409 int fd = -1;
1410 ssize_t read_size = 0;
1411 bool enable = false;
1412 char buf[HUGEPAGES_SUPPORTED_STR_SIZE] = {'0'};
1413
1414 fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
1415 if (fd < 0)
1416 goto done;
1417
1418 read_size = read(fd, buf, HUGEPAGES_SUPPORTED_STR_SIZE - 1);
1419 if (read_size < 0)
1420 goto close_fd;
1421
1422 buf[HUGEPAGES_SUPPORTED_STR_SIZE - 1] = '\0';
1423 if (strstr(buf, "[never]") == NULL)
1424 enable = true;
1425
1426 close_fd:
1427 close(fd);
1428 done:
1429 return enable;
1430 }
1431
1432 static size_t phdr_table_get_maxinum_alignment(Phdr *phdr_table, size_t phdr_count)
1433 {
1434 #if defined(__LP64__)
1435 size_t maxinum_alignment = PAGE_SIZE;
1436 size_t i = 0;
1437
1438 for (i = 0; i < phdr_count; ++i) {
1439 const Phdr *phdr = &phdr_table[i];
1440
1441 /* p_align must be 0, 1, or a positive, integral power of two */
1442 if ((phdr->p_type != PT_LOAD) || ((phdr->p_align & (phdr->p_align - 1)) != 0))
1443 continue;
1444
1445 if (phdr->p_align > maxinum_alignment)
1446 maxinum_alignment = phdr->p_align;
1447 }
1448
1449 return maxinum_alignment;
1450 #else
1451 return PAGE_SIZE;
1452 #endif
1453 }
1454
1455 #ifdef USE_ENCAPS
1456 static int do_sync_to_other()
1457 {
1458 int fd;
1459 int ret;
1460
1461 fd = open("/dev/encaps", O_RDONLY);
1462 if (fd < 0) {
1463 LD_LOGE("open encaps failed, %{public}s", strerror(errno));
1464 return -1;
1465 }
1466
1467 ret = ioctl(fd, SYNC_ENCAPS_CMD);
1468 if (ret != 0) {
1469 LD_LOGE("ioctl encaps failed, %{public}s", strerror(errno));
1470 close(fd);
1471 return -1;
1472 }
1473
1474 close(fd);
1475 return 0;
1476 }
1477
1478 static void sync_to_other()
1479 {
1480 __synccall(do_sync_to_other, NULL);
1481 }
1482
1483 static bool is_section_exist(Ehdr *eh_buf, uint32_t en_size, int fd, char *section_name)
1484 {
1485 char *shstrtab_content = NULL;
1486 size_t i, len;
1487 size_t shsize;
1488 uint16_t index;
1489 void *sh_buf = NULL;
1490 Shdr *sh, *sh0, shstrtab;
1491
1492 if (eh_buf == NULL) {
1493 return false;
1494 }
1495
1496 if (eh_buf->e_type != ET_DYN) {
1497 goto error_without_free;
1498 }
1499
1500 shsize = eh_buf->e_shentsize * eh_buf->e_shnum;
1501 index = eh_buf->e_shstrndx;
1502 if (index >= eh_buf->e_shnum) {
1503 goto error_without_free;
1504 }
1505
1506 if (shsize > en_size - sizeof(Ehdr)) {
1507 sh_buf = malloc(shsize);
1508 if (!sh_buf) {
1509 goto error_without_free;
1510 }
1511 len = pread(fd, sh_buf, shsize, eh_buf->e_shoff);
1512 if (len != shsize) {
1513 free(sh_buf);
1514 goto error_without_free;
1515 }
1516 sh = sh0 = sh_buf;
1517 } else if (eh_buf->e_shoff + shsize > en_size) {
1518 len = pread(fd, eh_buf + 1, shsize, eh_buf->e_shoff);
1519 if (len != shsize) {
1520 goto error_without_free;
1521 }
1522 sh = sh0 = (void *)(eh_buf + 1);
1523 } else {
1524 sh = sh0 = (void *)((char *)eh_buf + eh_buf->e_shoff);
1525 }
1526
1527 shstrtab = sh[index];
1528 shstrtab_content = (char *)malloc(shstrtab.sh_size);
1529 if (!shstrtab_content) {
1530 free(sh_buf);
1531 goto error_without_free;
1532 }
1533
1534 len = pread(fd, shstrtab_content, shstrtab.sh_size, shstrtab.sh_offset);
1535 if (len != shstrtab.sh_size) {
1536 goto error;
1537 }
1538 for (i = eh_buf->e_shnum; i != 0; i--) {
1539 char *shname = shstrtab_content + sh0[i - 1].sh_name; // this name is offset in shstrtab
1540 if ((shname == NULL) || (sh0[i - 1].sh_name > shstrtab.sh_size)) {
1541 continue;
1542 }
1543 if (strcmp(shname, section_name) == 0) {
1544 goto done_search;
1545 }
1546 }
1547
1548 error:
1549 free(shstrtab_content);
1550 if (sh_buf != NULL) {
1551 free(sh_buf);
1552 }
1553 error_without_free:
1554 errno = ENOEXEC;
1555 return false;
1556 done_search:
1557 free(shstrtab_content);
1558 if (sh_buf != NULL) {
1559 free(sh_buf);
1560 }
1561 sync_to_other();
1562 return true;
1563 }
1564 #endif
1565
1566 static bool check_xpm(int fd)
1567 {
1568 size_t mapLen = sizeof(Ehdr);
1569 void *map = mmap(0, mapLen, PROT_READ, MAP_PRIVATE | MAP_XPM, fd, 0);
1570 if (map == MAP_FAILED) {
1571 LD_LOGE("Xpm check failed for so file, errno for mmap is: %{public}d", errno);
1572 return false;
1573 }
1574 munmap(map, mapLen);
1575 return true;
1576 }
1577
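/* map_library() overview: after the XPM check, the ELF and program headers
 * are read; one pass over the headers records PT_DYNAMIC, PT_TLS,
 * PT_GNU_RELRO and the overall [addr_min, addr_max) load range; the whole
 * range is then reserved with a single mmap (aligned to LIBRARY_ALIGNMENT,
 * or to the huge-page size when supported), and a second pass maps each
 * PT_LOAD over the reservation with mmap_fixed(), zero-filling the bss tail. */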
1578 UT_STATIC void *map_library(int fd, struct dso *dso, struct reserved_address_params *reserved_params)
1579 {
1580 Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
1581 void *allocated_buf=0;
1582 size_t phsize;
1583 size_t addr_min=SIZE_MAX, addr_max=0, map_len;
1584 size_t this_min, this_max;
1585 size_t nsegs = 0;
1586 off_t off_start;
1587 Ehdr *eh;
1588 Phdr *ph, *ph0;
1589 unsigned prot;
1590 unsigned char *map=MAP_FAILED, *base;
1591 size_t dyn=0;
1592 size_t tls_image=0;
1593 size_t i;
1594 int map_flags = MAP_PRIVATE;
1595 size_t start_addr;
1596 size_t start_alignment = PAGE_SIZE;
1597 bool hugepage_enabled = false;
1598 if (!check_xpm(fd)) {
1599 return 0;
1600 }
1601
1602 ssize_t l = read(fd, buf, sizeof buf);
1603 eh = buf;
1604 if (l<0) return 0;
1605 if (l<sizeof *eh || (eh->e_type != ET_DYN && eh->e_type != ET_EXEC))
1606 goto noexec;
1607 phsize = eh->e_phentsize * eh->e_phnum;
1608 if (phsize > sizeof buf - sizeof *eh) {
1609 allocated_buf = malloc(phsize);
1610 if (!allocated_buf) return 0;
1611 l = pread(fd, allocated_buf, phsize, eh->e_phoff);
1612 if (l < 0) goto error;
1613 if (l != phsize) goto noexec;
1614 ph = ph0 = allocated_buf;
1615 } else if (eh->e_phoff + phsize > l) {
1616 l = pread(fd, buf+1, phsize, eh->e_phoff);
1617 if (l < 0) goto error;
1618 if (l != phsize) goto noexec;
1619 ph = ph0 = (void *)(buf + 1);
1620 } else {
1621 ph = ph0 = (void *)((char *)buf + eh->e_phoff);
1622 }
1623 for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
1624 if (ph->p_type == PT_DYNAMIC) {
1625 dyn = ph->p_vaddr;
1626 } else if (ph->p_type == PT_TLS) {
1627 tls_image = ph->p_vaddr;
1628 dso->tls.align = ph->p_align;
1629 dso->tls.len = ph->p_filesz;
1630 dso->tls.size = ph->p_memsz;
1631 } else if (ph->p_type == PT_GNU_RELRO) {
1632 dso->relro_start = ph->p_vaddr & -PAGE_SIZE;
1633 dso->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
1634 } else if (ph->p_type == PT_GNU_STACK) {
1635 if (!runtime && ph->p_memsz > __default_stacksize) {
1636 __default_stacksize =
1637 ph->p_memsz < DEFAULT_STACK_MAX ?
1638 ph->p_memsz : DEFAULT_STACK_MAX;
1639 }
1640 }
1641 if (ph->p_type != PT_LOAD) continue;
1642 nsegs++;
1643 if (ph->p_vaddr < addr_min) {
1644 addr_min = ph->p_vaddr;
1645 off_start = ph->p_offset;
1646 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1647 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1648 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1649 }
1650 if (ph->p_vaddr+ph->p_memsz > addr_max) {
1651 addr_max = ph->p_vaddr+ph->p_memsz;
1652 }
1653 }
1654 if (!dyn) goto noexec;
1655 if (DL_FDPIC && !(eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
1656 dso->loadmap = calloc(1, sizeof *dso->loadmap
1657 + nsegs * sizeof *dso->loadmap->segs);
1658 if (!dso->loadmap) goto error;
1659 dso->loadmap->nsegs = nsegs;
1660 for (ph=ph0, i=0; i<nsegs; ph=(void *)((char *)ph+eh->e_phentsize)) {
1661 if (ph->p_type != PT_LOAD) continue;
1662 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1663 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1664 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1665 map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE-1),
1666 prot, MAP_PRIVATE,
1667 fd, ph->p_offset & -PAGE_SIZE);
1668 if (map == MAP_FAILED) {
1669 unmap_library(dso);
1670 goto error;
1671 }
1672 dso->loadmap->segs[i].addr = (size_t)map +
1673 (ph->p_vaddr & PAGE_SIZE-1);
1674 dso->loadmap->segs[i].p_vaddr = ph->p_vaddr;
1675 dso->loadmap->segs[i].p_memsz = ph->p_memsz;
1676 i++;
1677 if (prot & PROT_WRITE) {
1678 size_t brk = (ph->p_vaddr & PAGE_SIZE-1)
1679 + ph->p_filesz;
1680 size_t pgbrk = brk + PAGE_SIZE-1 & -PAGE_SIZE;
1681 size_t pgend = brk + ph->p_memsz - ph->p_filesz
1682 + PAGE_SIZE-1 & -PAGE_SIZE;
1683 if (pgend > pgbrk && mmap_fixed(map+pgbrk,
1684 pgend-pgbrk, prot,
1685 MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
1686 -1, off_start) == MAP_FAILED)
1687 goto error;
1688 memset(map + brk, 0, pgbrk-brk);
1689 }
1690 }
1691 map = (void *)dso->loadmap->segs[0].addr;
1692 map_len = 0;
1693 goto done_mapping;
1694 }
1695 addr_max += PAGE_SIZE-1;
1696 addr_max &= -PAGE_SIZE;
1697 addr_min &= -PAGE_SIZE;
1698 off_start &= -PAGE_SIZE;
1699 map_len = addr_max - addr_min + off_start;
1700 start_addr = addr_min;
1701
1702 hugepage_enabled = get_transparent_hugepages_supported();
1703 if (hugepage_enabled) {
1704 size_t maxinum_alignment = phdr_table_get_maxinum_alignment(ph0, eh->e_phnum);
1705
1706 start_alignment = maxinum_alignment == KPMD_SIZE ? KPMD_SIZE : PAGE_SIZE;
1707 }
1708
1709 if (reserved_params) {
1710 if (map_len > reserved_params->reserved_size) {
1711 if (reserved_params->must_use_reserved) {
1712 goto error;
1713 }
1714 } else {
1715 start_addr = ((size_t)reserved_params->start_addr - 1 + PAGE_SIZE) & -PAGE_SIZE;
1716 map_flags |= MAP_FIXED;
1717 }
1718 }
1719
1720 /* we will find a mapping_align aligned address as the start of dso
1721 * so we need a tmp_map_len as map_len + mapping_align to make sure
1722 * we have enough space to shift the dso to the correct location. */
1723 size_t mapping_align = start_alignment > LIBRARY_ALIGNMENT ? start_alignment : LIBRARY_ALIGNMENT;
1724 size_t tmp_map_len = ALIGN(map_len, mapping_align) + mapping_align - PAGE_SIZE;
1725
1726 /* if reserved_params exists, we should use start_addr as the preferred address for the mmap operation */
1727 if (reserved_params) {
1728 map = DL_NOMMU_SUPPORT
1729 ? mmap((void *)start_addr, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
1730 : mmap((void *)start_addr, map_len, prot, map_flags, fd, off_start);
1731 if (map == MAP_FAILED) {
1732 goto error;
1733 }
1734 if (reserved_params && map_len < reserved_params->reserved_size) {
1735 reserved_params->reserved_size -= (map_len + (start_addr - (size_t)reserved_params->start_addr));
1736 reserved_params->start_addr = (void *)((uint8_t *)map + map_len);
1737 }
1738 /* if reserved_params does not exist, we should use real_map as the preferred address for the mmap operation */
1739 } else {
1740 /* use tmp_map_len to mmap enough space for the dso with anonymous mapping */
1741 unsigned char *temp_map = mmap((void *)NULL, tmp_map_len, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1742 if (temp_map == MAP_FAILED) {
1743 goto error;
1744 }
1745
1746 /* find the mapping_align aligned address */
1747 unsigned char *real_map = (unsigned char*)ALIGN((uintptr_t)temp_map, mapping_align);
1748 map = DL_NOMMU_SUPPORT
1749 ? mmap(real_map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
1750 /* use map_len to mmap correct space for the dso with file mapping */
1751 : mmap(real_map, map_len, prot, map_flags | MAP_FIXED, fd, off_start);
1752 if (map == MAP_FAILED || map != real_map) {
1753 LD_LOGE("mmap MAP_FIXED failed");
1754 goto error;
1755 }
1756
1757 /* Free unused memory.
1758 * |--------------------------tmp_map_len--------------------------|
1759 * ^ ^ ^ ^
1760 * |---unused_part_1---|---------map_len-------|---unused_part_2---|
1761 * temp_map real_map(aligned) temp_map_end
1762 */
1763 unsigned char *temp_map_end = temp_map + tmp_map_len;
1764 size_t unused_part_1 = real_map - temp_map;
1765 size_t unused_part_2 = temp_map_end - (real_map + map_len);
1766 if (unused_part_1 > 0) {
1767 int res1 = munmap(temp_map, unused_part_1);
1768 if (res1 == -1) {
1769 LD_LOGE("munmap unused part 1 failed, errno:%{public}d", errno);
1770 }
1771 }
1772
1773 if (unused_part_2 > 0) {
1774 int res2 = munmap(real_map + map_len, unused_part_2);
1775 if (res2 == -1) {
1776 LD_LOGE("munmap unused part 2 failed, errno:%{public}d", errno);
1777 }
1778 }
1779 }
1780 dso->map = map;
1781 dso->map_len = map_len;
1782 /* If the loaded file is not relocatable and the requested address is
1783 * not available, then the load operation must fail. */
1784 if (eh->e_type != ET_DYN && addr_min && map!=(void *)addr_min) {
1785 errno = EBUSY;
1786 goto error;
1787 }
1788 base = map - addr_min;
1789 dso->phdr = 0;
1790 dso->phnum = 0;
1791 for (ph=ph0, i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
1792 if (ph->p_type == PT_OHOS_RANDOMDATA) {
1793 fill_random_data((void *)(ph->p_vaddr + base), ph->p_memsz);
1794 continue;
1795 }
1796 if (ph->p_type != PT_LOAD) continue;
1797 /* Check if the programs headers are in this load segment, and
1798 * if so, record the address for use by dl_iterate_phdr. */
1799 if (!dso->phdr && eh->e_phoff >= ph->p_offset
1800 && eh->e_phoff+phsize <= ph->p_offset+ph->p_filesz) {
1801 dso->phdr = (void *)(base + ph->p_vaddr
1802 + (eh->e_phoff-ph->p_offset));
1803 dso->phnum = eh->e_phnum;
1804 dso->phentsize = eh->e_phentsize;
1805 }
1806 this_min = ph->p_vaddr & -PAGE_SIZE;
1807 this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
1808 off_start = ph->p_offset & -PAGE_SIZE;
1809 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1810 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1811 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1812 /* Reuse the existing mapping for the lowest-address LOAD */
1813 if ((ph->p_vaddr & -PAGE_SIZE) != addr_min || DL_NOMMU_SUPPORT)
1814 if (mmap_fixed(base+this_min, this_max-this_min, prot, MAP_PRIVATE|MAP_FIXED, fd, off_start) == MAP_FAILED)
1815 goto error;
1816 if ((ph->p_flags & PF_X) && (ph->p_align == KPMD_SIZE) && hugepage_enabled)
1817 madvise(base + this_min, this_max - this_min, MADV_HUGEPAGE);
1818 if (ph->p_memsz > ph->p_filesz && (ph->p_flags&PF_W)) {
1819 size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
1820 size_t pgbrk = brk+PAGE_SIZE-1 & -PAGE_SIZE;
1821 size_t zeromap_size = (size_t)base + this_max - pgbrk;
1822 memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE-1);
1823 if (pgbrk - (size_t)base < this_max && mmap_fixed((void *)pgbrk, zeromap_size, prot, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
1824 goto error;
1825 set_bss_vma_name(dso->name, (void *)pgbrk, zeromap_size);
1826 }
1827 }
1828 for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
1829 if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
1830 if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC)
1831 && errno != ENOSYS)
1832 goto error;
1833 break;
1834 }
1835 done_mapping:
1836 #ifdef USE_ENCAPS
1837 clock_gettime(CLOCK_MONOTONIC, &encaps_time_start);
1838 (void)is_section_exist(buf, sizeof(buf), fd, ".kernelpermission");
1839 clock_gettime(CLOCK_MONOTONIC, &encaps_time_end);
1840 encpas_cost_time = (encaps_time_end.tv_sec - encaps_time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
1841 + (encaps_time_end.tv_nsec - encaps_time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
1842 #endif
1843 dso->base = base;
1844 dso->dynv = laddr(dso, dyn);
1845 if (dso->tls.size) dso->tls.image = laddr(dso, tls_image);
1846 free(allocated_buf);
1847 return map;
1848 noexec:
1849 errno = ENOEXEC;
1850 error:
1851 if (map!=MAP_FAILED) unmap_library(dso);
1852 free(allocated_buf);
1853 return 0;
1854 }
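
/* The alignment dance above (reserve extra space, align, remap, trim) is
 * easier to see in isolation. A minimal standalone sketch, assuming only
 * POSIX mmap/munmap, a page-multiple len and a power-of-two, page-multiple
 * align; map_aligned() is a hypothetical helper, not part of this linker. */
#if 0
#include <stdint.h>
#include <sys/mman.h>

static void *map_aligned(int fd, size_t len, size_t align, size_t pgsz)
{
	/* Reserve len plus alignment slack with an anonymous PROT_NONE mapping,
	 * so that some address inside the reservation is suitably aligned. */
	size_t resv = ((len + align - 1) & -align) + align - pgsz;
	unsigned char *tmp = mmap(0, resv, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (tmp == MAP_FAILED) return 0;
	unsigned char *aligned = (unsigned char *)(((uintptr_t)tmp + align - 1) & -(uintptr_t)align);
	/* Map the file on top of the aligned address inside the reservation. */
	if (mmap(aligned, len, PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, 0) == MAP_FAILED) {
		munmap(tmp, resv);
		return 0;
	}
	/* Return the unused head and tail of the reservation to the kernel. */
	if (aligned > tmp) munmap(tmp, aligned - tmp);
	size_t used = (size_t)(aligned - tmp) + len;
	if (resv > used) munmap(aligned + len, resv - used);
	return aligned;
}
#endif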
1855
1856 static int path_open(const char *name, const char *s, char *buf, size_t buf_size)
1857 {
1858 size_t l;
1859 int fd;
1860 for (;;) {
1861 s += strspn(s, ":\n");
1862 l = strcspn(s, ":\n");
1863 if (l-1 >= INT_MAX) return -1;
1864 if (snprintf(buf, buf_size, "%.*s/%s", (int)l, s, name) < buf_size) {
1865 if ((fd = open(buf, O_RDONLY|O_CLOEXEC))>=0) return fd;
1866 switch (errno) {
1867 case ENOENT:
1868 case ENOTDIR:
1869 case EACCES:
1870 case ENAMETOOLONG:
1871 break;
1872 default:
1873 /* Any negative value but -1 will inhibit
1874 * further path search. */
1875 return -2;
1876 }
1877 }
1878 s += l;
1879 }
1880 }
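
/* path_open() tokenizes the search-path string in place with strspn/strcspn
 * rather than copying it. A minimal sketch of just that loop, using an
 * example path list; walk_paths() is a hypothetical helper. */
#if 0
#include <stdio.h>
#include <string.h>

static void walk_paths(const char *s)
{
	for (;;) {
		s += strspn(s, ":\n");         /* skip separators / empty components */
		size_t l = strcspn(s, ":\n");  /* length of the next component */
		if (!l) return;                /* end of list */
		printf("component: %.*s\n", (int)l, s);
		s += l;
	}
}
/* walk_paths("/system/lib64::/vendor/lib64") prints the two components. */
#endif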
1881
1882 UT_STATIC int fixup_rpath(struct dso *p, char *buf, size_t buf_size)
1883 {
1884 size_t n, l;
1885 const char *s, *t, *origin;
1886 char *d;
1887 if (p->rpath || !p->rpath_orig) return 0;
1888 if (!strchr(p->rpath_orig, '$')) {
1889 p->rpath = ld_strdup(p->rpath_orig);
1890 return 0;
1891 }
1892 n = 0;
1893 s = p->rpath_orig;
1894 while ((t=strchr(s, '$'))) {
1895 if (strncmp(t, "$ORIGIN", 7) && strncmp(t, "${ORIGIN}", 9))
1896 return 0;
1897 s = t+1;
1898 n++;
1899 }
1900 if (n > SSIZE_MAX/PATH_MAX) return 0;
1901
1902 if (p->kernel_mapped) {
1903 /* $ORIGIN searches cannot be performed for the main program
1904 * when it is suid/sgid/AT_SECURE. This is because the
1905 * pathname is under the control of the caller of execve.
1906 * For libraries, however, $ORIGIN can be processed safely
1907 * since the library's pathname came from a trusted source
1908 * (either system paths or a call to dlopen). */
1909 if (libc.secure)
1910 return 0;
1911 l = readlink("/proc/self/exe", buf, buf_size);
1912 if (l == -1) switch (errno) {
1913 case ENOENT:
1914 case ENOTDIR:
1915 case EACCES:
1916 return 0;
1917 default:
1918 return -1;
1919 }
1920 if (l >= buf_size)
1921 return 0;
1922 buf[l] = 0;
1923 origin = buf;
1924 } else {
1925 origin = p->name;
1926 }
1927 t = strrchr(origin, '/');
1928 if (t) {
1929 l = t-origin;
1930 } else {
1931 /* Normally p->name will always be an absolute or relative
1932 * pathname containing at least one '/' character, but in the
1933 * case where ldso was invoked as a command to execute a
1934 * program in the working directory, app.name may not contain one. Fix. */
1935 origin = ".";
1936 l = 1;
1937 }
1938 /* Disallow non-absolute origins for suid/sgid/AT_SECURE. */
1939 if (libc.secure && *origin != '/')
1940 return 0;
1941 p->rpath = malloc(strlen(p->rpath_orig) + n*l + 1);
1942 if (!p->rpath) return -1;
1943
1944 d = p->rpath;
1945 s = p->rpath_orig;
1946 while ((t=strchr(s, '$'))) {
1947 memcpy(d, s, t-s);
1948 d += t-s;
1949 memcpy(d, origin, l);
1950 d += l;
1951 /* It was determined previously that the '$' is followed
1952 * either by "ORIGIN" or "{ORIGIN}". */
1953 s = t + 7 + 2*(t[1]=='{');
1954 }
1955 strcpy(d, s);
1956 return 0;
1957 }
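
/* A condensed sketch of the $ORIGIN substitution performed by fixup_rpath():
 * count the tokens, allocate once, then copy with substitutions. It handles
 * only the literal "$ORIGIN" spelling and uses example inputs; expand_origin()
 * is hypothetical, not an interface of this file. */
#if 0
#include <stdlib.h>
#include <string.h>

static char *expand_origin(const char *rpath, const char *origin)
{
	size_t n = 0, ol = strlen(origin);
	for (const char *t = rpath; (t = strstr(t, "$ORIGIN")); t += 7) n++;
	char *out = malloc(strlen(rpath) + n*ol + 1);
	if (!out) return 0;
	char *d = out;
	const char *s = rpath, *t;
	while ((t = strstr(s, "$ORIGIN"))) {
		memcpy(d, s, t-s); d += t-s;      /* copy the part before the token */
		memcpy(d, origin, ol); d += ol;   /* substitute the origin directory */
		s = t + 7;                        /* skip "$ORIGIN" */
	}
	strcpy(d, s);
	return out;
}
/* expand_origin("$ORIGIN/../lib", "/data/app/demo") -> "/data/app/demo/../lib" */
#endif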
1958
1959 static void decode_dyn(struct dso *p)
1960 {
1961 size_t dyn[DYN_CNT];
1962 size_t flags1 = 0;
1963 decode_vec(p->dynv, dyn, DYN_CNT);
1964 search_vec(p->dynv, &flags1, DT_FLAGS_1);
1965 if (flags1 & DF_1_GLOBAL) {
1966 LD_LOGI("Add DF_1_GLOBAL for %{public}s", p->name);
1967 p->is_global = true;
1968 }
1969 if (flags1 & DF_1_NODELETE) {
1970 p->flags |= DSO_FLAGS_NODELETE;
1971 }
1972 p->syms = laddr(p, dyn[DT_SYMTAB]);
1973 p->strings = laddr(p, dyn[DT_STRTAB]);
1974 if (dyn[0]&(1<<DT_HASH))
1975 p->hashtab = laddr(p, dyn[DT_HASH]);
1976 if (dyn[0]&(1<<DT_RPATH))
1977 p->rpath_orig = p->strings + dyn[DT_RPATH];
1978 if (dyn[0]&(1<<DT_RUNPATH))
1979 p->rpath_orig = p->strings + dyn[DT_RUNPATH];
1980 if (dyn[0]&(1<<DT_PLTGOT))
1981 p->got = laddr(p, dyn[DT_PLTGOT]);
1982 if (search_vec(p->dynv, dyn, DT_GNU_HASH))
1983 p->ghashtab = laddr(p, *dyn);
1984 if (search_vec(p->dynv, dyn, DT_VERSYM))
1985 p->versym = laddr(p, *dyn);
1986 if (search_vec(p->dynv, dyn, DT_VERDEF))
1987 p->verdef = laddr(p, *dyn);
1988 if (search_vec(p->dynv, dyn, DT_VERNEED))
1989 p->verneed = laddr(p, *dyn);
1990 }
1991
1992 UT_STATIC size_t count_syms(struct dso *p)
1993 {
1994 if (p->hashtab) return p->hashtab[1];
1995
1996 size_t nsym, i;
1997 uint32_t *buckets = p->ghashtab + 4 + (p->ghashtab[2]*sizeof(size_t)/4);
1998 uint32_t *hashval;
1999 for (i = nsym = 0; i < p->ghashtab[0]; i++) {
2000 if (buckets[i] > nsym)
2001 nsym = buckets[i];
2002 }
2003 if (nsym) {
2004 hashval = buckets + p->ghashtab[0] + (nsym - p->ghashtab[1]);
2005 do nsym++;
2006 while (!(*hashval++ & 1));
2007 }
2008 return nsym;
2009 }
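
/* For reference, the DT_GNU_HASH layout that count_syms() walks; the struct
 * below is purely descriptive and is not a type used by this file. */
#if 0
struct gnu_hash_header {
	uint32_t nbuckets;     /* number of hash buckets (ghashtab[0]) */
	uint32_t symoffset;    /* index of the first hashed symbol (ghashtab[1]) */
	uint32_t bloom_size;   /* number of bloom-filter words (ghashtab[2]) */
	uint32_t bloom_shift;  /* second bloom hash shift (ghashtab[3]) */
};
/* Followed in memory by:
 *   size_t   bloom[bloom_size];
 *   uint32_t buckets[nbuckets];        -- count_syms() takes the largest value
 *   uint32_t chain[nsym - symoffset];  -- low bit set ends a bucket's chain,
 *                                         hence the do/while on (*hashval & 1)
 */
#endif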
2010
2011 static void *dl_mmap(size_t n)
2012 {
2013 void *p;
2014 int prot = PROT_READ|PROT_WRITE, flags = MAP_ANONYMOUS|MAP_PRIVATE;
2015 #ifdef SYS_mmap2
2016 p = (void *)__syscall(SYS_mmap2, 0, n, prot, flags, -1, 0);
2017 #else
2018 p = (void *)__syscall(SYS_mmap, 0, n, prot, flags, -1, 0);
2019 #endif
2020 return (unsigned long)p > -4096UL ? 0 : p;
2021 }
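
/* The raw syscall returns a negated errno encoded in the pointer on failure,
 * i.e. a value in the range (unsigned long)-4095..-1. For example a return of
 * (void *)-12 means ENOMEM; the "> -4096UL" check above converts any such
 * value into a NULL result. */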
2022
2023 static void makefuncdescs(struct dso *p)
2024 {
2025 static int self_done;
2026 size_t nsym = count_syms(p);
2027 size_t i, size = nsym * sizeof(*p->funcdescs);
2028
2029 if (!self_done) {
2030 p->funcdescs = dl_mmap(size);
2031 self_done = 1;
2032 } else {
2033 p->funcdescs = malloc(size);
2034 }
2035 if (!p->funcdescs) {
2036 if (!runtime) a_crash();
2037 error("Error allocating function descriptors for %s", p->name);
2038 longjmp(*rtld_fail, 1);
2039 }
2040 for (i=0; i<nsym; i++) {
2041 if ((p->syms[i].st_info&0xf)==STT_FUNC && p->syms[i].st_shndx) {
2042 p->funcdescs[i].addr = laddr(p, p->syms[i].st_value);
2043 p->funcdescs[i].got = p->got;
2044 } else {
2045 p->funcdescs[i].addr = 0;
2046 p->funcdescs[i].got = 0;
2047 }
2048 }
2049 }
2050
2051 static void get_sys_path(ns_configor *conf)
2052 {
2053 LD_LOGD("get_sys_path g_is_asan:%{public}d", g_is_asan);
2054 /* Use ini file's system paths when Asan is not enabled */
2055 if (!g_is_asan) {
2056 sys_path = conf->get_sys_paths();
2057 } else {
2058 /* Use ini file's asan system paths when Asan is enabled.
2059 * Merge two strings when both sys_paths and asan_sys_paths are valid */
2060 sys_path = conf->get_asan_sys_paths();
2061 char *sys_path_default = conf->get_sys_paths();
2062 if (!sys_path) {
2063 sys_path = sys_path_default;
2064 } else if (sys_path_default) {
2065 size_t newlen = strlen(sys_path) + strlen(sys_path_default) + 2;
2066 char *new_syspath = malloc(newlen);
2067 memset(new_syspath, 0, newlen);
2068 strcpy(new_syspath, sys_path);
2069 strcat(new_syspath, ":");
2070 strcat(new_syspath, sys_path_default);
2071 sys_path = new_syspath;
2072 }
2073 }
2074 if (!sys_path) sys_path = "/lib:/usr/local/lib:/usr/lib:/lib64";
2075 LD_LOGD("get_sys_path sys_path:%{public}s", sys_path);
2076 }
2077
2078 static struct dso *search_dso_by_name(const char *name, const ns_t *ns) {
2079 LD_LOGD("search_dso_by_name name:%{public}s, ns_name:%{public}s", name, ns ? ns->ns_name: "NULL");
2080 for (size_t i = 0; i < ns->ns_dsos->num; i++) {
2081 struct dso *p = ns->ns_dsos->dsos[i];
2082 if (p->shortname && !strcmp(p->shortname, name)) {
2083 LD_LOGD("search_dso_by_name found name:%{public}s, ns_name:%{public}s", name, ns ? ns->ns_name: "NULL");
2084 return p;
2085 }
2086 }
2087 return NULL;
2088 }
2089
2090 static struct dso *search_dso_by_fstat(const struct stat *st, const ns_t *ns, uint64_t file_offset) {
2091 LD_LOGD("search_dso_by_fstat ns_name:%{public}s", ns ? ns->ns_name : "NULL");
2092 for (size_t i = 0; i < ns->ns_dsos->num; i++) {
2093 struct dso *p = ns->ns_dsos->dsos[i];
2094 if (p->dev == st->st_dev && p->ino == st->st_ino && p->file_offset == file_offset) {
2095 LD_LOGD("search_dso_by_fstat found dev:%{public}lu, ino:%{public}lu, ns_name:%{public}s",
2096 st->st_dev, st->st_ino, ns ? ns->ns_name : "NULL");
2097 return p;
2098 }
2099 }
2100 return NULL;
2101 }
2102
2103 static inline int app_has_same_name_so(const char *so_name, const ns_t *ns)
2104 {
2105 int fd = -1;
2106 /* Only check system app. */
2107 if (((ns->flag & LOCAL_NS_PREFERED) != 0) && ns->lib_paths) {
2108 char tmp_buf[PATH_MAX + 1];
2109 fd = path_open(so_name, ns->lib_paths, tmp_buf, sizeof tmp_buf);
2110 }
2111 return fd;
2112 }
2113
2114 /* Find loaded so by name */
2115 static struct dso *find_library_by_name(const char *name, const ns_t *ns, bool check_inherited)
2116 {
2117 LD_LOGD("find_library_by_name name:%{public}s, ns_name:%{public}s, check_inherited:%{public}d",
2118 name,
2119 ns ? ns->ns_name : "NULL",
2120 !!check_inherited);
2121 struct dso *p = search_dso_by_name(name, ns);
2122 if (p) return p;
2123 if (check_inherited && ns->ns_inherits) {
2124 for (size_t i = 0; i < ns->ns_inherits->num; i++) {
2125 ns_inherit * inherit = ns->ns_inherits->inherits[i];
2126 p = search_dso_by_name(name, inherit->inherited_ns);
2127 if (p && is_sharable(inherit, name)) {
2128 if (app_has_same_name_so(name, ns) != -1) {
2129 return NULL;
2130 }
2131 return p;
2132 }
2133 }
2134 }
2135 return NULL;
2136 }
2137 /* Find loaded so by file stat */
2138 UT_STATIC struct dso *find_library_by_fstat(const struct stat *st, const ns_t *ns, bool check_inherited, uint64_t file_offset) {
2139 LD_LOGD("find_library_by_fstat ns_name:%{public}s, check_inherited :%{public}d",
2140 ns ? ns->ns_name : "NULL",
2141 !!check_inherited);
2142 struct dso *p = search_dso_by_fstat(st, ns, file_offset);
2143 if (p) return p;
2144 if (check_inherited && ns->ns_inherits) {
2145 for (size_t i = 0; i < ns->ns_inherits->num; i++) {
2146 ns_inherit *inherit = ns->ns_inherits->inherits[i];
2147 p = search_dso_by_fstat(st, inherit->inherited_ns, file_offset);
2148 if (p && is_sharable(inherit, p->shortname)) return p;
2149 }
2150 }
2151 return NULL;
2152 }
2153
2154 #ifndef LOAD_ORDER_RANDOMIZATION
2155 /* add namespace function */
2156 struct dso *load_library(
2157 const char *name, struct dso *needed_by, ns_t *namespace, bool check_inherited, struct reserved_address_params *reserved_params)
2158 {
2159 char buf[PATH_MAX + 1];
2160 const char *pathname;
2161 unsigned char *map;
2162 struct dso *p, temp_dso = {0};
2163 int fd;
2164 struct stat st;
2165 size_t alloc_size;
2166 int n_th = 0;
2167 int is_self = 0;
2168
2169 if (!*name) {
2170 errno = EINVAL;
2171 return 0;
2172 }
2173
2174 /* Catch and block attempts to reload the implementation itself */
2175 if (name[0]=='l' && name[1]=='i' && name[2]=='b') {
2176 static const char reserved[] =
2177 "c.pthread.rt.m.dl.util.xnet.";
2178 const char *rp, *next;
2179 for (rp=reserved; *rp; rp=next) {
2180 next = strchr(rp, '.') + 1;
2181 if (strncmp(name+3, rp, next-rp) == 0)
2182 break;
2183 }
2184 if (*rp) {
2185 if (ldd_mode) {
2186 /* Track which names have been resolved
2187 * and only report each one once. */
2188 static unsigned reported;
2189 unsigned mask = 1U<<(rp-reserved);
2190 if (!(reported & mask)) {
2191 reported |= mask;
2192 dprintf(1, "\t%s => %s (%p)\n",
2193 name, ldso.name,
2194 ldso.base);
2195 }
2196 }
2197 is_self = 1;
2198 }
2199 }
2200 if (!strcmp(name, ldso.name)) is_self = 1;
2201 if (is_self) {
2202 if (!ldso.prev) {
2203 tail->next = &ldso;
2204 ldso.prev = tail;
2205 tail = &ldso;
2206 ldso.namespace = namespace;
2207 ns_add_dso(namespace, &ldso);
2208 }
2209 return &ldso;
2210 }
2211 if (strchr(name, '/')) {
2212 pathname = name;
2213
2214 if (!is_accessible(namespace, pathname, g_is_asan, check_inherited)) {
2215 fd = -1;
2216 LD_LOGD("load_library is_accessible return false,fd = -1");
2217 } else {
2218 fd = open(name, O_RDONLY|O_CLOEXEC);
2219 LD_LOGD("load_library is_accessible return true, open file fd:%{public}d .", fd);
2220 }
2221 } else {
2222 /* Search for the name to see if it's already loaded */
2223 /* Search in namespace */
2224 p = find_library_by_name(name, namespace, check_inherited);
2225 if (p) {
2226 LD_LOGD("load_library find_library_by_name found p, return it!");
2227 return p;
2228 }
2229 if (strlen(name) > NAME_MAX) {
2230 LD_LOGE("load_library name exceeding the maximum length, return 0!");
2231 return 0;
2232 }
2233 fd = -1;
2234 if (namespace->env_paths) fd = path_open(name, namespace->env_paths, buf, sizeof buf);
2235 for (p = needed_by; fd == -1 && p; p = p->needed_by) {
2236 if (fixup_rpath(p, buf, sizeof buf) < 0) {
2237 LD_LOGD("load_library Inhibit further search,fd = -2.");
2238 fd = -2; /* Inhibit further search. */
2239 }
2240 if (p->rpath) {
2241 fd = path_open(name, p->rpath, buf, sizeof buf);
2242 LD_LOGD("load_library p->rpath path_open fd:%{public}d.", fd);
2243 }
2244
2245 }
2246 if (g_is_asan) {
2247 fd = handle_asan_path_open(fd, name, namespace, buf, sizeof buf);
2248 LD_LOGD("load_library handle_asan_path_open fd:%{public}d.", fd);
2249 } else {
2250 if (fd == -1 && namespace->lib_paths) {
2251 fd = path_open(name, namespace->lib_paths, buf, sizeof buf);
2252 LD_LOGD("load_library no asan lib_paths path_open fd:%{public}d.", fd);
2253 }
2254 }
2255 pathname = buf;
2256 LD_LOGD("load_library lib_paths pathname:%{public}s.", pathname);
2257 }
2258 if (fd < 0) {
2259 if (!check_inherited || !namespace->ns_inherits) return 0;
2260 /* Load lib from inherited namespaces. Do not check inherited again. */
2261 for (size_t i = 0; i < namespace->ns_inherits->num; i++) {
2262 ns_inherit *inherit = namespace->ns_inherits->inherits[i];
2263 if (strchr(name, '/') == 0 && !is_sharable(inherit, name)) continue;
2264 p = load_library(name, needed_by, inherit->inherited_ns, false, reserved_params);
2265 if (p) {
2266 LD_LOGD("load_library search in inherited, found p ,inherited_ns name:%{public}s",
2267 inherit->inherited_ns->ns_name);
2268 return p;
2269 }
2270 }
2271 return 0;
2272 }
2273 if (fstat(fd, &st) < 0) {
2274 close(fd);
2275 LD_LOGE("load_library fstat < 0,return 0!");
2276 return 0;
2277 }
2278 /* Search in namespace */
2279 p = find_library_by_fstat(&st, namespace, check_inherited, 0);
2280 if (p) {
2281 /* If this library was previously loaded with a
2282 * pathname but a search found the same inode,
2283 * setup its shortname so it can be found by name. */
2284 if (!p->shortname && pathname != name)
2285 p->shortname = strrchr(p->name, '/')+1;
2286 close(fd);
2287 LD_LOGD("load_library find_library_by_fstat, found p and return it!");
2288 return p;
2289 }
2290 map = noload ? 0 : map_library(fd, &temp_dso, reserved_params);
2291 close(fd);
2292 if (!map) return 0;
2293
2294 /* Avoid the danger of getting two versions of libc mapped into the
2295 * same process when an absolute pathname was used. The symbols
2296 * checked are chosen to catch both musl and glibc, and to avoid
2297 * false positives from interposition-hack libraries. */
2298 decode_dyn(&temp_dso);
2299 if (find_sym(&temp_dso, "__libc_start_main", 1).sym &&
2300 find_sym(&temp_dso, "stdin", 1).sym) {
2301 unmap_library(&temp_dso);
2302 return load_library("libc.so", needed_by, namespace, true, reserved_params);
2303 }
2304 /* Past this point, if we haven't reached runtime yet, ldso has
2305 * committed either to use the mapped library or to abort execution.
2306 * Unmapping is not possible, so we can safely reclaim gaps. */
2307 if (!runtime) reclaim_gaps(&temp_dso);
2308
2309 /* Allocate storage for the new DSO. When there is TLS, this
2310 * storage must include a reservation for all pre-existing
2311 * threads to obtain copies of both the new TLS, and an
2312 * extended DTV capable of storing an additional slot for
2313 * the newly-loaded DSO. */
2314 alloc_size = sizeof *p + strlen(pathname) + 1;
2315 if (runtime && temp_dso.tls.image) {
2316 size_t per_th = temp_dso.tls.size + temp_dso.tls.align
2317 + sizeof(void *) * (tls_cnt+3);
2318 n_th = libc.threads_minus_1 + 1;
2319 if (n_th > SSIZE_MAX / per_th) alloc_size = SIZE_MAX;
2320 else alloc_size += n_th * per_th;
2321 }
2322 p = calloc(1, alloc_size);
2323 if (!p) {
2324 unmap_library(&temp_dso);
2325 return 0;
2326 }
2327 memcpy(p, &temp_dso, sizeof temp_dso);
2328 p->dev = st.st_dev;
2329 p->ino = st.st_ino;
2330 p->needed_by = needed_by;
2331 p->name = p->buf;
2332 p->runtime_loaded = runtime;
2333 strcpy(p->name, pathname);
2334 /* Add a shortname only if name arg was not an explicit pathname. */
2335 if (pathname != name) p->shortname = strrchr(p->name, '/')+1;
2336 if (p->tls.image) {
2337 p->tls_id = ++tls_cnt;
2338 tls_align = MAXP2(tls_align, p->tls.align);
2339 #ifdef TLS_ABOVE_TP
2340 p->tls.offset = tls_offset + ( (p->tls.align-1) &
2341 (-tls_offset + (uintptr_t)p->tls.image) );
2342 tls_offset = p->tls.offset + p->tls.size;
2343 #else
2344 tls_offset += p->tls.size + p->tls.align - 1;
2345 tls_offset -= (tls_offset + (uintptr_t)p->tls.image)
2346 & (p->tls.align-1);
2347 p->tls.offset = tls_offset;
2348 #endif
2349 p->new_dtv = (void *)(-sizeof(size_t) &
2350 (uintptr_t)(p->name+strlen(p->name)+sizeof(size_t)));
2351 p->new_tls = (void *)(p->new_dtv + n_th*(tls_cnt+1));
2352 if (tls_tail) tls_tail->next = &p->tls;
2353 else libc.tls_head = &p->tls;
2354 tls_tail = &p->tls;
2355 }
2356
2357 tail->next = p;
2358 p->prev = tail;
2359 tail = p;
2360
2361 /* Add dso to namespace */
2362 p->namespace = namespace;
2363 ns_add_dso(namespace, p);
2364 if (runtime)
2365 p->by_dlopen = 1;
2366
2367 if (DL_FDPIC) makefuncdescs(p);
2368
2369 if (ldd_mode) dprintf(1, "\t%s => %s (%p)\n", name, pathname, p->base);
2370
2371 return p;
2372 }
2373
2374 static void load_direct_deps(struct dso *p, ns_t *namespace, struct reserved_address_params *reserved_params)
2375 {
2376 size_t i, cnt=0;
2377
2378 if (p->deps) return;
2379 /* For head, all preloads are direct pseudo-dependencies.
2380 * Count and include them now to avoid realloc later. */
2381 if (p==head) for (struct dso *q=p->next; q; q=q->next)
2382 cnt++;
2383 for (i=0; p->dynv[i]; i+=2)
2384 if (p->dynv[i] == DT_NEEDED) cnt++;
2385 /* Use builtin buffer for apps with no external deps, to
2386 * preserve property of no runtime failure paths. */
2387 p->deps = (p==head && cnt<2) ? builtin_deps :
2388 calloc(cnt+1, sizeof *p->deps);
2389 if (!p->deps) {
2390 error("Error loading dependencies for %s", p->name);
2391 if (runtime) longjmp(*rtld_fail, 1);
2392 }
2393 cnt=0;
2394 if (p==head) for (struct dso *q=p->next; q; q=q->next)
2395 p->deps[cnt++] = q;
2396 for (i=0; p->dynv[i]; i+=2) {
2397 if (p->dynv[i] != DT_NEEDED) continue;
2398 struct dso *dep = load_library(p->strings + p->dynv[i + 1], p, namespace, true, reserved_params);
2399 LD_LOGD("loading shared library %{public}s: (needed by %{public}s)", p->strings + p->dynv[i+1], p->name);
2400 if (!dep) {
2401 error("Error loading shared library %s: %m (needed by %s)",
2402 p->strings + p->dynv[i+1], p->name);
2403 if (runtime) longjmp(*rtld_fail, 1);
2404 continue;
2405 }
2406 p->deps[cnt++] = dep;
2407 }
2408 p->deps[cnt] = 0;
2409 p->ndeps_direct = cnt;
2410 for (i = 0; i < p->ndeps_direct; i++) {
2411 add_dso_parent(p->deps[i], p);
2412 }
2413 }
2414
2415 static void load_deps(struct dso *p, struct reserved_address_params *reserved_params)
2416 {
2417 if (p->deps) return;
2418 for (; p; p = p->next)
2419 load_direct_deps(p, p->namespace, reserved_params);
2420 }
2421 #endif
2422
2423 static void extend_bfs_deps(struct dso *p, bool to_deps_all)
2424 {
2425 size_t i, j, cnt, ndeps_all;
2426 struct dso **tmp;
2427
2428 /* Can't use realloc if the original p->deps was allocated at
2429 * program entry and malloc has been replaced, or if it's
2430 * the builtin non-allocated trivial main program deps array. */
2431 int no_realloc = (__malloc_replaced && !p->runtime_loaded)
2432 || p->deps == builtin_deps;
2433
2434 if (p->bfs_built) return;
2435 if (to_deps_all && p->deps_all_built) {
2436 return;
2437 }
2438
2439 ndeps_all = p->ndeps_direct;
2440 if (to_deps_all) {
2441 // Allocate one extra slot because the deps list is NULL-terminated.
2442 p->deps_all = calloc(ndeps_all + 1, sizeof *p->deps);
2443 }
2444
2445 /* Mark existing (direct) deps so they won't be duplicated. */
2446 for (i=0; p->deps[i]; i++) {
2447 if (to_deps_all) {
2448 p->deps_all[i] = p->deps[i];
2449 }
2450 p->deps[i]->mark = 1;
2451 }
2452
2453 /* For each dependency already in the list, copy its list of direct
2454 * dependencies to the list, excluding any items already in the
2455 * list. Note that the list this loop iterates over will grow during
2456 * the loop, but since duplicates are excluded, growth is bounded. */
2457 if (to_deps_all) {
2458 for (i=0; p->deps_all[i]; i++) {
2459 struct dso *dep = p->deps_all[i];
2460 for (j=cnt=0; j<dep->ndeps_direct; j++)
2461 if (!dep->deps[j]->mark) cnt++;
2462 tmp = no_realloc ?
2463 malloc(sizeof(*tmp) * (ndeps_all+cnt+1)) :
2464 realloc(p->deps_all, sizeof(*tmp) * (ndeps_all+cnt+1));
2465 if (!tmp) {
2466 error("Error recording dependencies for %s", p->name);
2467 if (runtime) longjmp(*rtld_fail, 1);
2468 continue;
2469 }
2470 if (no_realloc) {
2471 memcpy(tmp, p->deps_all, sizeof(*tmp) * (ndeps_all+1));
2472 no_realloc = 0;
2473 }
2474 p->deps_all = tmp;
2475 for (j=0; j<dep->ndeps_direct; j++) {
2476 if (dep->deps[j]->mark) continue;
2477 dep->deps[j]->mark = 1;
2478 p->deps_all[ndeps_all++] = dep->deps[j];
2479 }
2480 p->deps_all[ndeps_all] = 0;
2481 }
2482 p->deps_all_built = 1;
2483 } else {
2484 for (i=0; p->deps[i]; i++) {
2485 struct dso *dep = p->deps[i];
2486 for (j=cnt=0; j<dep->ndeps_direct; j++)
2487 if (!dep->deps[j]->mark) cnt++;
2488 tmp = no_realloc ?
2489 malloc(sizeof(*tmp) * (ndeps_all+cnt+1)) :
2490 realloc(p->deps, sizeof(*tmp) * (ndeps_all+cnt+1));
2491 if (!tmp) {
2492 error("Error recording dependencies for %s", p->name);
2493 if (runtime) longjmp(*rtld_fail, 1);
2494 continue;
2495 }
2496 if (no_realloc) {
2497 memcpy(tmp, p->deps, sizeof(*tmp) * (ndeps_all+1));
2498 no_realloc = 0;
2499 }
2500 p->deps = tmp;
2501 for (j=0; j<dep->ndeps_direct; j++) {
2502 if (dep->deps[j]->mark) continue;
2503 dep->deps[j]->mark = 1;
2504 p->deps[ndeps_all++] = dep->deps[j];
2505 }
2506 p->deps[ndeps_all] = 0;
2507 }
2508 p->bfs_built = 1;
2509 }
2510 for (p=head; p; p=p->next)
2511 p->mark = 0;
2512 }
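
/* A minimal sketch of the mark-based breadth-first expansion used above: the
 * worklist is the output array itself, and a per-node mark prevents
 * duplicates, so the list grows during iteration but growth is bounded by the
 * number of distinct nodes. The node type and bfs_expand() are hypothetical;
 * the caller clears the marks afterwards, like the tail loop above. */
#if 0
#include <stddef.h>

struct node { struct node **edges; size_t nedges; int mark; };

static size_t bfs_expand(struct node **list, size_t n, size_t cap)
{
	for (size_t i = 0; i < n; i++) list[i]->mark = 1;      /* seed: direct deps */
	for (size_t i = 0; i < n; i++)                          /* n grows as we append */
		for (size_t j = 0; j < list[i]->nedges; j++) {
			struct node *e = list[i]->edges[j];
			if (e->mark || n == cap) continue;      /* skip duplicates */
			e->mark = 1;
			list[n++] = e;
		}
	return n;
}
#endif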
2513
2514 #ifndef LOAD_ORDER_RANDOMIZATION
2515 static void load_preload(char *s, ns_t *ns)
2516 {
2517 int tmp;
2518 char *z;
2519 for (z=s; *z; s=z) {
2520 for ( ; *s && (isspace(*s) || *s==':'); s++);
2521 for (z=s; *z && !isspace(*z) && *z!=':'; z++);
2522 tmp = *z;
2523 *z = 0;
2524 load_library(s, 0, ns, true, NULL);
2525 *z = tmp;
2526 }
2527 }
2528 #endif
2529
2530 static void add_syms(struct dso *p)
2531 {
2532 if (!p->syms_next && syms_tail != p) {
2533 syms_tail->syms_next = p;
2534 syms_tail = p;
2535 }
2536 }
2537
2538 static void revert_syms(struct dso *old_tail)
2539 {
2540 struct dso *p, *next;
2541 /* Chop off the tail of the list of dsos that participate in
2542 * the global symbol table, reverting them to RTLD_LOCAL. */
2543 for (p=old_tail; p; p=next) {
2544 next = p->syms_next;
2545 p->syms_next = 0;
2546 }
2547 syms_tail = old_tail;
2548 }
2549
2550 static void do_mips_relocs(struct dso *p, size_t *got)
2551 {
2552 size_t i, j, rel[2];
2553 unsigned char *base = p->base;
2554 i=0; search_vec(p->dynv, &i, DT_MIPS_LOCAL_GOTNO);
2555 if (p==&ldso) {
2556 got += i;
2557 } else {
2558 while (i--) *got++ += (size_t)base;
2559 }
2560 j=0; search_vec(p->dynv, &j, DT_MIPS_GOTSYM);
2561 i=0; search_vec(p->dynv, &i, DT_MIPS_SYMTABNO);
2562 Sym *sym = p->syms + j;
2563 rel[0] = (unsigned char *)got - base;
2564 for (i-=j; i; i--, sym++, rel[0]+=sizeof(size_t)) {
2565 rel[1] = R_INFO(sym-p->syms, R_MIPS_JUMP_SLOT);
2566 do_relocs(p, rel, sizeof rel, 2);
2567 }
2568 }
2569
2570 static uint8_t* sleb128_decoder(uint8_t* current, uint8_t* end, size_t* value)
2571 {
2572 size_t result = 0;
2573 static const size_t size = CHAR_BIT * sizeof(result);
2574
2575 size_t shift = 0;
2576 uint8_t byte;
2577
2578 do {
2579 if (current >= end) {
2580 a_crash();
2581 }
2582
2583 byte = *current++;
2584 result |= ((size_t)(byte & 127) << shift);
2585 shift += 7;
2586 } while (byte & 128);
2587
2588 if (shift < size && (byte & 64)) {
2589 result |= -((size_t)(1) << shift);
2590 }
2591
2592 *value = result;
2593
2594 return current;
2595 }
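
/* Worked example for sleb128_decoder(): SLEB128 stores 7 payload bits per
 * byte, least-significant group first; bit 7 is the continuation flag and
 * bit 6 of the final byte carries the sign. The two bytes below decode to
 * -123. sleb128_example() is only illustrative. */
#if 0
#include <assert.h>

static void sleb128_example(void)
{
	uint8_t buf[] = { 0x85, 0x7f };   /* -123: low group 0x05 with continuation, then 0x7f */
	size_t v;
	sleb128_decoder(buf, buf + sizeof buf, &v);
	assert(v == (size_t)-123);        /* sign-extended result */
}
#endif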
2596
2597 static void do_android_relocs(struct dso *p, size_t dt_name, size_t dt_size)
2598 {
2599 size_t android_rel_addr = 0, android_rel_size = 0;
2600 uint8_t *android_rel_curr, *android_rel_end;
2601
2602 search_vec(p->dynv, &android_rel_addr, dt_name);
2603 search_vec(p->dynv, &android_rel_size, dt_size);
2604
2605 if (!android_rel_addr || (android_rel_size < 4)) {
2606 return;
2607 }
2608
2609 android_rel_curr = laddr(p, android_rel_addr);
2610 if (memcmp(android_rel_curr, "APS2", ANDROID_REL_SIGN_SIZE)) {
2611 return;
2612 }
2613
2614 android_rel_curr += ANDROID_REL_SIGN_SIZE;
2615 android_rel_size -= ANDROID_REL_SIGN_SIZE;
2616
2617 android_rel_end = android_rel_curr + android_rel_size;
2618
2619 size_t relocs_num;
2620 size_t rel[3] = {0};
2621
2622 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &relocs_num);
2623 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[0]);
2624
2625 for (size_t i = 0; i < relocs_num;) {
2626
2627 size_t group_size, group_flags;
2628
2629 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_size);
2630 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_flags);
2631
2632 size_t group_r_offset_delta = 0;
2633
2634 if (group_flags & RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG) {
2635 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_r_offset_delta);
2636 }
2637
2638 if (group_flags & RELOCATION_GROUPED_BY_INFO_FLAG) {
2639 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[1]);
2640 }
2641
2642 const size_t addend_flags = group_flags & (RELOCATION_GROUP_HAS_ADDEND_FLAG | RELOCATION_GROUPED_BY_ADDEND_FLAG);
2643
2644 if (addend_flags == RELOCATION_GROUP_HAS_ADDEND_FLAG) {
2645 } else if (addend_flags == (RELOCATION_GROUP_HAS_ADDEND_FLAG | RELOCATION_GROUPED_BY_ADDEND_FLAG)) {
2646 size_t addend;
2647 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &addend);
2648 rel[2] += addend;
2649 } else {
2650 rel[2] = 0;
2651 }
2652
2653 for (size_t j = 0; j < group_size; j++) {
2654 if (group_flags & RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG) {
2655 rel[0] += group_r_offset_delta;
2656 } else {
2657 size_t offset_detla;
2658 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &offset_detla);
2659
2660 rel[0] += offset_detla;
2661 }
2662
2663 if ((group_flags & RELOCATION_GROUPED_BY_INFO_FLAG) == 0) {
2664 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[1]);
2665 }
2666
2667 if (addend_flags == RELOCATION_GROUP_HAS_ADDEND_FLAG) {
2668 size_t addend;
2669 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &addend);
2670 rel[2] += addend;
2671 }
2672
2673 if (dt_name == DT_ANDROID_REL) {
2674 do_relocs(p, rel, sizeof(size_t) * 2, 2);
2675 } else {
2676 do_relocs(p, rel, sizeof(size_t) * 3, 3);
2677 }
2678 }
2679
2680 i += group_size;
2681 }
2682 }
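
/* Shape of the APS2 packed relocation stream as decoded by the function above
 * (every field is SLEB128-encoded and follows the 4-byte "APS2" signature):
 *
 *   total_reloc_count
 *   initial_r_offset
 *   per group:
 *     group_size
 *     group_flags
 *     [group_r_offset_delta]  if RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG
 *     [group_r_info]          if RELOCATION_GROUPED_BY_INFO_FLAG
 *     [group_r_addend_delta]  if grouped-by-addend
 *   then group_size entries, each carrying whichever of r_offset_delta,
 *   r_info and r_addend_delta was not hoisted into the group header. */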
2683
2684 static void do_relr_relocs(struct dso *dso, size_t *relr, size_t relr_size)
2685 {
2686 if (dso == &ldso) return; /* self-relocation was done in _dlstart */
2687 unsigned char *base = dso->base;
2688 size_t *reloc_addr;
2689 for (; relr_size; relr++, relr_size -= sizeof(size_t))
2690 if ((relr[0] & 1) == 0) {
2691 reloc_addr = laddr(dso, relr[0]);
2692 *reloc_addr++ += (size_t)base;
2693 } else {
2694 int i = 0;
2695 for (size_t bitmap = relr[0]; (bitmap >>= 1); i++)
2696 if (bitmap & 1)
2697 reloc_addr[i] += (size_t)base;
2698 reloc_addr += 8 * sizeof(size_t) - 1;
2699 }
2700 }
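
/* Worked example for do_relr_relocs() on a 64-bit target: an entry with its
 * low bit clear is an address whose word gets the load base added; an entry
 * with its low bit set is a bitmap whose bit i (i >= 1) marks the word i
 * slots after the last address entry. So the pair
 *     { 0x1000, 0x0005 }        (0x0005 = 0b101, low bit is the marker)
 * relocates the words at offsets 0x1000 and 0x1000 + 2*8, and the cursor then
 * advances by 63 words so a following bitmap covers the next 63-word window. */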
2701
2702 static void reloc_all(struct dso *p, const dl_extinfo *extinfo)
2703 {
2704 ssize_t relro_fd_offset = 0;
2705 size_t dyn[DYN_CNT];
2706 for (; p; p=p->next) {
2707 if (p->relocated) continue;
2708 if (p != &ldso) {
2709 add_can_search_so_list_in_dso(p, head);
2710 }
2711 decode_vec(p->dynv, dyn, DYN_CNT);
2712 if (NEED_MIPS_GOT_RELOCS)
2713 do_mips_relocs(p, laddr(p, dyn[DT_PLTGOT]));
2714 do_relocs(p, laddr(p, dyn[DT_JMPREL]), dyn[DT_PLTRELSZ],
2715 2+(dyn[DT_PLTREL]==DT_RELA));
2716 do_relocs(p, laddr(p, dyn[DT_REL]), dyn[DT_RELSZ], 2);
2717 do_relocs(p, laddr(p, dyn[DT_RELA]), dyn[DT_RELASZ], 3);
2718 if (!DL_FDPIC)
2719 do_relr_relocs(p, laddr(p, dyn[DT_RELR]), dyn[DT_RELRSZ]);
2720
2721 do_android_relocs(p, DT_ANDROID_REL, DT_ANDROID_RELSZ);
2722 do_android_relocs(p, DT_ANDROID_RELA, DT_ANDROID_RELASZ);
2723
2724 if (head != &ldso && p->relro_start != p->relro_end &&
2725 mprotect(laddr(p, p->relro_start), p->relro_end-p->relro_start, PROT_READ)
2726 && errno != ENOSYS) {
2727 error("Error relocating %s: RELRO protection failed: %m",
2728 p->name);
2729 if (runtime) longjmp(*rtld_fail, 1);
2730 }
2731 /* Handle serializing/mapping the RELRO segment */
2732 handle_relro_sharing(p, extinfo, &relro_fd_offset);
2733
2734 p->relocated = 1;
2735 free_reloc_can_search_dso(p);
2736 }
2737 }
2738
2739 static void kernel_mapped_dso(struct dso *p)
2740 {
2741 size_t min_addr = -1, max_addr = 0, cnt;
2742 Phdr *ph = p->phdr;
2743 for (cnt = p->phnum; cnt--; ph = (void *)((char *)ph + p->phentsize)) {
2744 if (ph->p_type == PT_DYNAMIC) {
2745 p->dynv = laddr(p, ph->p_vaddr);
2746 } else if (ph->p_type == PT_GNU_RELRO) {
2747 p->relro_start = ph->p_vaddr & -PAGE_SIZE;
2748 p->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
2749 } else if (ph->p_type == PT_GNU_STACK) {
2750 if (!runtime && ph->p_memsz > __default_stacksize) {
2751 __default_stacksize =
2752 ph->p_memsz < DEFAULT_STACK_MAX ?
2753 ph->p_memsz : DEFAULT_STACK_MAX;
2754 }
2755 }
2756 if (ph->p_type != PT_LOAD) continue;
2757 if (ph->p_vaddr < min_addr)
2758 min_addr = ph->p_vaddr;
2759 if (ph->p_vaddr+ph->p_memsz > max_addr)
2760 max_addr = ph->p_vaddr+ph->p_memsz;
2761 }
2762 min_addr &= -PAGE_SIZE;
2763 max_addr = (max_addr + PAGE_SIZE-1) & -PAGE_SIZE;
2764 p->map = p->base + min_addr;
2765 p->map_len = max_addr - min_addr;
2766 p->kernel_mapped = 1;
2767 }
2768
2769 void __libc_exit_fini()
2770 {
2771 struct dso *p;
2772 size_t dyn[DYN_CNT];
2773 pthread_t self = __pthread_self();
2774
2775 /* Take both locks before setting shutting_down, so that
2776 * either lock is sufficient to read its value. The lock
2777 * order matches that in dlopen to avoid deadlock. */
2778 pthread_rwlock_wrlock(&lock);
2779 pthread_mutex_lock(&init_fini_lock);
2780 shutting_down = 1;
2781 pthread_rwlock_unlock(&lock);
2782 for (p=fini_head; p; p=p->fini_next) {
2783 while (p->ctor_visitor && p->ctor_visitor!=self)
2784 pthread_cond_wait(&ctor_cond, &init_fini_lock);
2785 if (!p->constructed) continue;
2786 decode_vec(p->dynv, dyn, DYN_CNT);
2787 if (dyn[0] & (1<<DT_FINI_ARRAY)) {
2788 size_t n = dyn[DT_FINI_ARRAYSZ]/sizeof(size_t);
2789 size_t *fn = (size_t *)laddr(p, dyn[DT_FINI_ARRAY])+n;
2790 while (n--) ((void (*)(void))*--fn)();
2791 }
2792 #ifndef NO_LEGACY_INITFINI
2793 if ((dyn[0] & (1<<DT_FINI)) && dyn[DT_FINI])
2794 fpaddr(p, dyn[DT_FINI])();
2795 #endif
2796 }
2797 }
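
/* Worked example for the teardown loop above: DT_FINI_ARRAY is executed in
 * reverse, so a DSO whose fini array is {f1, f2, f3} runs f3, f2, f1 -- the
 * opposite of the order its constructors ran in. The legacy DT_FINI function,
 * when present, runs after the array. */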
2798
2799 void __pthread_mutex_unlock_atfork(int who)
2800 {
2801 if (who == 0) {
2802 // If a multithreaded process locks dlclose_lock and then calls fork,
2803 // dlclose_lock would never be unlocked in the child before it calls execve,
2804 // so reset dlclose_lock to make sure the child process can call dlclose after fork.
2805 __pthread_mutex_unlock_recursive_inner(&dlclose_lock);
2806 }
2807 }
2808
2809 void __ldso_atfork(int who)
2810 {
2811 if (who<0) {
2812 pthread_rwlock_wrlock(&lock);
2813 pthread_mutex_lock(&init_fini_lock);
2814 } else {
2815 pthread_mutex_unlock(&init_fini_lock);
2816 pthread_rwlock_unlock(&lock);
2817 }
2818 }
2819
2820 static struct dso **queue_ctors(struct dso *dso)
2821 {
2822 size_t cnt, qpos, spos, i;
2823 struct dso *p, **queue, **stack;
2824
2825 if (ldd_mode) return 0;
2826
2827 /* Bound on queue size is the total number of indirect deps.
2828 * If a bfs deps list was built, we can use it. Otherwise,
2829 * bound by the total number of DSOs, which is always safe and
2830 * reasonable to use (e.g. for the main app at startup). */
2831 if (dso->bfs_built) {
2832 for (cnt=0; dso->deps[cnt]; cnt++)
2833 dso->deps[cnt]->mark = 0;
2834 cnt++; /* self, not included in deps */
2835 } else {
2836 for (cnt=0, p=head; p; cnt++, p=p->next)
2837 p->mark = 0;
2838 }
2839 cnt++; /* termination slot */
2840 if (dso==head && cnt <= countof(builtin_ctor_queue))
2841 queue = builtin_ctor_queue;
2842 else
2843 queue = calloc(cnt, sizeof *queue);
2844
2845 if (!queue) {
2846 error("Error allocating constructor queue: %m\n");
2847 if (runtime) longjmp(*rtld_fail, 1);
2848 return 0;
2849 }
2850
2851 /* Opposite ends of the allocated buffer serve as an output queue
2852 * and a working stack. Setup initial stack with just the argument
2853 * dso and initial queue empty... */
2854 stack = queue;
2855 qpos = 0;
2856 spos = cnt;
2857 stack[--spos] = dso;
2858 dso->next_dep = 0;
2859 dso->mark = 1;
2860
2861 /* Then perform pseudo-DFS sort, but ignoring circular deps. */
2862 while (spos<cnt) {
2863 p = stack[spos++];
2864 while (p->next_dep < p->ndeps_direct) {
2865 if (p->deps[p->next_dep]->mark) {
2866 p->next_dep++;
2867 } else {
2868 stack[--spos] = p;
2869 p = p->deps[p->next_dep];
2870 p->next_dep = 0;
2871 p->mark = 1;
2872 }
2873 }
2874 queue[qpos++] = p;
2875 }
2876 queue[qpos] = 0;
2877 for (i=0; i<qpos; i++) queue[i]->mark = 0;
2878
2879 return queue;
2880 }
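
/* Worked example for queue_ctors(): with direct dependencies
 *   app -> {libA, libB},  libA -> {libB}
 * the pseudo-DFS emits leaves first, so the constructor queue becomes
 *   libB, libA, app
 * i.e. every DSO is constructed only after its (non-circular) dependencies.
 * Circular dependencies are broken at whichever member is reached first,
 * since already-marked nodes are skipped. */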
2881
2882 static void do_init_fini(struct dso **queue)
2883 {
2884 struct dso *p;
2885 size_t dyn[DYN_CNT], i;
2886 pthread_t self = __pthread_self();
2887
2888 pthread_mutex_lock(&init_fini_lock);
2889 for (i=0; (p=queue[i]); i++) {
2890 while ((p->ctor_visitor && p->ctor_visitor!=self) || shutting_down)
2891 pthread_cond_wait(&ctor_cond, &init_fini_lock);
2892 if (p->ctor_visitor || p->constructed)
2893 continue;
2894 p->ctor_visitor = self;
2895
2896 decode_vec(p->dynv, dyn, DYN_CNT);
2897 if (dyn[0] & ((1<<DT_FINI) | (1<<DT_FINI_ARRAY))) {
2898 p->fini_next = fini_head;
2899 fini_head = p;
2900 }
2901
2902 pthread_mutex_unlock(&init_fini_lock);
2903
2904 #ifndef NO_LEGACY_INITFINI
2905 if ((dyn[0] & (1<<DT_INIT)) && dyn[DT_INIT])
2906 fpaddr(p, dyn[DT_INIT])();
2907 #endif
2908 if (dyn[0] & (1<<DT_INIT_ARRAY)) {
2909 size_t n = dyn[DT_INIT_ARRAYSZ]/sizeof(size_t);
2910 size_t *fn = laddr(p, dyn[DT_INIT_ARRAY]);
2911 if (p != &ldso) {
2912 trace_marker_begin(HITRACE_TAG_MUSL, "calling constructors: ", p->name);
2913 }
2914 while (n--) ((void (*)(void))*fn++)();
2915 if (p != &ldso) {
2916 trace_marker_end(HITRACE_TAG_MUSL);
2917 }
2918 }
2919
2920 pthread_mutex_lock(&init_fini_lock);
2921 p->ctor_visitor = 0;
2922 p->constructed = 1;
2923 pthread_cond_broadcast(&ctor_cond);
2924 }
2925 pthread_mutex_unlock(&init_fini_lock);
2926 }
2927
2928 void __libc_start_init(void)
2929 {
2930 do_init_fini(main_ctor_queue);
2931 if (!__malloc_replaced && main_ctor_queue != builtin_ctor_queue)
2932 free(main_ctor_queue);
2933 main_ctor_queue = 0;
2934 }
2935
2936 static void dl_debug_state(void)
2937 {
2938 }
2939
2940 weak_alias(dl_debug_state, _dl_debug_state);
2941
2942 void __init_tls(size_t *auxv)
2943 {
2944 }
2945
2946 static void update_tls_size()
2947 {
2948 libc.tls_cnt = tls_cnt;
2949 libc.tls_align = tls_align;
2950 libc.tls_size = ALIGN(
2951 (1+tls_cnt) * sizeof(void *) +
2952 tls_offset +
2953 sizeof(struct pthread) +
2954 tls_align * 2,
2955 tls_align);
2956 }
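
/* Example instantiation of the formula above on a 64-bit target with
 * tls_cnt = 2, tls_offset = 0x140 and tls_align = 16 (illustrative values only):
 *   libc.tls_size = ALIGN((1+2)*8 + 0x140 + sizeof(struct pthread) + 16*2, 16)
 * The two extra tls_align terms leave slack so both the DTV/TLS area and the
 * pthread structure can be aligned within one allocation. */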
2957
2958 static void install_new_tls(void)
2959 {
2960 sigset_t set;
2961 pthread_t self = __pthread_self(), td;
2962 struct dso *dtv_provider = container_of(tls_tail, struct dso, tls);
2963 uintptr_t (*newdtv)[tls_cnt+1] = (void *)dtv_provider->new_dtv;
2964 struct dso *p;
2965 size_t i, j;
2966 size_t old_cnt = self->dtv[0];
2967
2968 __block_app_sigs(&set);
2969 __tl_lock();
2970 if (get_tl_lock_caller_count()) {
2971 get_tl_lock_caller_count()->install_new_tls_tl_lock++;
2972 }
2973 /* Copy existing dtv contents from all existing threads. */
2974 for (i=0, td=self; !i || td!=self; i++, td=td->next) {
2975 memcpy(newdtv+i, td->dtv,
2976 (old_cnt+1)*sizeof(uintptr_t));
2977 newdtv[i][0] = tls_cnt;
2978 }
2979 /* Install new dtls into the enlarged, uninstalled dtv copies. */
2980 for (p=head; ; p=p->next) {
2981 if (p->tls_id <= old_cnt) continue;
2982 unsigned char *mem = p->new_tls;
2983 for (j=0; j<i; j++) {
2984 unsigned char *new = mem;
2985 new += ((uintptr_t)p->tls.image - (uintptr_t)mem)
2986 & (p->tls.align-1);
2987 memcpy(new, p->tls.image, p->tls.len);
2988 newdtv[j][p->tls_id] =
2989 (uintptr_t)new + DTP_OFFSET;
2990 mem += p->tls.size + p->tls.align;
2991 }
2992 if (p->tls_id == tls_cnt) break;
2993 }
2994
2995 /* Broadcast barrier to ensure contents of new dtv is visible
2996 * if the new dtv pointer is. The __membarrier function has a
2997 * fallback emulation using signals for kernels that lack the
2998 * feature at the syscall level. */
2999
3000 __membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
3001
3002 /* Install new dtv for each thread. */
3003 for (j=0, td=self; !j || td!=self; j++, td=td->next) {
3004 td->dtv = newdtv[j];
3005 }
3006
3007 if (get_tl_lock_caller_count()) {
3008 get_tl_lock_caller_count()->install_new_tls_tl_lock--;
3009 }
3010 __tl_unlock();
3011 __restore_sigs(&set);
3012 }
3013
3014 /* Stage 1 of the dynamic linker is defined in dlstart.c. It calls the
3015 * following stage 2 and stage 3 functions via primitive symbolic lookup
3016 * since it does not have access to their addresses to begin with. */
3017
3018 /* Stage 2 of the dynamic linker is called after relative relocations
3019 * have been processed. It can make function calls to static functions
3020 * and access string literals and static data, but cannot use extern
3021 * symbols. Its job is to perform symbolic relocations on the dynamic
3022 * linker itself, but some of the relocations performed may need to be
3023 * replaced later due to copy relocations in the main program. */
3024
3025 hidden void __dls2(unsigned char *base, size_t *sp)
3026 {
3027 size_t *auxv;
3028 for (auxv=sp+1+*sp+1; *auxv; auxv++);
3029 auxv++;
3030 if (DL_FDPIC) {
3031 void *p1 = (void *)sp[-2];
3032 void *p2 = (void *)sp[-1];
3033 if (!p1) {
3034 size_t aux[AUX_CNT];
3035 decode_vec(auxv, aux, AUX_CNT);
3036 if (aux[AT_BASE]) ldso.base = (void *)aux[AT_BASE];
3037 else ldso.base = (void *)(aux[AT_PHDR] & -4096);
3038 }
3039 app_loadmap = p2 ? p1 : 0;
3040 ldso.loadmap = p2 ? p2 : p1;
3041 ldso.base = laddr(&ldso, 0);
3042 } else {
3043 ldso.base = base;
3044 }
3045 size_t aux[AUX_CNT];
3046 decode_vec(auxv, aux, AUX_CNT);
3047 libc.page_size = aux[AT_PAGESZ];
3048 Ehdr *ehdr = __ehdr_start ? (void *)__ehdr_start : (void *)ldso.base;
3049 ldso.name = ldso.shortname = "libc.so";
3050 ldso.phnum = ehdr->e_phnum;
3051 ldso.phdr = laddr(&ldso, ehdr->e_phoff);
3052 ldso.phentsize = ehdr->e_phentsize;
3053 ldso.is_global = true;
3054 search_vec(auxv, &ldso_page_size, AT_PAGESZ);
3055 kernel_mapped_dso(&ldso);
3056 decode_dyn(&ldso);
3057
3058 if (DL_FDPIC) makefuncdescs(&ldso);
3059
3060 /* Prepare storage to save clobbered REL addends so they
3061 * can be reused in stage 3. There should be very few. If
3062 * something goes wrong and there are a huge number, abort
3063 * instead of risking stack overflow. */
3064 size_t dyn[DYN_CNT];
3065 decode_vec(ldso.dynv, dyn, DYN_CNT);
3066 size_t *rel = laddr(&ldso, dyn[DT_REL]);
3067 size_t rel_size = dyn[DT_RELSZ];
3068 size_t symbolic_rel_cnt = 0;
3069 apply_addends_to = rel;
3070 for (; rel_size; rel+=2, rel_size-=2*sizeof(size_t))
3071 if (!IS_RELATIVE(rel[1], ldso.syms)) symbolic_rel_cnt++;
3072 if (symbolic_rel_cnt >= ADDEND_LIMIT) a_crash();
3073 size_t addends[symbolic_rel_cnt+1];
3074 saved_addends = addends;
3075
3076 head = &ldso;
3077 reloc_all(&ldso, NULL);
3078
3079 ldso.relocated = 0;
3080
3081 /* Call dynamic linker stage-2b, __dls2b, looking it up
3082 * symbolically as a barrier against moving the address
3083 * load across the above relocation processing. */
3084 struct symdef dls2b_def = find_sym(&ldso, "__dls2b", 0);
3085 if (DL_FDPIC) ((stage3_func)&ldso.funcdescs[dls2b_def.sym-ldso.syms])(sp, auxv, aux);
3086 else ((stage3_func)laddr(&ldso, dls2b_def.sym->st_value))(sp, auxv, aux);
3087 }
3088
3089 /* Stage 2b sets up a valid thread pointer, which requires relocations
3090 * completed in stage 2, and on which stage 3 is permitted to depend.
3091 * This is done as a separate stage, with symbolic lookup as a barrier,
3092 * so that loads of the thread pointer and &errno can be pure/const and
3093 * thereby hoistable. */
3094
3095 void __dls2b(size_t *sp, size_t *auxv, size_t *aux)
3096 {
3097 /* Setup early thread pointer in builtin_tls for ldso/libc itself to
3098 * use during dynamic linking. If possible it will also serve as the
3099 * thread pointer at runtime. */
3100 search_vec(auxv, &__hwcap, AT_HWCAP);
3101 libc.auxv = auxv;
3102 libc.tls_size = sizeof builtin_tls;
3103 libc.tls_align = tls_align;
3104 if (__init_tp(__copy_tls((void *)builtin_tls)) < 0) {
3105 a_crash();
3106 }
3107 __pthread_self()->stack = (void *)(sp + 1);
3108 struct symdef dls3_def = find_sym(&ldso, "__dls3", 0);
3109 if (DL_FDPIC) ((stage3_func)&ldso.funcdescs[dls3_def.sym-ldso.syms])(sp, auxv, aux);
3110 else ((stage3_func)laddr(&ldso, dls3_def.sym->st_value))(sp, auxv, aux);
3111 }
3112
3113 /* Stage 3 of the dynamic linker is called with the dynamic linker/libc
3114 * fully functional. Its job is to load (if not already loaded) and
3115 * process dependencies and relocations for the main application and
3116 * transfer control to its entry point. */
3117
3118 void __dls3(size_t *sp, size_t *auxv, size_t *aux)
3119 {
3120 static struct dso app, vdso;
3121 size_t i;
3122 char *env_preload=0;
3123 char *replace_argv0=0;
3124 size_t vdso_base;
3125 int argc = *sp;
3126 char **argv = (void *)(sp+1);
3127 char **argv_orig = argv;
3128 char **envp = argv+argc+1;
3129
3130 /* Find aux vector just past environ[] and use it to initialize
3131 * global data that may be needed before we can make syscalls. */
3132 __environ = envp;
3133 search_vec(auxv, &__sysinfo, AT_SYSINFO);
3134 __pthread_self()->sysinfo = __sysinfo;
3135 libc.secure = ((aux[0]&0x7800)!=0x7800 || aux[AT_UID]!=aux[AT_EUID]
3136 || aux[AT_GID]!=aux[AT_EGID] || aux[AT_SECURE]);
3137
3138 /* Only trust user/env if kernel says we're not suid/sgid */
3139 if (!libc.secure) {
3140 env_path = getenv("LD_LIBRARY_PATH");
3141 env_preload = getenv("LD_PRELOAD");
3142 }
3143
3144 /* Activate error handler function */
3145 error = error_impl;
3146
3147 #ifdef OHOS_ENABLE_PARAMETER
3148 InitParameterClient();
3149 #endif
3150 // We may abort while linking other libs, so install the signal handler before this stage starts
3151 #ifdef DFX_SIGNAL_LIBC
3152 DFX_InstallSignalHandler();
3153 #endif
3154 InitHilogSocketFd();
3155 __init_fdsan();
3156 InitTimeZoneParam();
3157 /* If the main program was already loaded by the kernel,
3158 * AT_PHDR will point to some location other than the dynamic
3159 * linker's program headers. */
3160 if (aux[AT_PHDR] != (size_t)ldso.phdr) {
3161 size_t interp_off = 0;
3162 size_t tls_image = 0;
3163 /* Find load address of the main program, via AT_PHDR vs PT_PHDR. */
3164 Phdr *phdr = app.phdr = (void *)aux[AT_PHDR];
3165 app.phnum = aux[AT_PHNUM];
3166 app.phentsize = aux[AT_PHENT];
3167 for (i = aux[AT_PHNUM]; i; i--, phdr = (void *)((char *)phdr + aux[AT_PHENT])) {
3168 if (phdr->p_type == PT_PHDR)
3169 app.base = (void *)(aux[AT_PHDR] - phdr->p_vaddr);
3170 else if (phdr->p_type == PT_INTERP)
3171 interp_off = (size_t)phdr->p_vaddr;
3172 else if (phdr->p_type == PT_TLS) {
3173 tls_image = phdr->p_vaddr;
3174 app.tls.len = phdr->p_filesz;
3175 app.tls.size = phdr->p_memsz;
3176 app.tls.align = phdr->p_align;
3177 }
3178 }
3179 if (DL_FDPIC) app.loadmap = app_loadmap;
3180 if (app.tls.size) app.tls.image = laddr(&app, tls_image);
3181 if (interp_off) ldso.name = laddr(&app, interp_off);
3182 if ((aux[0] & (1UL<<AT_EXECFN))
3183 && strncmp((char *)aux[AT_EXECFN], "/proc/", 6))
3184 app.name = (char *)aux[AT_EXECFN];
3185 else
3186 app.name = argv[0];
3187 kernel_mapped_dso(&app);
3188 } else {
3189 int fd;
3190 char *ldname = argv[0];
3191 size_t l = strlen(ldname);
3192 if (l >= 3 && !strcmp(ldname+l-3, "ldd")) ldd_mode = 1;
3193 argv++;
3194 while (argv[0] && argv[0][0]=='-' && argv[0][1]=='-') {
3195 char *opt = argv[0]+2;
3196 *argv++ = (void *)-1;
3197 if (!*opt) {
3198 break;
3199 } else if (!memcmp(opt, "list", 5)) {
3200 ldd_mode = 1;
3201 } else if (!memcmp(opt, "library-path", 12)) {
3202 if (opt[12]=='=') env_path = opt+13;
3203 else if (opt[12]) *argv = 0;
3204 else if (*argv) env_path = *argv++;
3205 } else if (!memcmp(opt, "preload", 7)) {
3206 if (opt[7]=='=') env_preload = opt+8;
3207 else if (opt[7]) *argv = 0;
3208 else if (*argv) env_preload = *argv++;
3209 } else if (!memcmp(opt, "argv0", 5)) {
3210 if (opt[5]=='=') replace_argv0 = opt+6;
3211 else if (opt[5]) *argv = 0;
3212 else if (*argv) replace_argv0 = *argv++;
3213 } else {
3214 argv[0] = 0;
3215 }
3216 }
3217 argv[-1] = (void *)(argc - (argv-argv_orig));
3218 if (!argv[0]) {
3219 dprintf(2, "musl libc (" LDSO_ARCH ")\n"
3220 "Version %s\n"
3221 "Dynamic Program Loader\n"
3222 "Usage: %s [options] [--] pathname%s\n",
3223 __libc_version, ldname,
3224 ldd_mode ? "" : " [args]");
3225 _exit(1);
3226 }
3227 fd = open(argv[0], O_RDONLY);
3228 if (fd < 0) {
3229 dprintf(2, "%s: cannot load %s: %s\n", ldname, argv[0], strerror(errno));
3230 _exit(1);
3231 }
3232 Ehdr *ehdr = map_library(fd, &app, NULL);
3233 if (!ehdr) {
3234 dprintf(2, "%s: %s: Not a valid dynamic program\n", ldname, argv[0]);
3235 _exit(1);
3236 }
3237 close(fd);
3238 ldso.name = ldname;
3239 app.name = argv[0];
3240 aux[AT_ENTRY] = (size_t)laddr(&app, ehdr->e_entry);
3241 /* Find the name that would have been used for the dynamic
3242 * linker had ldd not taken its place. */
3243 if (ldd_mode) {
3244 for (i=0; i<app.phnum; i++) {
3245 if (app.phdr[i].p_type == PT_INTERP)
3246 ldso.name = laddr(&app, app.phdr[i].p_vaddr);
3247 }
3248 dprintf(1, "\t%s (%p)\n", ldso.name, ldso.base);
3249 }
3250 }
3251 if (app.tls.size) {
3252 libc.tls_head = tls_tail = &app.tls;
3253 app.tls_id = tls_cnt = 1;
3254 #ifdef TLS_ABOVE_TP
3255 app.tls.offset = GAP_ABOVE_TP;
3256 app.tls.offset += (-GAP_ABOVE_TP + (uintptr_t)app.tls.image)
3257 & (app.tls.align-1);
3258 tls_offset = app.tls.offset + app.tls.size;
3259 #else
3260 tls_offset = app.tls.offset = app.tls.size
3261 + ( -((uintptr_t)app.tls.image + app.tls.size)
3262 & (app.tls.align-1) );
3263 #endif
3264 tls_align = MAXP2(tls_align, app.tls.align);
3265 }
3266 decode_dyn(&app);
3267 if (DL_FDPIC) {
3268 makefuncdescs(&app);
3269 if (!app.loadmap) {
3270 app.loadmap = (void *)&app_dummy_loadmap;
3271 app.loadmap->nsegs = 1;
3272 app.loadmap->segs[0].addr = (size_t)app.map;
3273 app.loadmap->segs[0].p_vaddr = (size_t)app.map
3274 - (size_t)app.base;
3275 app.loadmap->segs[0].p_memsz = app.map_len;
3276 }
3277 argv[-3] = (void *)app.loadmap;
3278 }
3279 app.is_global = true;
3280
3281 /* Initial dso chain consists only of the app. */
3282 head = tail = syms_tail = &app;
3283
3284 /* Donate unused parts of app and library mapping to malloc */
3285 reclaim_gaps(&app);
3286 reclaim_gaps(&ldso);
3287
3288 find_and_set_bss_name(&app);
3289 find_and_set_bss_name(&ldso);
3290
3291 /* Load preload/needed libraries, add symbols to global namespace. */
3292 ldso.deps = (struct dso **)no_deps;
3293 /* Init g_is_asan */
3294 g_is_asan = false;
3295 LD_LOGD("__dls3 ldso.name:%{public}s.", ldso.name);
3296 /* Use the ldso name to judge whether Asan is enabled */
3297 if (strstr(ldso.name, "-asan")) {
3298 g_is_asan = true;
3299 LD_LOGD("__dls3 g_is_asan is true.");
3300 }
3301 /* Init all namespaces from the config file; a default namespace always exists. */
3302 init_namespace(&app);
3303
3304 #ifdef LOAD_ORDER_RANDOMIZATION
3305 struct loadtasks *tasks = create_loadtasks();
3306 if (!tasks) {
3307 _exit(1);
3308 }
3309 if (env_preload) {
3310 load_preload(env_preload, get_default_ns(), tasks);
3311 }
3312 for (struct dso *q = head; q; q = q->next) {
3313 q->is_global = true;
3314 q->is_preload = true;
3315 }
3316 preload_deps(&app, tasks);
3317 unmap_preloaded_sections(tasks);
3318 shuffle_loadtasks(tasks);
3319 run_loadtasks(tasks, NULL);
3320 free_loadtasks(tasks);
3321 assign_tls(app.next);
3322 #else
3323 if (env_preload) load_preload(env_preload, get_default_ns());
3324 for (struct dso *q = head; q; q = q->next) {
3325 q->is_global = true;
3326 q->is_preload = true;
3327 }
3328 load_deps(&app, NULL);
3329 #endif
3330
3331 /* Set is_reloc_head_so_dep to true for all direct and indirect dependencies of app, including app itself. */
3332 for (struct dso *p = head; p; p = p->next) {
3333 p->is_reloc_head_so_dep = true;
3334 add_syms(p);
3335 }
3336
3337 /* Attach to vdso, if provided by the kernel, last so that it does
3338 * not become part of the global namespace. */
3339 if (search_vec(auxv, &vdso_base, AT_SYSINFO_EHDR) && vdso_base) {
3340 Ehdr *ehdr = (void *)vdso_base;
3341 Phdr *phdr = vdso.phdr = (void *)(vdso_base + ehdr->e_phoff);
3342 vdso.phnum = ehdr->e_phnum;
3343 vdso.phentsize = ehdr->e_phentsize;
3344 for (i=ehdr->e_phnum; i; i--, phdr=(void *)((char *)phdr + ehdr->e_phentsize)) {
3345 if (phdr->p_type == PT_DYNAMIC)
3346 vdso.dynv = (void *)(vdso_base + phdr->p_offset);
3347 if (phdr->p_type == PT_LOAD)
3348 vdso.base = (void *)(vdso_base - phdr->p_vaddr + phdr->p_offset);
3349 }
3350 vdso.name = "";
3351 vdso.shortname = "linux-gate.so.1";
3352 vdso.relocated = 1;
3353 vdso.deps = (struct dso **)no_deps;
3354 decode_dyn(&vdso);
3355 vdso.prev = tail;
3356 tail->next = &vdso;
3357 tail = &vdso;
3358 vdso.namespace = get_default_ns();
3359 ns_add_dso(vdso.namespace, &vdso);
3360 }
3361
3362 for (i=0; app.dynv[i]; i+=2) {
3363 if (!DT_DEBUG_INDIRECT && app.dynv[i]==DT_DEBUG)
3364 app.dynv[i+1] = (size_t)&debug;
3365 if (DT_DEBUG_INDIRECT && app.dynv[i]==DT_DEBUG_INDIRECT) {
3366 size_t *ptr = (size_t *) app.dynv[i+1];
3367 *ptr = (size_t)&debug;
3368 }
3369 if (app.dynv[i]==DT_DEBUG_INDIRECT_REL) {
3370 size_t *ptr = (size_t *)((size_t)&app.dynv[i] + app.dynv[i+1]);
3371 *ptr = (size_t)&debug;
3372 }
3373 }
3374
3375 /* This must be done before final relocations, since it calls
3376 * malloc, which may be provided by the application. Calling any
3377 * application code prior to the jump to its entry point is not
3378 * valid in our model and does not work with FDPIC, where there
3379 * are additional relocation-like fixups that only the entry point
3380 * code can see to perform. */
3381 main_ctor_queue = queue_ctors(&app);
3382
3383 /* Initial TLS must also be allocated before final relocations
3384 * might result in calloc being a call to application code. */
3385 update_tls_size();
3386 void *initial_tls = builtin_tls;
3387 if (libc.tls_size > sizeof builtin_tls || tls_align > MIN_TLS_ALIGN) {
3388 initial_tls = calloc(libc.tls_size, 1);
3389 if (!initial_tls) {
3390 dprintf(2, "%s: Error getting %zu bytes thread-local storage: %m\n",
3391 argv[0], libc.tls_size);
3392 _exit(127);
3393 }
3394 }
3395 static_tls_cnt = tls_cnt;
3396
3397 /* The main program must be relocated LAST since it may contain
3398 * copy relocations which depend on libraries' relocations. */
3399 reloc_all(app.next, NULL);
3400 reloc_all(&app, NULL);
3401 for (struct dso *q = head; q; q = q->next) {
3402 q->is_reloc_head_so_dep = false;
3403 }
3404
3405 /* Actual copying to new TLS needs to happen after relocations,
3406 * since the TLS images might have contained relocated addresses. */
3407 if (initial_tls != builtin_tls) {
3408 pthread_t self = __pthread_self();
3409 pthread_t td = __copy_tls(initial_tls);
3410 if (__init_tp(td) < 0) {
3411 a_crash();
3412 }
3413 td->tsd = self->tsd;
3414 } else {
3415 size_t tmp_tls_size = libc.tls_size;
3416 pthread_t self = __pthread_self();
3417 /* Temporarily set the tls size to the full size of
3418 * builtin_tls so that __copy_tls will use the same layout
3419 * as it did before. Then check, just to be safe. */
3420 libc.tls_size = sizeof builtin_tls;
3421 if (__copy_tls((void*)builtin_tls) != self) a_crash();
3422 libc.tls_size = tmp_tls_size;
3423 }
3424
3425 if (init_cfi_shadow(head, &ldso) == CFI_FAILED) {
3426 error("[%s] init_cfi_shadow failed: %m", __FUNCTION__);
3427 }
3428
3429 if (ldso_fail) _exit(127);
3430 if (ldd_mode) _exit(0);
3431
3432 /* Determine if malloc was interposed by a replacement implementation
3433 * so that calloc and the memalign family can harden against the
3434 * possibility of incomplete replacement. */
3435 if (find_sym(head, "malloc", 1).dso != &ldso)
3436 __malloc_replaced = 1;
3437 if (find_sym(head, "aligned_alloc", 1).dso != &ldso)
3438 __aligned_alloc_replaced = 1;
3439
3440 /* Switch to runtime mode: any further failures in the dynamic
3441 * linker are a reportable failure rather than a fatal startup
3442 * error. */
3443 runtime = 1;
3444
3445 sync_with_debugger();
3446
3447 if (replace_argv0) argv[0] = replace_argv0;
3448
3449 #ifdef USE_GWP_ASAN
3450 init_gwp_asan_by_libc(false);
3451 #endif
3452
3453 errno = 0;
3454
3455 CRTJMP((void *)aux[AT_ENTRY], argv - 1);
3456 for(;;);
3457 }
3458
3459 static void prepare_lazy(struct dso *p)
3460 {
3461 size_t dyn[DYN_CNT], n, flags1=0;
3462 decode_vec(p->dynv, dyn, DYN_CNT);
3463 search_vec(p->dynv, &flags1, DT_FLAGS_1);
3464 if (dyn[DT_BIND_NOW] || (dyn[DT_FLAGS] & DF_BIND_NOW) || (flags1 & DF_1_NOW))
3465 return;
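	/* Generous upper bound on the number of deferred relocations: DT_RELSZ,
	 * DT_RELASZ and DT_PLTRELSZ are byte sizes, and a REL entry is 2 words
	 * while a RELA entry is 3 words, so dividing by only 2 and 3 over-counts
	 * the entries by roughly a factor of the word size. Each saved lazy slot
	 * below consumes 3 words. (Descriptive comment, not in the original.) */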
3466 n = dyn[DT_RELSZ]/2 + dyn[DT_RELASZ]/3 + dyn[DT_PLTRELSZ]/2 + 1;
3467 if (NEED_MIPS_GOT_RELOCS) {
3468 size_t j=0; search_vec(p->dynv, &j, DT_MIPS_GOTSYM);
3469 size_t i=0; search_vec(p->dynv, &i, DT_MIPS_SYMTABNO);
3470 n += i-j;
3471 }
3472 p->lazy = calloc(n, 3*sizeof(size_t));
3473 if (!p->lazy) {
3474 error("Error preparing lazy relocation for %s: %m", p->name);
3475 longjmp(*rtld_fail, 1);
3476 }
3477 p->lazy_next = lazy_head;
3478 lazy_head = p;
3479 }
3480
3481 static void *dlopen_post(struct dso* p, int mode) {
3482 if (p == NULL) {
3483 return p;
3484 }
3485 bool is_dlclose_debug = false;
3486 if (is_dlclose_debug_enable()) {
3487 is_dlclose_debug = true;
3488 }
3489 p->nr_dlopen++;
3490 if (is_dlclose_debug) {
3491 LD_LOGE("[dlclose]: %{public}s nr_dlopen++ when dlopen %{public}s, nr_dlopen:%{public}d ",
3492 p->name, p->name, p->nr_dlopen);
3493 }
3494 if (p->bfs_built) {
3495 for (int i = 0; p->deps[i]; i++) {
3496 p->deps[i]->nr_dlopen++;
3497 if (is_dlclose_debug) {
3498 LD_LOGE("[dlclose]: %{public}s nr_dlopen++ when dlopen %{public}s, nr_dlopen:%{public}d",
3499 p->deps[i]->name, p->name, p->deps[i]->nr_dlopen);
3500 }
3501 if (mode & RTLD_NODELETE) {
3502 p->deps[i]->flags |= DSO_FLAGS_NODELETE;
3503 }
3504 }
3505 }
3506
3507 #ifdef HANDLE_RANDOMIZATION
3508 void *handle = assign_valid_handle(p);
3509 if (handle == NULL) {
3510 LD_LOGE("dlopen_post: generate random handle failed");
3511 do_dlclose(p, 0);
3512 }
3513
3514 return handle;
3515 #endif
3516
3517 return p;
3518 }
3519
3520 static char *dlopen_permitted_list[] =
3521 {
3522 "default",
3523 "ndk",
3524 };
3525
3526 #define PERMITTED_TARGET "nweb_ns"
3527 static bool in_permitted_list(char *caller, char *target)
3528 {
3529 for (int i = 0; i < sizeof(dlopen_permitted_list)/sizeof(char*); i++) {
3530 if (strcmp(dlopen_permitted_list[i], caller) == 0) {
3531 return true;
3532 }
3533 }
3534
3535 if (strcmp(PERMITTED_TARGET, target) == 0) {
3536 return true;
3537 }
3538
3539 return false;
3540 }
3541
3542 static bool is_permitted(const void *caller_addr, char *target)
3543 {
3544 struct dso *caller;
3545 ns_t *ns;
3546 caller = (struct dso *)addr2dso((size_t)caller_addr);
3547 if ((caller == NULL) || (caller->namespace == NULL)) {
3548 LD_LOGE("caller ns get error");
3549 return false;
3550 }
3551
3552 ns = caller->namespace;
3553 if (in_permitted_list(ns->ns_name, target) == false) {
3554 LD_LOGE("caller ns: %{public}s have no permission, target is %{public}s", ns->ns_name, target);
3555 return false;
3556 }
3557
3558 return true;
3559 }
3560
3561 /* dlopen with namespace support.
3562 * Some limitations come from the sanitizers:
3563 * the sanitizer runtime requires this interface to be exposed,
3564 * and it may hook and call this interface, so take care when relying on __builtin_return_address here.
3565 */
3566 void *dlopen_impl(
3567 const char *file, int mode, const char *namespace, const void *caller_addr, const dl_extinfo *extinfo)
3568 {
3569 struct dso *volatile p, *orig_tail, *orig_syms_tail, *orig_lazy_head, *next;
3570 struct tls_module *orig_tls_tail;
3571 size_t orig_tls_cnt, orig_tls_offset, orig_tls_align;
3572 size_t i;
3573 int cs;
3574 jmp_buf jb;
3575 struct dso **volatile ctor_queue = 0;
3576 ns_t *ns;
3577 struct dso *caller;
3578 bool reserved_address = false;
3579 bool reserved_address_recursive = false;
3580 struct reserved_address_params reserved_params = {0};
3581 struct dlopen_time_info dlopen_cost = {0};
3582 struct timespec time_start, time_end, total_start, total_end;
3583 struct dso *current_so = NULL;
3584 clock_gettime(CLOCK_MONOTONIC, &total_start);
3585 #ifdef LOAD_ORDER_RANDOMIZATION
3586 struct loadtasks *tasks = NULL;
3587 struct loadtask *task = NULL;
3588 bool is_task_appended = false;
3589 #endif
3590 #ifdef IS_ASAN
3591 char asan_file[PATH_MAX] = {0};
3592 #endif
3593
3594 if (!file) {
3595 LD_LOGD("dlopen_impl file is null, return head.");
3596 return dlopen_post(head, mode);
3597 }
3598
3599 #ifdef IS_ASAN
3600 if (g_is_asan) {
3601 char *place = strstr(file, LIB);
3602 if (place && asan_file) {
3603 int ret = snprintf(asan_file, sizeof asan_file, "%.*s/asan%s", (int)(place - file), file, place);
3604 if (ret > 0 && access(asan_file, F_OK) == 0) {
3605 LD_LOGI("dlopen_impl redirect to asan library.");
3606 file = asan_file;
3607 }
3608 }
3609 }
3610 #endif
3611
3612 if (extinfo) {
3613 reserved_address_recursive = extinfo->flag & DL_EXT_RESERVED_ADDRESS_RECURSIVE;
3614 if (extinfo->flag & DL_EXT_RESERVED_ADDRESS) {
3615 reserved_address = true;
3616 reserved_params.start_addr = extinfo->reserved_addr;
3617 reserved_params.reserved_size = extinfo->reserved_size;
3618 reserved_params.must_use_reserved = true;
3619 reserved_params.reserved_address_recursive = reserved_address_recursive;
3620 } else if (extinfo->flag & DL_EXT_RESERVED_ADDRESS_HINT) {
3621 reserved_address = true;
3622 reserved_params.start_addr = extinfo->reserved_addr;
3623 reserved_params.reserved_size = extinfo->reserved_size;
3624 reserved_params.must_use_reserved = false;
3625 reserved_params.reserved_address_recursive = reserved_address_recursive;
3626 }
3627 }
3628
3629 pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
3630 pthread_rwlock_wrlock(&lock);
3631 __inhibit_ptc();
3632 trace_marker_reset();
3633 trace_marker_begin(HITRACE_TAG_MUSL, "dlopen: ", file);
3634
3635 /* When the namespace does not exist, use the caller's namespace;
3636 * when the caller does not exist, use the default namespace. */
3637 caller = (struct dso *)addr2dso((size_t)caller_addr);
3638 ns = find_ns_by_name(namespace);
3639 if (!ns) ns = ((caller && caller->namespace) ? caller->namespace : get_default_ns());
3640
3641 p = 0;
3642 if (shutting_down) {
3643 error("Cannot dlopen while program is exiting.");
3644 goto end;
3645 }
3646 orig_tls_tail = tls_tail;
3647 orig_tls_cnt = tls_cnt;
3648 orig_tls_offset = tls_offset;
3649 orig_tls_align = tls_align;
3650 orig_lazy_head = lazy_head;
3651 orig_syms_tail = syms_tail;
3652 orig_tail = tail;
3653 noload = mode & RTLD_NOLOAD;
3654
3655 rtld_fail = &jb;
3656 if (setjmp(*rtld_fail)) {
3657 /* Clean up anything new that was (partially) loaded */
3658 revert_syms(orig_syms_tail);
3659 for (p = orig_tail->next; p; p = next) {
3660 next = p->next;
3661 while (p->td_index) {
3662 void *tmp = p->td_index->next;
3663 free(p->td_index);
3664 p->td_index = tmp;
3665 }
3666 free(p->funcdescs);
3667 free(p->rpath);
3668 if (p->deps) {
3669 for (int i = 0; i < p->ndeps_direct; i++) {
3670 remove_dso_parent(p->deps[i], p);
3671 }
3672 }
3673 free(p->deps);
3674 dlclose_ns(p);
3675 unmap_library(p);
3676 if (p->parents) {
3677 free(p->parents);
3678 }
3679 free_reloc_can_search_dso(p);
3680 }
3681 for (p=orig_tail->next; p; p=next) {
3682 next = p->next;
3683 free(p);
3684 }
3685 free(ctor_queue);
3686 ctor_queue = 0;
3687 if (!orig_tls_tail) libc.tls_head = 0;
3688 tls_tail = orig_tls_tail;
3689 if (tls_tail) tls_tail->next = 0;
3690 tls_cnt = orig_tls_cnt;
3691 tls_offset = orig_tls_offset;
3692 tls_align = orig_tls_align;
3693 lazy_head = orig_lazy_head;
3694 tail = orig_tail;
3695 tail->next = 0;
3696 p = 0;
3697 goto end;
3698 } else {
3699 #ifdef LOAD_ORDER_RANDOMIZATION
3700 tasks = create_loadtasks();
3701 if (!tasks) {
3702 LD_LOGE("dlopen_impl create loadtasks failed");
3703 goto end;
3704 }
3705 task = create_loadtask(file, head, ns, true);
3706 if (!task) {
3707 LD_LOGE("dlopen_impl create loadtask failed");
3708 goto end;
3709 }
3710 trace_marker_begin(HITRACE_TAG_MUSL, "loading: entry so", file);
3711 clock_gettime(CLOCK_MONOTONIC, &time_start);
3712 if (!load_library_header(task)) {
3713 error(noload ?
3714 "Library %s is not already loaded" :
3715 "Error loading shared library %s: %m",
3716 file);
3717 LD_LOGE("dlopen_impl load library header failed for %{public}s", task->name);
3718 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3719 goto end;
3720 }
3721 if (reserved_address) {
3722 reserved_params.target = task->p;
3723 }
3724 }
3725 if (!task->p) {
3726 LD_LOGE("dlopen_impl load library failed for %{public}s", task->name);
3727 error(noload ?
3728 "Library %s is not already loaded" :
3729 "Error loading shared library %s: %m",
3730 file);
3731 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3732 goto end;
3733 }
3734 clock_gettime(CLOCK_MONOTONIC, &time_end);
3735 dlopen_cost.entry_header_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3736 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3737 if (!task->isloaded) {
3738 is_task_appended = append_loadtasks(tasks, task);
3739 }
3740 clock_gettime(CLOCK_MONOTONIC, &time_start);
3741 preload_deps(task->p, tasks);
3742 clock_gettime(CLOCK_MONOTONIC, &time_end);
3743 dlopen_cost.deps_header_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3744 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3745 unmap_preloaded_sections(tasks);
3746 if (!reserved_address_recursive) {
3747 shuffle_loadtasks(tasks);
3748 }
3749 clock_gettime(CLOCK_MONOTONIC, &time_start);
3750 run_loadtasks(tasks, reserved_address ? &reserved_params : NULL);
3751 clock_gettime(CLOCK_MONOTONIC, &time_end);
3752 dlopen_cost.map_so_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3753 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3754 p = task->p;
3755 if (!task->isloaded) {
3756 assign_tls(p);
3757 }
3758 if (!is_task_appended) {
3759 free_task(task);
3760 task = NULL;
3761 }
3762 free_loadtasks(tasks);
3763 tasks = NULL;
3764 #else
3765 trace_marker_begin(HITRACE_TAG_MUSL, "loading: entry so", file);
3766 p = load_library(file, head, ns, true, reserved_address ? &reserved_params : NULL);
3767 }
3768
3769 if (!p) {
3770 error(noload ?
3771 "Library %s is not already loaded" :
3772 "Error loading shared library %s: %m",
3773 file);
3774 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3775 goto end;
3776 }
3777 /* First load handling */
3778 load_deps(p, reserved_address && reserved_address_recursive ? &reserved_params : NULL);
3779 #endif
3780 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3781 extend_bfs_deps(p, 0);
3782 pthread_mutex_lock(&init_fini_lock);
3783 int constructed = p->constructed;
3784 pthread_mutex_unlock(&init_fini_lock);
3785 if (!constructed) ctor_queue = queue_ctors(p);
3786 if (!p->relocated && (mode & RTLD_LAZY)) {
3787 prepare_lazy(p);
3788 for (i = 0; p->deps[i]; i++)
3789 if (!p->deps[i]->relocated)
3790 prepare_lazy(p->deps[i]);
3791 }
3792 if (!p->relocated || (mode & RTLD_GLOBAL)) {
3793 /* Make new symbols global, at least temporarily, so we can do
3794 * relocations. If not RTLD_GLOBAL, this is reverted below. */
3795 add_syms(p);
3796 /* Set is_reloc_head_so_dep to true for all direct and indirect dependent sos of p, including p itself. */
3797 p->is_reloc_head_so_dep = true;
3798 for (i = 0; p->deps[i]; i++) {
3799 p->deps[i]->is_reloc_head_so_dep = true;
3800 add_syms(p->deps[i]);
3801 }
3802 }
3803 struct dso *reloc_head_so = p;
3804 trace_marker_begin(HITRACE_TAG_MUSL, "linking: entry so", p->name);
3805 clock_gettime(CLOCK_MONOTONIC, &time_start);
3806 if (!p->relocated) {
3807 reloc_all(p, extinfo);
3808 }
3809 clock_gettime(CLOCK_MONOTONIC, &time_end);
3810 dlopen_cost.reloc_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3811 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3812 trace_marker_end(HITRACE_TAG_MUSL);
3813 reloc_head_so->is_reloc_head_so_dep = false;
3814 for (size_t i = 0; reloc_head_so->deps[i]; i++) {
3815 reloc_head_so->deps[i]->is_reloc_head_so_dep = false;
3816 }
3817
3818 /* If RTLD_GLOBAL was not specified, undo any new additions
3819 * to the global symbol table. This is a nop if the library was
3820 * previously loaded and already global. */
3821 if (!(mode & RTLD_GLOBAL))
3822 revert_syms(orig_syms_tail);
3823
3824 /* Processing of deferred lazy relocations must not happen until
3825 * the new libraries are committed; otherwise we could end up with
3826 * relocations resolved to symbol definitions that get removed. */
3827 redo_lazy_relocs();
3828 clock_gettime(CLOCK_MONOTONIC, &time_start);
3829 if (map_dso_to_cfi_shadow(p) == CFI_FAILED) {
3830 error("[%s] map_dso_to_cfi_shadow failed: %m", __FUNCTION__);
3831 longjmp(*rtld_fail, 1);
3832 }
3833 clock_gettime(CLOCK_MONOTONIC, &time_end);
3834 dlopen_cost.map_cfi_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3835 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3836
3837 if (mode & RTLD_NODELETE) {
3838 p->flags |= DSO_FLAGS_NODELETE;
3839 }
3840
3841 update_tls_size();
3842 if (tls_cnt != orig_tls_cnt)
3843 install_new_tls();
3844
3845 if (orig_tail != tail) {
3846 notify_addition_to_debugger(orig_tail->next);
3847 }
3848
3849 orig_tail = tail;
3850 current_so = p;
3851 p = dlopen_post(p, mode);
3852 end:
3853 #ifdef LOAD_ORDER_RANDOMIZATION
3854 if (!is_task_appended) {
3855 free_task(task);
3856 }
3857 free_loadtasks(tasks);
3858 #endif
3859 __release_ptc();
3860 clock_gettime(CLOCK_MONOTONIC, &time_start);
3861 pthread_rwlock_unlock(&lock);
3862 if (ctor_queue) {
3863 do_init_fini(ctor_queue);
3864 free(ctor_queue);
3865 }
3866 clock_gettime(CLOCK_MONOTONIC, &time_end);
3867 dlopen_cost.init_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3868 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3869 pthread_setcancelstate(cs, 0);
3870 trace_marker_end(HITRACE_TAG_MUSL); // "dlopen: " trace end.
3871 clock_gettime(CLOCK_MONOTONIC, &total_end);
3872 #ifdef USE_ENCAPS
3873 dlopen_cost.encaps_time = encpas_cost_time;
3874 #else
3875 dlopen_cost.encaps_time = 0;
3876 #endif
3877 dlopen_cost.total_time = (total_end.tv_sec - total_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3878 + (total_end.tv_nsec - total_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3879 if ((dlopen_cost.total_time > DLOPEN_TIME_THRESHOLD || is_dlopen_debug_enable()) && current_so) {
3880 LD_LOGE("dlopen so: %{public}s time cost: "
3881 "total_time: %{public}d ms, "
3882 "entry_header_time: %{public}d ms, "
3883 "deps_header_time: %{public}d ms, "
3884 "map_so_time: %{public}d ms, "
3885 "reloc_time: %{public}d ms, "
3886 "map_cfi_time: %{public}d ms, "
3887 "init_time: %{public}d ms, "
3888 "encaps_time: %{public}d ms",
3889 current_so->name,
3890 dlopen_cost.total_time,
3891 dlopen_cost.entry_header_time,
3892 dlopen_cost.deps_header_time,
3893 dlopen_cost.map_so_time,
3894 dlopen_cost.reloc_time,
3895 dlopen_cost.map_cfi_time,
3896 dlopen_cost.init_time,
3897 dlopen_cost.encaps_time);
3898 }
3899 #ifdef USE_ENCAPS
3900 encpas_cost_time = 0;
3901 #endif
3902 return p;
3903 }
3904
3905 void *dlopen(const char *file, int mode)
3906 {
3907 const void *caller_addr = __builtin_return_address(0);
3908 musl_log_reset();
3909 ld_log_reset();
3910 LD_LOGI("dlopen file:%{public}s, mode:%{public}x ,caller_addr:%{public}p .", file, mode, caller_addr);
3911 return dlopen_impl(file, mode, NULL, caller_addr, NULL);
3912 }
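/*
 * Usage sketch for the public dlopen/dlsym/dlclose entry points above
 * (illustrative only, not compiled into the loader). The library name
 * "libfoo.so" and the symbol "foo_init" are hypothetical placeholders.
 *
 *     #include <dlfcn.h>
 *     #include <stdio.h>
 *
 *     int call_foo(void)
 *     {
 *         void *h = dlopen("libfoo.so", RTLD_NOW | RTLD_LOCAL);
 *         if (!h) {
 *             fprintf(stderr, "dlopen: %s\n", dlerror());
 *             return -1;
 *         }
 *         int (*foo_init)(void) = (int (*)(void))dlsym(h, "foo_init");
 *         int ret = foo_init ? foo_init() : -1;
 *         dlclose(h);  // drops the nr_dlopen reference taken in dlopen_post
 *         return ret;
 *     }
 */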
3913
3914 void dlns_init(Dl_namespace *dlns, const char *name)
3915 {
3916 if (!dlns) {
3917 return;
3918 }
3919 if (!name) {
3920 dlns->name[0] = 0;
3921 return;
3922 }
3923
3924 const void *caller_addr = __builtin_return_address(0);
3925 if (is_permitted(caller_addr, name) == false) {
3926 return;
3927 }
3928
3929 snprintf(dlns->name, sizeof dlns->name, "%s", name);
3930 LD_LOGI("dlns_init dlns->name:%{public}s .", dlns->name);
3931 }
3932
3933 int dlns_get(const char *name, Dl_namespace *dlns)
3934 {
3935 if (!dlns) {
3936 LD_LOGE("dlns_get dlns is null.");
3937 return EINVAL;
3938 }
3939 int ret = 0;
3940 ns_t *ns = NULL;
3941 pthread_rwlock_rdlock(&lock);
3942 if (!name) {
3943 struct dso *caller;
3944 const void *caller_addr = __builtin_return_address(0);
3945 caller = (struct dso *)addr2dso((size_t)caller_addr);
3946 ns = ((caller && caller->namespace) ? caller->namespace : get_default_ns());
3947 (void)snprintf(dlns->name, sizeof dlns->name, "%s", ns->ns_name);
3948 LD_LOGI("dlns_get name is null, current dlns dlns->name:%{public}s.", dlns->name);
3949 } else {
3950 ns = find_ns_by_name(name);
3951 if (ns) {
3952 (void)snprintf(dlns->name, sizeof dlns->name, "%s", ns->ns_name);
3953 LD_LOGI("dlns_get found ns, current dlns dlns->name:%{public}s.", dlns->name);
3954 } else {
3955 LD_LOGI("dlns_get not found ns! name:%{public}s.", name);
3956 ret = ENOKEY;
3957 }
3958 }
3959 pthread_rwlock_unlock(&lock);
3960 return ret;
3961 }
3962
3963 void *dlopen_ns(Dl_namespace *dlns, const char *file, int mode)
3964 {
3965 const void *caller_addr = __builtin_return_address(0);
3966 if (is_permitted(caller_addr, dlns->name) == false) {
3967 return NULL;
3968 }
3969
3970 musl_log_reset();
3971 ld_log_reset();
3972 LD_LOGI("dlopen_ns file:%{public}s, mode:%{public}x , caller_addr:%{public}p , dlns->name:%{public}s.",
3973 file,
3974 mode,
3975 caller_addr,
3976 dlns ? dlns->name : "NULL");
3977 return dlopen_impl(file, mode, dlns->name, caller_addr, NULL);
3978 }
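/*
 * Namespace workflow sketch (illustrative only). It assumes the caller runs
 * in a namespace accepted by is_permitted(); the namespace name "my_ns", the
 * search path and "libfoo.so" are hypothetical placeholders.
 *
 *     Dl_namespace ns;
 *     dlns_init(&ns, "my_ns");
 *     if (dlns_create(&ns, "/data/storage/el1/bundle/libs/arm64") == 0) {
 *         void *h = dlopen_ns(&ns, "libfoo.so", RTLD_NOW);
 *         if (h) dlclose(h);
 *     }
 */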
3979
3980 void *dlopen_ns_ext(Dl_namespace *dlns, const char *file, int mode, const dl_extinfo *extinfo)
3981 {
3982 const void *caller_addr = __builtin_return_address(0);
3983 if (is_permitted(caller_addr, dlns->name) == false) {
3984 return NULL;
3985 }
3986
3987 musl_log_reset();
3988 ld_log_reset();
3989 LD_LOGI("dlopen_ns_ext file:%{public}s, mode:%{public}x , caller_addr:%{public}p , "
3990 "dlns->name:%{public}s. , extinfo->flag:%{public}x",
3991 file,
3992 mode,
3993 caller_addr,
3994 dlns->name,
3995 extinfo ? extinfo->flag : 0);
3996 return dlopen_impl(file, mode, dlns->name, caller_addr, extinfo);
3997 }
3998
3999 int dlns_create2(Dl_namespace *dlns, const char *lib_path, int flags)
4000 {
4001 if (!dlns) {
4002 LD_LOGE("dlns_create2 dlns is null.");
4003 return EINVAL;
4004 }
4005 ns_t *ns;
4006
4007 pthread_rwlock_wrlock(&lock);
4008 const void *caller_addr = __builtin_return_address(0);
4009 if (is_permitted(caller_addr, dlns->name) == false) {
4010 pthread_rwlock_unlock(&lock);
4011 return EPERM;
4012 }
4013
4014 ns = find_ns_by_name(dlns->name);
4015 if (ns) {
4016 LD_LOGE("dlns_create2 ns is exist.");
4017 pthread_rwlock_unlock(&lock);
4018 return EEXIST;
4019 }
4020 ns = ns_alloc();
4021 if (!ns) {
4022 LD_LOGE("dlns_create2 no memery.");
4023 pthread_rwlock_unlock(&lock);
4024 return ENOMEM;
4025 }
4026 ns_set_name(ns, dlns->name);
4027 ns_set_flag(ns, flags);
4028 ns_add_dso(ns, get_default_ns()->ns_dsos->dsos[0]); /* add main app to this namespace*/
4029 nslist_add_ns(ns); /* add ns to list*/
4030 ns_set_lib_paths(ns, lib_path);
4031
4032 if ((flags & CREATE_INHERIT_DEFAULT) != 0) {
4033 ns_add_inherit(ns, get_default_ns(), NULL);
4034 }
4035
4036 if ((flags & CREATE_INHERIT_CURRENT) != 0) {
4037 struct dso *caller;
4038 caller_addr = __builtin_return_address(0);
4039 caller = (struct dso *)addr2dso((size_t)caller_addr);
4040 if (caller && caller->namespace) {
4041 ns_add_inherit(ns, caller->namespace, NULL);
4042 }
4043 }
4044
4045 LD_LOGI("dlns_create2:"
4046 "ns_name: %{public}s ,"
4047 "separated:%{public}d ,"
4048 "lib_paths:%{public}s ",
4049 ns->ns_name, ns->separated, ns->lib_paths);
4050 pthread_rwlock_unlock(&lock);
4051
4052 return 0;
4053 }
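/*
 * Sketch of dlns_create2 with explicit inheritance flags (illustrative only;
 * the namespace name and path are hypothetical). CREATE_INHERIT_DEFAULT links
 * the new namespace to the default one, CREATE_INHERIT_CURRENT to the
 * caller's namespace, matching the two branches above.
 *
 *     Dl_namespace ns;
 *     dlns_init(&ns, "plugin_ns");
 *     int rc = dlns_create2(&ns, "/data/plugins/libs",
 *                           CREATE_INHERIT_DEFAULT | CREATE_INHERIT_CURRENT);
 *     if (rc != 0) return rc;   // EINVAL, EPERM, EEXIST or ENOMEM
 */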
4054
4055 int dlns_create(Dl_namespace *dlns, const char *lib_path)
4056 {
4057 LD_LOGI("dlns_create lib_paths:%{public}s", lib_path);
4058 return dlns_create2(dlns, lib_path, CREATE_INHERIT_DEFAULT);
4059 }
4060
4061 int dlns_inherit(Dl_namespace *dlns, Dl_namespace *inherited, const char *shared_libs)
4062 {
4063 if (!dlns || !inherited) {
4064 LD_LOGE("dlns_inherit dlns or inherited is null.");
4065 return EINVAL;
4066 }
4067
4068 pthread_rwlock_wrlock(&lock);
4069 const void *caller_addr = __builtin_return_address(0);
4070 if (is_permitted(caller_addr, dlns->name) == false) {
4071 pthread_rwlock_unlock(&lock);
4072 return EPERM;
4073 }
4074
4075 ns_t* ns = find_ns_by_name(dlns->name);
4076 ns_t* ns_inherited = find_ns_by_name(inherited->name);
4077 if (!ns || !ns_inherited) {
4078 LD_LOGE("dlns_inherit ns or ns_inherited is not found.");
4079 pthread_rwlock_unlock(&lock);
4080 return ENOKEY;
4081 }
4082 ns_add_inherit(ns, ns_inherited, shared_libs);
4083 pthread_rwlock_unlock(&lock);
4084
4085 return 0;
4086 }
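/*
 * Sketch of linking two namespaces (illustrative only; names are
 * hypothetical). The shared_libs argument is expected to limit which
 * libraries may be resolved through the inherited link, with NULL meaning no
 * restriction; the colon-separated format below is an assumption.
 *
 *     Dl_namespace app_ns, sdk_ns;
 *     dlns_get(NULL, &app_ns);          // caller's namespace
 *     dlns_init(&sdk_ns, "sdk_ns");
 *     dlns_create(&sdk_ns, "/system/lib64/sdk");
 *     dlns_inherit(&sdk_ns, &app_ns, "libbase.so:libutils.so");
 */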
4087
4088 static void dlclose_ns(struct dso *p)
4089 {
4090 if (!p) return;
4091 ns_t * ns = p->namespace;
4092 if (!ns || !ns->ns_dsos) return;
4093 for (size_t i = 0; i < ns->ns_dsos->num; i++) {
4094 if (p == ns->ns_dsos->dsos[i]) {
4095 for (size_t j = i + 1; j < ns->ns_dsos->num; j++) {
4096 ns->ns_dsos->dsos[j - 1] = ns->ns_dsos->dsos[j];
4097 }
4098 ns->ns_dsos->num--;
4099 return;
4100 }
4101 }
4102 }
4103
4104 hidden int __dl_invalid_handle(void *h)
4105 {
4106 struct dso *p;
4107 for (p=head; p; p=p->next) if (h==p) return 0;
4108 error("Invalid library handle %p", (void *)h);
4109 return 1;
4110 }
4111
4112 void *addr2dso(size_t a)
4113 {
4114 struct dso *p;
4115 size_t i;
4116 for (p=head; p; p=p->next) {
4117 if (a < (size_t)p->map || a - (size_t)p->map >= p->map_len) continue;
4118 Phdr *ph = p->phdr;
4119 size_t phcnt = p->phnum;
4120 size_t entsz = p->phentsize;
4121 size_t base = (size_t)p->base;
4122 for (; phcnt--; ph=(void *)((char *)ph+entsz)) {
4123 if (ph->p_type != PT_LOAD) continue;
4124 if (a-base-ph->p_vaddr < ph->p_memsz)
4125 return p;
4126 }
4127 if (a-(size_t)p->map < p->map_len)
4128 return 0;
4129 }
4130 return 0;
4131 }
4132
4133 static void *do_dlsym(struct dso *p, const char *s, const char *v, void *ra)
4134 {
4135 int use_deps = 0;
4136 bool ra2dso = false;
4137 ns_t *ns = NULL;
4138 struct dso *caller = NULL;
4139 if (p == head || p == RTLD_DEFAULT) {
4140 p = head;
4141 ra2dso = true;
4142 } else if (p == RTLD_NEXT) {
4143 p = addr2dso((size_t)ra);
4144 if (!p) p=head;
4145 p = p->next;
4146 ra2dso = true;
4147 #ifndef HANDLE_RANDOMIZATION
4148 } else if (__dl_invalid_handle(p)) {
4149 return 0;
4150 #endif
4151 } else {
4152 use_deps = 1;
4153 ns = p->namespace;
4154 }
4155 if (ra2dso) {
4156 caller = (struct dso *)addr2dso((size_t)ra);
4157 if (caller && caller->namespace) {
4158 ns = caller->namespace;
4159 }
4160 }
4161 trace_marker_begin(HITRACE_TAG_MUSL, "dlsym: ", (s == NULL ? "(NULL)" : s));
4162 struct verinfo verinfo = { .s = s, .v = v, .use_vna_hash = false };
4163 struct symdef def = use_deps ? find_sym_by_deps(p, &verinfo, 0, ns) :
4164 find_sym2(p, &verinfo, 0, use_deps, ns);
4165 trace_marker_end(HITRACE_TAG_MUSL);
4166 if (!def.sym) {
4167 LD_LOGW("do_dlsym failed: symbol not found. so=%{public}s s=%{public}s v=%{public}s",
4168 (p == NULL ? "NULL" : p->name), s, v);
4169 error("do_dlsym failed: Symbol not found: %s, version: %s so=%s",
4170 s, strlen(v) > 0 ? v : "null", (p == NULL ? "NULL" : p->name));
4171 return 0;
4172 }
4173 if ((def.sym->st_info&0xf) == STT_TLS)
4174 return __tls_get_addr((tls_mod_off_t []){def.dso->tls_id, def.sym->st_value-DTP_OFFSET});
4175 if (DL_FDPIC && (def.sym->st_info&0xf) == STT_FUNC)
4176 return def.dso->funcdescs + (def.sym - def.dso->syms);
4177 return laddr(def.dso, def.sym->st_value);
4178 }
4179
4180 extern int invalidate_exit_funcs(struct dso *p);
4181
4182 static int so_can_unload(struct dso *p, int check_flag)
4183 {
4184 if ((check_flag & UNLOAD_COMMON_CHECK) != 0) {
4185 if (__dl_invalid_handle(p)) {
4186 LD_LOGE("[dlclose]: invalid handle %{public}p", p);
4187 error("[dlclose]: Handle is invalid.");
4188 return 0;
4189 }
4190
4191 if (!p->by_dlopen) {
4192 LD_LOGD("[dlclose]: skip unload %{public}s because it's not loaded by dlopen", p->name);
4193 return 0;
4194 }
4195
4196 /* dso is marked as RTLD_NODELETE library, do nothing here. */
4197 if ((p->flags & DSO_FLAGS_NODELETE) != 0) {
4198 LD_LOGD("[dlclose]: skip unload %{public}s because flags is RTLD_NODELETE", p->name);
4199 return 0;
4200 }
4201 }
4202
4203 if ((check_flag & UNLOAD_NR_DLOPEN_CHECK) != 0) {
4204 if (p->nr_dlopen > 0) {
4205 LD_LOGD("[dlclose]: skip unload %{public}s because nr_dlopen=%{public}d > 0", p->name, p->nr_dlopen);
4206 return 0;
4207 }
4208 }
4209
4210 return 1;
4211 }
4212
4213 static int dlclose_post(struct dso *p)
4214 {
4215 if (p == NULL) {
4216 return -1;
4217 }
4218 #ifdef ENABLE_HWASAN
4219 if (libc.unload_hook) {
4220 libc.unload_hook((unsigned long int)p->base, p->phdr, p->phnum);
4221 }
4222 #endif
4223 unmap_dso_from_cfi_shadow(p);
4224 unmap_library(p);
4225 if (p->parents) {
4226 free(p->parents);
4227 }
4228 free_reloc_can_search_dso(p);
4229 if (p->tls.size == 0) {
4230 free(p);
4231 }
4232
4233 ++subcnt;
4234 return 0;
4235 }
4236
4237 static int dlclose_impl(struct dso *p)
4238 {
4239 struct dso *d;
4240
4241 trace_marker_reset();
4242 trace_marker_begin(HITRACE_TAG_MUSL, "dlclose", p->name);
4243
4244 /* remove dso symbols from global list */
4245 if (p->syms_next) {
4246 for (d = head; d->syms_next != p; d = d->syms_next)
4247 ; /* NOP */
4248 d->syms_next = p->syms_next;
4249 } else if (p == syms_tail) {
4250 for (d = head; d->syms_next != p; d = d->syms_next)
4251 ; /* NOP */
4252 d->syms_next = NULL;
4253 syms_tail = d;
4254 }
4255
4256 /* remove dso from lazy list if needed */
4257 if (p == lazy_head) {
4258 lazy_head = p->lazy_next;
4259 } else if (p->lazy_next) {
4260 for (d = lazy_head; d->lazy_next != p; d = d->lazy_next)
4261 ; /* NOP */
4262 d->lazy_next = p->lazy_next;
4263 }
4264
4265 pthread_mutex_lock(&init_fini_lock);
4266 /* remove dso from fini list */
4267 if (p == fini_head) {
4268 fini_head = p->fini_next;
4269 } else if (p->fini_next) {
4270 for (d = fini_head; d->fini_next != p; d = d->fini_next)
4271 ; /* NOP */
4272 d->fini_next = p->fini_next;
4273 }
4274 pthread_mutex_unlock(&init_fini_lock);
4275
4276 /* empty tls image */
4277 if (p->tls.size != 0) {
4278 p->tls.image = NULL;
4279 }
4280
4281 /* remove dso from global dso list */
4282 if (p == tail) {
4283 tail = p->prev;
4284 tail->next = NULL;
4285 } else {
4286 p->next->prev = p->prev;
4287 p->prev->next = p->next;
4288 }
4289
4290 /* remove dso from namespace */
4291 dlclose_ns(p);
4292
4293 /* remove the randomized handle mapping for this dso, if any */
4294 void* handle = find_handle_by_dso(p);
4295 if (handle) {
4296 remove_handle_node(handle);
4297 }
4298
4299 /* after destruct, invalidate atexit funcs which belong to this dso */
4300 #if (defined(FEATURE_ATEXIT_CB_PROTECT))
4301 invalidate_exit_funcs(p);
4302 #endif
4303
4304 notify_remove_to_debugger(p);
4305
4306 if (p->lazy != NULL)
4307 free(p->lazy);
4308 if (p->deps != no_deps)
4309 free(p->deps);
4310
4311 if (p->deps_all_built) {
4312 free(p->deps_all);
4313 }
4314
4315 trace_marker_end(HITRACE_TAG_MUSL);
4316
4317 return 0;
4318 }
4319
4320 static int do_dlclose(struct dso *p, bool check_deps_all)
4321 {
4322 struct dso_entry *ef = NULL;
4323 struct dso_entry *ef_tmp = NULL;
4324 size_t n;
4325 int unload_check_result;
4326 TAILQ_HEAD(unload_queue, dso_entry) unload_queue;
4327 TAILQ_HEAD(need_unload_queue, dso_entry) need_unload_queue;
4328 unload_check_result = so_can_unload(p, UNLOAD_COMMON_CHECK);
4329 if (unload_check_result != 1) {
4330 return unload_check_result;
4331 }
4332 // Subtract 1 here because dlopen_post unconditionally added 1.
4333 if (p->nr_dlopen > 0) {
4334 --(p->nr_dlopen);
4335 } else {
4336 LD_LOGE("[dlclose]: number of dlopen and dlclose of %{public}s doesn't match when dlclose %{public}s",
4337 p->name, p->name);
4338 return 0;
4339 }
4340
4341 if (p->bfs_built) {
4342 for (int i = 0; p->deps[i]; i++) {
4343 if (p->deps[i]->nr_dlopen > 0) {
4344 p->deps[i]->nr_dlopen--;
4345 } else {
4346 LD_LOGE("[dlclose]: number of dlopen and dlclose of %{public}s doesn't match when dlclose %{public}s",
4347 p->deps[i]->name, p->name);
4348 return 0;
4349 }
4350 }
4351 } else {
4352 /* This part is used for thread local object destructors:
4353 * - nr_dlopen increases for all deps (including the dso itself) when a thread local object destructor is added.
4354 * - nr_dlopen decreases for all deps (including the dso itself) when a thread local object destructor is called.
4355 */
4356 if (check_deps_all && p->deps_all_built) {
4357 for (int i = 0; p->deps_all[i]; i++) {
4358 if (p->deps_all[i]->nr_dlopen > 0) {
4359 p->deps_all[i]->nr_dlopen--;
4360 } else {
4361 LD_LOGE("[dlclose]: number of dlopen and dlclose of %{public}s doesn't match when dlclose %{public}s",
4362 p->deps_all[i]->name, p->name);
4363 return 0;
4364 }
4365 }
4366 }
4367 }
4368
4369 unload_check_result = so_can_unload(p, UNLOAD_NR_DLOPEN_CHECK);
4370 if (unload_check_result != 1) {
4371 return unload_check_result;
4372 }
4373 TAILQ_INIT(&unload_queue);
4374 TAILQ_INIT(&need_unload_queue);
4375 struct dso_entry *start_entry = (struct dso_entry *)malloc(sizeof(struct dso_entry));
4376 start_entry->dso = p;
4377 TAILQ_INSERT_TAIL(&unload_queue, start_entry, entries);
4378
4379 while (!TAILQ_EMPTY(&unload_queue)) {
4380 struct dso_entry *ecur = TAILQ_FIRST(&unload_queue);
4381 struct dso *cur = ecur->dso;
4382 TAILQ_REMOVE(&unload_queue, ecur, entries);
4383 bool already_in_need_unload_queue = false;
4384 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4385 if (ef->dso == cur) {
4386 already_in_need_unload_queue = true;
4387 break;
4388 }
4389 }
4390 if (already_in_need_unload_queue) {
4391 continue;
4392 }
4393 TAILQ_INSERT_TAIL(&need_unload_queue, ecur, entries);
4394 for (int i = 0; i < cur->ndeps_direct; i++) {
4395 remove_dso_parent(cur->deps[i], cur);
4396 if ((cur->deps[i]->parents_count == 0) && (so_can_unload(cur->deps[i], UNLOAD_ALL_CHECK) == 1)) {
4397 bool already_in_unload_queue = false;
4398 TAILQ_FOREACH(ef, &unload_queue, entries) {
4399 if (ef->dso == cur->deps[i]) {
4400 already_in_unload_queue = true;
4401 break;
4402 }
4403 }
4404 if (already_in_unload_queue) {
4405 continue;
4406 }
4407
4408 struct dso_entry *edeps = (struct dso_entry *)malloc(sizeof(struct dso_entry));
4409 edeps->dso = cur->deps[i];
4410 TAILQ_INSERT_TAIL(&unload_queue, edeps, entries);
4411 }
4412 } /* for */
4413 } /* while */
4414
4415 if (is_dlclose_debug_enable()) {
4416 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4417 LD_LOGE("[dlclose]: unload %{public}s succeed when dlclose %{public}s", ef->dso->name, p->name);
4418 }
4419 for (size_t deps_num = 0; p->deps[deps_num]; deps_num++) {
4420 bool ready_to_unload = false;
4421 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4422 if (ef->dso == p->deps[deps_num]) {
4423 ready_to_unload = true;
4424 break;
4425 }
4426 }
4427 if (!ready_to_unload) {
4428 LD_LOGE("[dlclose]: unload %{public}s failed when dlclose %{public}s,"
4429 "nr_dlopen:%{public}d, by_dlopen:%{public}d, parents_count:%{public}d",
4430 p->deps[deps_num]->name, p->name, p->deps[deps_num]->nr_dlopen,
4431 p->deps[deps_num]->by_dlopen, p->deps[deps_num]->parents_count);
4432 }
4433 }
4434 }
4435
4436 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4437 dlclose_impl(ef->dso);
4438 }
4439
4440 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4441 /* call destructors if needed */
4442 pthread_mutex_lock(&init_fini_lock);
4443 int constructed = ef->dso->constructed;
4444 pthread_mutex_unlock(&init_fini_lock);
4445
4446 if (constructed) {
4447 size_t dyn[DYN_CNT];
4448 decode_vec(ef->dso->dynv, dyn, DYN_CNT);
4449 if (dyn[0] & (1<<DT_FINI_ARRAY)) {
4450 n = dyn[DT_FINI_ARRAYSZ] / sizeof(size_t);
4451 size_t *fn = (size_t *)laddr(ef->dso, dyn[DT_FINI_ARRAY]) + n;
4452 trace_marker_begin(HITRACE_TAG_MUSL, "calling destructors:", ef->dso->name);
4453
4454 pthread_rwlock_unlock(&lock);
4455 while (n--)
4456 ((void (*)(void))*--fn)();
4457 pthread_rwlock_wrlock(&lock);
4458
4459 trace_marker_end(HITRACE_TAG_MUSL);
4460 }
4461 pthread_mutex_lock(&init_fini_lock);
4462 ef->dso->constructed = 0;
4463 pthread_mutex_unlock(&init_fini_lock);
4464 }
4465 }
4466 // Unmap all sos at the end: with weak symbols, a so unloaded later may still reference functions in one unloaded earlier.
4467 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4468 dlclose_post(ef->dso);
4469 }
4470 // Free dso_entry.
4471 TAILQ_FOREACH_SAFE(ef, &need_unload_queue, entries, ef_tmp) {
4472 if (ef) {
4473 free(ef);
4474 }
4475 }
4476
4477 return 0;
4478 }
4479
4480 hidden int __dlclose(void *p)
4481 {
4482 pthread_mutex_lock(&dlclose_lock);
4483 int rc;
4484 pthread_rwlock_wrlock(&lock);
4485 if (shutting_down) {
4486 error("Cannot dlclose while program is exiting.");
4487 pthread_rwlock_unlock(&lock);
4488 pthread_mutex_unlock(&dlclose_lock);
4489 return -1;
4490 }
4491 #ifdef HANDLE_RANDOMIZATION
4492 struct dso *dso = find_dso_by_handle(p);
4493 if (dso == NULL) {
4494 errno = EINVAL;
4495 error("Handle is invalid.");
4496 LD_LOGE("Handle is not find.");
4497 pthread_rwlock_unlock(&lock);
4498 pthread_mutex_unlock(&dlclose_lock);
4499 return -1;
4500 }
4501 rc = do_dlclose(dso, 0);
4502 #else
4503 rc = do_dlclose(p, 0);
4504 #endif
4505 pthread_rwlock_unlock(&lock);
4506 pthread_mutex_unlock(&dlclose_lock);
4507 return rc;
4508 }
4509
4510 static inline int sym_is_matched(const Sym* sym, size_t addr_offset_so) {
4511 return sym->st_value &&
4512 ((sym->st_info&0xf) != STT_TLS) &&
4513 (addr_offset_so >= sym->st_value) &&
4514 (addr_offset_so < sym->st_value + sym->st_size);
4515 }
4516
4517 static inline Sym* find_addr_by_elf(size_t addr_offset_so, struct dso *p) {
4518 uint32_t nsym = p->hashtab[1];
4519 Sym *sym = p->syms;
4520 for (; nsym; nsym--, sym++) {
4521 if (sym_is_matched(sym, addr_offset_so)) {
4522 return sym;
4523 }
4524 }
4525
4526 return NULL;
4527 }
4528
4529 static inline Sym* find_addr_by_gnu(size_t addr_offset_so, struct dso *p) {
4530
4531 size_t i, nsym, first_hash_sym_index;
4532 uint32_t *hashval;
4533 Sym *sym_tab = p->syms;
4534 uint32_t *buckets = p->ghashtab + 4 + (p->ghashtab[2] * sizeof(size_t) / 4);
4535 // Points to the first defined symbol, all symbols before it are undefined.
4536 first_hash_sym_index = buckets[0];
4537 Sym *sym = &sym_tab[first_hash_sym_index];
4538
4539 // Get the location pointed to by the last bucket.
4540 for (i = nsym = 0; i < p->ghashtab[0]; i++) {
4541 if (buckets[i] > nsym)
4542 nsym = buckets[i];
4543 }
4544
4545 for (i = first_hash_sym_index; i < nsym; i++) {
4546 if (sym_is_matched(sym, addr_offset_so)) {
4547 return sym;
4548 }
4549 sym++;
4550 }
4551
4552 // Start traversing the hash list from the position pointed to by the last bucket.
4553 if (nsym) {
4554 hashval = buckets + p->ghashtab[0] + (nsym - p->ghashtab[1]);
4555 do {
4556 nsym++;
4557 if (sym_is_matched(sym, addr_offset_so)) {
4558 return sym;
4559 }
4560 sym++;
4561 }
4562 while (!(*hashval++ & 1));
4563 }
4564
4565 return NULL;
4566 }
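/*
 * Layout notes for the .gnu.hash section walked above (descriptive only):
 *   ghashtab[0]  number of buckets
 *   ghashtab[1]  index of the first hashed (defined) symbol ("symoffset")
 *   ghashtab[2]  number of bloom-filter words
 *   ghashtab[3]  bloom shift
 * The header is followed by the bloom filter, the bucket array and the
 * hash-value chains; the lowest bit of a chain entry marks the end of a
 * bucket's chain, which is why the loop above stops when (*hashval++ & 1).
 */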
4567
4568
4569 int dladdr(const void *addr_arg, Dl_info *info)
4570 {
4571 size_t addr = (size_t)addr_arg;
4572 struct dso *p;
4573 Sym *match_sym = NULL;
4574 char *strings;
4575
4576 pthread_rwlock_rdlock(&lock);
4577 p = addr2dso(addr);
4578 pthread_rwlock_unlock(&lock);
4579
4580 if (!p) return 0;
4581
4582 strings = p->strings;
4583 size_t addr_offset_so = addr - (size_t)p->base;
4584
4585 info->dli_fname = p->name;
4586 info->dli_fbase = p->map;
4587
4588 if (p->ghashtab) {
4589 match_sym = find_addr_by_gnu(addr_offset_so, p);
4590
4591 } else {
4592 match_sym = find_addr_by_elf(addr_offset_so, p);
4593 }
4594
4595 if (!match_sym) {
4596 info->dli_sname = 0;
4597 info->dli_saddr = 0;
4598 return 1;
4599 }
4600 info->dli_sname = strings + match_sym->st_name;
4601 info->dli_saddr = (void *)laddr(p, match_sym->st_value);
4602 return 1;
4603 }
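/*
 * dladdr() usage sketch (illustrative only; requires _GNU_SOURCE):
 *
 *     #include <dlfcn.h>
 *     #include <stdio.h>
 *
 *     Dl_info info;
 *     if (dladdr((void *)&printf, &info) && info.dli_sname)
 *         printf("%s found in %s at %p\n", info.dli_sname, info.dli_fname, info.dli_saddr);
 *
 * Note that dladdr() returns nonzero even when no containing symbol is found,
 * as long as the address falls inside a loaded object; check dli_sname before
 * using it, as the early-return path above shows.
 */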
4604
4605 hidden void *__dlsym(void *restrict p, const char *restrict s, void *restrict ra)
4606 {
4607 void *res;
4608 pthread_rwlock_rdlock(&lock);
4609 #ifdef HANDLE_RANDOMIZATION
4610 if ((p != RTLD_DEFAULT) && (p != RTLD_NEXT)) {
4611 struct dso *dso = find_dso_by_handle(p);
4612 if (dso == NULL) {
4613 pthread_rwlock_unlock(&lock);
4614 return 0;
4615 }
4616 res = do_dlsym(dso, s, "", ra);
4617 } else {
4618 res = do_dlsym(p, s, "", ra);
4619 }
4620 #else
4621 res = do_dlsym(p, s, "", ra);
4622 #endif
4623 pthread_rwlock_unlock(&lock);
4624 return res;
4625 }
4626
4627 hidden void *__dlvsym(void *restrict p, const char *restrict s, const char *restrict v, void *restrict ra)
4628 {
4629 void *res;
4630 pthread_rwlock_rdlock(&lock);
4631 #ifdef HANDLE_RANDOMIZATION
4632 if ((p != RTLD_DEFAULT) && (p != RTLD_NEXT)) {
4633 struct dso *dso = find_dso_by_handle(p);
4634 if (dso == NULL) {
4635 pthread_rwlock_unlock(&lock);
4636 return 0;
4637 }
4638 res = do_dlsym(dso, s, v, ra);
4639 } else {
4640 res = do_dlsym(p, s, v, ra);
4641 }
4642 #else
4643 res = do_dlsym(p, s, v, ra);
4644 #endif
4645 pthread_rwlock_unlock(&lock);
4646 return res;
4647 }
4648
4649 hidden void *__dlsym_redir_time64(void *restrict p, const char *restrict s, void *restrict ra)
4650 {
4651 #if _REDIR_TIME64
4652 const char *suffix, *suffix2 = "";
4653 char redir[36];
4654
4655 /* Map the symbol name to a time64 version of itself according to the
4656 * pattern used for naming the redirected time64 symbols. */
4657 size_t l = strnlen(s, sizeof redir);
4658 if (l<4 || l==sizeof redir) goto no_redir;
4659 if (s[l-2]=='_' && s[l-1]=='r') {
4660 l -= 2;
4661 suffix2 = s+l;
4662 }
4663 if (l<4) goto no_redir;
4664 if (!strcmp(s+l-4, "time")) suffix = "64";
4665 else suffix = "_time64";
4666
4667 /* Use the presence of the remapped symbol name in libc to determine
4668 * whether it's one that requires time64 redirection; replace if so. */
4669 snprintf(redir, sizeof redir, "__%.*s%s%s", (int)l, s, suffix, suffix2);
4670 if (find_sym(&ldso, redir, 1).sym) s = redir;
4671 no_redir:
4672 #endif
4673 return __dlsym(p, s, ra);
4674 }
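/*
 * Examples of the time64 name remapping above (descriptive only, for
 * _REDIR_TIME64 targets); the rewritten name is used only if libc actually
 * defines it:
 *
 *     dlsym(h, "clock_gettime")  ->  looked up as "__clock_gettime64"
 *     dlsym(h, "nanosleep")      ->  looked up as "__nanosleep_time64"
 *     dlsym(h, "localtime_r")    ->  looked up as "__localtime64_r"
 */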
4675
4676 int dl_iterate_phdr(int(*callback)(struct dl_phdr_info *info, size_t size, void *data), void *data)
4677 {
4678 pthread_mutex_lock(&dlclose_lock);
4679 struct dso *current;
4680 struct dl_phdr_info info;
4681 int ret = 0;
4682 for(current = head; current;) {
4683 info.dlpi_addr = (uintptr_t)current->base;
4684 info.dlpi_name = current->name;
4685 info.dlpi_phdr = current->phdr;
4686 info.dlpi_phnum = current->phnum;
4687 info.dlpi_adds = gencnt;
4688 info.dlpi_subs = subcnt;
4689 info.dlpi_tls_modid = current->tls_id;
4690 info.dlpi_tls_data = !current->tls_id ? 0 :
4691 __tls_get_addr((tls_mod_off_t[]){current->tls_id,0});
4692
4693 // FIXME: add dl_phdr_lock for unwind callback
4694 pthread_mutex_lock(&dl_phdr_lock);
4695 ret = (callback)(&info, sizeof (info), data);
4696 pthread_mutex_unlock(&dl_phdr_lock);
4697
4698 if (ret != 0) break;
4699 pthread_rwlock_rdlock(&lock);
4700 current = current->next;
4701 pthread_rwlock_unlock(&lock);
4702 }
4703 pthread_mutex_unlock(&dlclose_lock);
4704 return ret;
4705 }
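/*
 * dl_iterate_phdr() usage sketch (illustrative only):
 *
 *     #include <link.h>
 *     #include <stdio.h>
 *
 *     static int print_module(struct dl_phdr_info *info, size_t size, void *data)
 *     {
 *         printf("%s loaded at %p with %d program headers\n",
 *                info->dlpi_name, (void *)info->dlpi_addr, (int)info->dlpi_phnum);
 *         return 0;   // returning nonzero stops the iteration, as in the loop above
 *     }
 *
 *     // dl_iterate_phdr(print_module, NULL);
 */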
4706
4707 static void error_impl(const char *fmt, ...)
4708 {
4709 va_list ap;
4710 va_start(ap, fmt);
4711 if (!runtime) {
4712 vdprintf(2, fmt, ap);
4713 dprintf(2, "\n");
4714 ldso_fail = 1;
4715 va_end(ap);
4716 return;
4717 }
4718 __dl_vseterr(fmt, ap);
4719 va_end(ap);
4720 }
4721
4722 static void error_noop(const char *fmt, ...)
4723 {
4724 }
4725
4726 int dlns_set_namespace_lib_path(const char * name, const char * lib_path)
4727 {
4728 if (!name || !lib_path) {
4729 LD_LOGE("dlns_set_namespace_lib_path name or lib_path is null.");
4730 return EINVAL;
4731 }
4732
4733 pthread_rwlock_wrlock(&lock);
4734 const void *caller_addr = __builtin_return_address(0);
4735 if (is_permitted(caller_addr, name) == false) {
4736 pthread_rwlock_unlock(&lock);
4737 return EPERM;
4738 }
4739
4740 ns_t* ns = find_ns_by_name(name);
4741 if (!ns) {
4742 pthread_rwlock_unlock(&lock);
4743 LD_LOGE("dlns_set_namespace_lib_path fail, input ns name : [%{public}s] is not found.", name);
4744 return ENOKEY;
4745 }
4746
4747 ns_set_lib_paths(ns, lib_path);
4748 pthread_rwlock_unlock(&lock);
4749 return 0;
4750 }
4751
4752 int dlns_set_namespace_separated(const char * name, const bool separated)
4753 {
4754 if (!name) {
4755 LD_LOGE("dlns_set_namespace_separated name is null.");
4756 return EINVAL;
4757 }
4758
4759 pthread_rwlock_wrlock(&lock);
4760 const void *caller_addr = __builtin_return_address(0);
4761 if (is_permitted(caller_addr, name) == false) {
4762 pthread_rwlock_unlock(&lock);
4763 return EPERM;
4764 }
4765
4766 ns_t* ns = find_ns_by_name(name);
4767 if (!ns) {
4768 pthread_rwlock_unlock(&lock);
4769 LD_LOGE("dlns_set_namespace_separated fail, input ns name : [%{public}s] is not found.", name);
4770 return ENOKEY;
4771 }
4772
4773 ns_set_separated(ns, separated);
4774 pthread_rwlock_unlock(&lock);
4775 return 0;
4776 }
4777
4778 int dlns_set_namespace_permitted_paths(const char * name, const char * permitted_paths)
4779 {
4780 if (!name || !permitted_paths) {
4781 LD_LOGE("dlns_set_namespace_permitted_paths name or permitted_paths is null.");
4782 return EINVAL;
4783 }
4784
4785 pthread_rwlock_wrlock(&lock);
4786 const void *caller_addr = __builtin_return_address(0);
4787 if (is_permitted(caller_addr, name) == false) {
4788 pthread_rwlock_unlock(&lock);
4789 return EPERM;
4790 }
4791
4792 ns_t* ns = find_ns_by_name(name);
4793 if (!ns) {
4794 pthread_rwlock_unlock(&lock);
4795 LD_LOGE("dlns_set_namespace_permitted_paths fail, input ns name : [%{public}s] is not found.", name);
4796 return ENOKEY;
4797 }
4798
4799 ns_set_permitted_paths(ns, permitted_paths);
4800 pthread_rwlock_unlock(&lock);
4801 return 0;
4802 }
4803
4804 int dlns_set_namespace_allowed_libs(const char * name, const char * allowed_libs)
4805 {
4806 if (!name || !allowed_libs) {
4807 LD_LOGE("dlns_set_namespace_allowed_libs name or allowed_libs is null.");
4808 return EINVAL;
4809 }
4810
4811 pthread_rwlock_wrlock(&lock);
4812 const void *caller_addr = __builtin_return_address(0);
4813 if (is_permitted(caller_addr, name) == false) {
4814 pthread_rwlock_unlock(&lock);
4815 return EPERM;
4816 }
4817
4818 ns_t* ns = find_ns_by_name(name);
4819 if (!ns) {
4820 pthread_rwlock_unlock(&lock);
4821 LD_LOGE("dlns_set_namespace_allowed_libs fail, input ns name : [%{public}s] is not found.", name);
4822 return ENOKEY;
4823 }
4824
4825 ns_set_allowed_libs(ns, allowed_libs);
4826 pthread_rwlock_unlock(&lock);
4827 return 0;
4828 }
4829
4830 int handle_asan_path_open(int fd, const char *name, ns_t *namespace, char *buf, size_t buf_size)
4831 {
4832 LD_LOGD("handle_asan_path_open fd:%{public}d, name:%{public}s , namespace:%{public}s .",
4833 fd,
4834 name,
4835 namespace ? namespace->ns_name : "NULL");
4836 int fd_tmp = fd;
4837 if (fd == -1 && (namespace->asan_lib_paths || namespace->lib_paths)) {
4838 if (namespace->lib_paths && namespace->asan_lib_paths) {
4839 size_t newlen = strlen(namespace->asan_lib_paths) + strlen(namespace->lib_paths) + 2;
4840 char *new_lib_paths = malloc(newlen);
4841 memset(new_lib_paths, 0, newlen);
4842 strcpy(new_lib_paths, namespace->asan_lib_paths);
4843 strcat(new_lib_paths, ":");
4844 strcat(new_lib_paths, namespace->lib_paths);
4845 fd_tmp = path_open(name, new_lib_paths, buf, buf_size);
4846 LD_LOGD("handle_asan_path_open path_open new_lib_paths:%{public}s ,fd: %{public}d.", new_lib_paths, fd_tmp);
4847 free(new_lib_paths);
4848 } else if (namespace->asan_lib_paths) {
4849 fd_tmp = path_open(name, namespace->asan_lib_paths, buf, buf_size);
4850 LD_LOGD("handle_asan_path_open path_open asan_lib_paths:%{public}s ,fd: %{public}d.",
4851 namespace->asan_lib_paths,
4852 fd_tmp);
4853 } else {
4854 fd_tmp = path_open(name, namespace->lib_paths, buf, buf_size);
4855 LD_LOGD(
4856 "handle_asan_path_open path_open lib_paths:%{public}s ,fd: %{public}d.", namespace->lib_paths, fd_tmp);
4857 }
4858 }
4859 return fd_tmp;
4860 }
4861
4862 void* dlopen_ext(const char *file, int mode, const dl_extinfo *extinfo)
4863 {
4864 const void *caller_addr = __builtin_return_address(0);
4865 musl_log_reset();
4866 ld_log_reset();
4867 if (extinfo != NULL) {
4868 if ((extinfo->flag & ~(DL_EXT_VALID_FLAG_BITS)) != 0) {
4869 LD_LOGE("Error dlopen_ext %{public}s: invalid flag %{public}x", file, extinfo->flag);
4870 return NULL;
4871 }
4872 }
4873 LD_LOGI("dlopen_ext file:%{public}s, mode:%{public}x , caller_addr:%{public}p , extinfo->flag:%{public}x",
4874 file,
4875 mode,
4876 caller_addr,
4877 extinfo ? extinfo->flag : 0);
4878 return dlopen_impl(file, mode, NULL, caller_addr, extinfo);
4879 }
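/*
 * dlopen_ext() sketch with a reserved address range (illustrative only; the
 * mmap-based reservation and "libfoo.so" are assumptions, not part of this
 * file). With DL_EXT_RESERVED_ADDRESS the range must be honored
 * (must_use_reserved is set above), while DL_EXT_RESERVED_ADDRESS_HINT lets
 * the loader fall back to an ordinary mapping.
 *
 *     #include <sys/mman.h>
 *
 *     size_t len = 64 * 1024 * 1024;
 *     void *base = mmap(NULL, len, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *     dl_extinfo ext = {
 *         .flag = DL_EXT_RESERVED_ADDRESS,
 *         .reserved_addr = base,
 *         .reserved_size = len,
 *     };
 *     void *h = dlopen_ext("libfoo.so", RTLD_NOW, &ext);
 */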
4880
4881 #ifdef LOAD_ORDER_RANDOMIZATION
4882 static void open_library_by_path(const char *name, const char *s, struct loadtask *task, struct zip_info *z_info)
4883 {
4884 char *buf = task->buf;
4885 size_t buf_size = sizeof task->buf;
4886 size_t l;
4887 for (;;) {
4888 s += strspn(s, ":\n");
4889 l = strcspn(s, ":\n");
4890 if (l-1 >= INT_MAX) return;
4891 if (snprintf(buf, buf_size, "%.*s/%s", (int)l, s, name) < buf_size) {
4892 char *separator = strstr(buf, ZIP_FILE_PATH_SEPARATOR);
4893 if (separator != NULL) {
4894 int res = open_uncompressed_library_in_zipfile(buf, z_info, separator);
4895 if (res == 0) {
4896 task->fd = z_info->fd;
4897 task->file_offset = z_info->file_offset;
4898 break;
4899 } else {
4900 memset(z_info->path_buf, 0, sizeof(z_info->path_buf));
4901 }
4902 } else {
4903 if ((task->fd = open(buf, O_RDONLY|O_CLOEXEC))>=0) break;
4904 }
4905 }
4906 s += l;
4907 }
4908 return;
4909 }
4910
4911 static void handle_asan_path_open_by_task(int fd, const char *name, ns_t *namespace, struct loadtask *task,
4912 struct zip_info *z_info)
4913 {
4914 LD_LOGD("handle_asan_path_open_by_task fd:%{public}d, name:%{public}s , namespace:%{public}s .",
4915 fd,
4916 name,
4917 namespace ? namespace->ns_name : "NULL");
4918 if (fd == -1 && (namespace->asan_lib_paths || namespace->lib_paths)) {
4919 if (namespace->lib_paths && namespace->asan_lib_paths) {
4920 size_t newlen = strlen(namespace->asan_lib_paths) + strlen(namespace->lib_paths) + 2;
4921 char *new_lib_paths = malloc(newlen);
4922 memset(new_lib_paths, 0, newlen);
4923 strcpy(new_lib_paths, namespace->asan_lib_paths);
4924 strcat(new_lib_paths, ":");
4925 strcat(new_lib_paths, namespace->lib_paths);
4926 open_library_by_path(name, new_lib_paths, task, z_info);
4927 LD_LOGD("handle_asan_path_open_by_task open_library_by_path new_lib_paths:%{public}s ,fd: %{public}d.",
4928 new_lib_paths,
4929 task->fd);
4930 free(new_lib_paths);
4931 } else if (namespace->asan_lib_paths) {
4932 open_library_by_path(name, namespace->asan_lib_paths, task, z_info);
4933 LD_LOGD("handle_asan_path_open_by_task open_library_by_path asan_lib_paths:%{public}s ,fd: %{public}d.",
4934 namespace->asan_lib_paths,
4935 task->fd);
4936 } else {
4937 open_library_by_path(name, namespace->lib_paths, task, z_info);
4938 LD_LOGD("handle_asan_path_open_by_task open_library_by_path lib_paths:%{public}s ,fd: %{public}d.",
4939 namespace->lib_paths,
4940 task->fd);
4941 }
4942 }
4943 return;
4944 }
4945
4946 /* Used to get an uncompress library offset in zip file, then we can use the offset to mmap the library directly. */
4947 int open_uncompressed_library_in_zipfile(const char *path, struct zip_info *z_info, char *separator)
4948 {
4949 struct local_file_header zip_file_header;
4950 struct central_dir_entry c_dir_entry;
4951 struct zip_end_locator end_locator;
4952
4953 /* Use "!/" to split the path into the zipfile path and the library path inside the zipfile.
4954 * For example:
4955 * - path: x/xx/xxx.zip!/x/xx/xxx.so
4956 * - zipfile path: x/xx/xxx.zip
4957 * - library path in zipfile: x/xx/xxx.so */
4958 if (strlcpy(z_info->path_buf, path, PATH_BUF_SIZE) >= PATH_BUF_SIZE) {
4959 LD_LOGE("Open uncompressed library: input path %{public}s is too long.", path);
4960 return -1;
4961 }
4962 z_info->path_buf[separator - path] = '\0';
4963 z_info->file_path_index = separator - path + 2;
4964 char *zip_file_path = z_info->path_buf;
4965 char *lib_path = &z_info->path_buf[z_info->file_path_index];
4966 if (zip_file_path == NULL || lib_path == NULL) {
4967 LD_LOGE("Open uncompressed library: get zip and lib path failed.");
4968 return -1;
4969 }
4970 LD_LOGD("Open uncompressed library: input path: %{public}s, zip file path: %{public}s, library path: %{public}s.",
4971 path, zip_file_path, lib_path);
4972
4973 // Get zip file length
4974 FILE *zip_file = fopen(zip_file_path, "re");
4975 if (zip_file == NULL) {
4976 LD_LOGE("Open uncompressed library: fopen %{public}s failed.", zip_file_path);
4977 return -1;
4978 }
4979 if (fseek(zip_file, 0, SEEK_END) != 0) {
4980 LD_LOGE("Open uncompressed library: fseek SEEK_END failed.");
4981 fclose(zip_file);
4982 return -1;
4983 }
4984 int64_t zip_file_len = ftell(zip_file);
4985 if (zip_file_len == -1) {
4986 LD_LOGE("Open uncompressed library: get zip file length failed.");
4987 fclose(zip_file);
4988 return -1;
4989 }
4990
4991 // Read end of central directory record.
4992 size_t end_locator_len = sizeof(end_locator);
4993 size_t end_locator_pos = zip_file_len - end_locator_len;
4994 if (fseek(zip_file, end_locator_pos, SEEK_SET) != 0) {
4995 LD_LOGE("Open uncompressed library: fseek end locator position failed.");
4996 fclose(zip_file);
4997 return -1;
4998 }
4999 if (fread(&end_locator, sizeof(end_locator), 1, zip_file) != 1 || end_locator.signature != EOCD_SIGNATURE) {
5000 LD_LOGE("Open uncompressed library: fread end locator failed.");
5001 fclose(zip_file);
5002 return -1;
5003 }
5004
5005 char file_name[PATH_BUF_SIZE] = {0};
5006 uint64_t current_dir_pos = end_locator.offset;
5007 for (uint16_t i = 0; i < end_locator.total_entries; i++) {
5008 // Read central dir entry.
5009 if (fseek(zip_file, current_dir_pos, SEEK_SET) != 0) {
5010 LD_LOGE("Open uncompressed library: fseek current centra dir entry position failed.");
5011 fclose(zip_file);
5012 return -1;
5013 }
5014 if (fread(&c_dir_entry, sizeof(c_dir_entry), 1, zip_file) != 1 || c_dir_entry.signature != CENTRAL_SIGNATURE) {
5015 LD_LOGE("Open uncompressed library: fread centra dir entry failed.");
5016 fclose(zip_file);
5017 return -1;
5018 }
5019
5020 if (fread(file_name, c_dir_entry.name_size, 1, zip_file) != 1) {
5021 LD_LOGE("Open uncompressed library: fread file name failed.");
5022 fclose(zip_file);
5023 return -1;
5024 }
5025 if (strcmp(file_name, lib_path) == 0) {
5026 // Read local file header.
5027 if (fseek(zip_file, c_dir_entry.local_header_offset, SEEK_SET) != 0) {
5028 LD_LOGE("Open uncompressed library: fseek local file header failed.");
5029 fclose(zip_file);
5030 return -1;
5031 }
5032 if (fread(&zip_file_header, sizeof(zip_file_header), 1, zip_file) != 1) {
5033 LD_LOGE("Open uncompressed library: fread local file header failed.");
5034 fclose(zip_file);
5035 return -1;
5036 }
5037 if (zip_file_header.signature != LOCAL_FILE_HEADER_SIGNATURE) {
5038 LD_LOGE("Open uncompressed library: read local file header signature error.");
5039 fclose(zip_file);
5040 return -1;
5041 }
5042
5043 z_info->file_offset = c_dir_entry.local_header_offset + sizeof(zip_file_header) +
5044 zip_file_header.name_size + zip_file_header.extra_size;
5045 if (zip_file_header.compression_method != COMPRESS_STORED || z_info->file_offset % PAGE_SIZE != 0) {
5046 LD_LOGE("Open uncompressed library: open %{public}s in %{public}s failed because of misalignment or saved with compression."
5047 "compress method %{public}d, file offset %{public}lu",
5048 lib_path, zip_file_path, zip_file_header.compression_method, z_info->file_offset);
5049 fclose(zip_file);
5050 return -2;
5051 }
5052 z_info->found = true;
5053 break;
5054 }
5055
5056 memset(file_name, 0, sizeof(file_name));
5057 current_dir_pos += sizeof(c_dir_entry);
5058 current_dir_pos += c_dir_entry.name_size + c_dir_entry.extra_size + c_dir_entry.comment_size;
5059 }
5060 if (!z_info->found) {
5061 LD_LOGE("Open uncompressed library: %{public}s was not found in %{public}s.", lib_path, zip_file_path);
5062 fclose(zip_file);
5063 return -3;
5064 }
5065 z_info->fd = fileno(zip_file);
5066
5067 return 0;
5068 }
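/*
 * Path format handled above (descriptive only): the "!/" separator splits the
 * zip file path from the entry path, e.g.
 *
 *     /data/app/example.zip!/libs/arm64/libfoo.so
 *
 * (the concrete package and library names are hypothetical). The entry must
 * be stored uncompressed (COMPRESS_STORED) and its data offset must be page
 * aligned; otherwise -2 is returned and open_library_by_path() clears the
 * buffer and continues with the next search path component.
 */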
5069
5070 static bool task_check_xpm(struct loadtask *task)
5071 {
5072 size_t mapLen = sizeof(Ehdr);
5073 void *map = mmap(0, mapLen, PROT_READ, MAP_PRIVATE | MAP_XPM, task->fd, task->file_offset);
5074 if (map == MAP_FAILED) {
5075 LD_LOGE("Xpm check failed for %{public}s, errno for mmap is: %{public}d", task->name, errno);
5076 return false;
5077 }
5078 munmap(map, mapLen);
5079 return true;
5080 }
5081
5082 static bool map_library_header(struct loadtask *task)
5083 {
5084 off_t off_start;
5085 Phdr *ph;
5086 size_t i;
5087 size_t str_size;
5088 off_t str_table;
5089 if (!task_check_xpm(task)) {
5090 return false;
5091 }
5092
5093 ssize_t l = pread(task->fd, task->ehdr_buf, sizeof task->ehdr_buf, task->file_offset);
5094 task->eh = task->ehdr_buf;
5095 if (l < 0) {
5096 LD_LOGE("Error mapping header %{public}s: failed to read fd errno: %{public}d", task->name, errno);
5097 return false;
5098 }
5099 if (l < sizeof(Ehdr) || (task->eh->e_type != ET_DYN && task->eh->e_type != ET_EXEC)) {
5100 LD_LOGE("Error mapping header %{public}s: invaliled Ehdr l=%{public}d e_type=%{public}hu",
5101 task->name, l, task->eh->e_type);
5102 goto noexec;
5103 }
5104 task->phsize = task->eh->e_phentsize * task->eh->e_phnum;
5105 if (task->phsize > sizeof task->ehdr_buf - sizeof(Ehdr)) {
5106 task->allocated_buf = malloc(task->phsize);
5107 if (!task->allocated_buf) {
5108 LD_LOGE("Error mapping header %{public}s: failed to alloc memory errno: %{public}d", task->name, errno);
5109 return false;
5110 }
5111 l = pread(task->fd, task->allocated_buf, task->phsize, task->eh->e_phoff + task->file_offset);
5112 if (l < 0) {
5113 LD_LOGE("Error mapping header %{public}s: failed to pread errno: %{public}d", task->name, errno);
5114 goto error;
5115 }
5116 if (l != task->phsize) {
5117 LD_LOGE("Error mapping header %{public}s: unmatched phsize errno: %{public}d", task->name, errno);
5118 goto noexec;
5119 }
5120 ph = task->ph0 = task->allocated_buf;
5121 } else if (task->eh->e_phoff + task->phsize > l) {
5122 l = pread(task->fd, task->ehdr_buf + 1, task->phsize, task->eh->e_phoff + task->file_offset);
5123 if (l < 0) {
5124 LD_LOGE("Error mapping header %{public}s: failed to pread errno: %{public}d", task->name, errno);
5125 goto error;
5126 }
5127 if (l != task->phsize) {
5128 LD_LOGE("Error mapping header %{public}s: unmatched phsize", task->name);
5129 goto noexec;
5130 }
5131 ph = task->ph0 = (void *)(task->ehdr_buf + 1);
5132 } else {
5133 ph = task->ph0 = (void *)((char *)task->ehdr_buf + task->eh->e_phoff);
5134 }
5135
5136 for (i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
5137 if (ph->p_type == PT_DYNAMIC) {
5138 task->dyn = ph->p_vaddr;
5139 } else if (ph->p_type == PT_TLS) {
5140 task->tls_image = ph->p_vaddr;
5141 task->tls.align = ph->p_align;
5142 task->tls.len = ph->p_filesz;
5143 task->tls.size = ph->p_memsz;
5144 }
5145
5146 if (ph->p_type != PT_DYNAMIC) {
5147 continue;
5148 }
5149 // map the dynamic segment and the string table of the library
5150 off_start = ph->p_offset;
5151 off_start &= -PAGE_SIZE;
5152 task->dyn_map_len = ph->p_memsz + (ph->p_offset - off_start);
5153 /* The default value of file_offset is 0.
5154 * The value of file_offset may be greater than 0 when opening library from zip file.
5155 * The value of file_offset is guaranteed to be PAGE_SIZE aligned. */
5156 task->dyn_map = mmap(0, task->dyn_map_len, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
5157 if (task->dyn_map == MAP_FAILED) {
5158 LD_LOGE("Error mapping header %{public}s: failed to map dynamic section errno: %{public}d", task->name, errno);
5159 goto error;
5160 }
5161 task->dyn_addr = (size_t *)((unsigned char *)task->dyn_map + (ph->p_offset - off_start));
5162 size_t dyn_tmp;
5163 if (search_vec(task->dyn_addr, &dyn_tmp, DT_STRTAB)) {
5164 str_table = dyn_tmp;
5165 } else {
5166 LD_LOGE("Error mapping header %{public}s: DT_STRTAB not found", task->name);
5167 goto error;
5168 }
5169 if (search_vec(task->dyn_addr, &dyn_tmp, DT_STRSZ)) {
5170 str_size = dyn_tmp;
5171 } else {
5172 LD_LOGE("Error mapping header %{public}s: DT_STRSZ not found", task->name);
5173 goto error;
5174 }
5175 }
5176
5177 task->shsize = task->eh->e_shentsize * task->eh->e_shnum;
5178 off_start = task->eh->e_shoff;
5179 off_start &= -PAGE_SIZE;
5180 task->shsize += task->eh->e_shoff - off_start;
5181 task->shdr_allocated_buf = mmap(0, task->shsize, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
5182 if (task->shdr_allocated_buf == MAP_FAILED) {
5183 LD_LOGE("Error mapping section header %{public}s: failed to map shdr_allocated_buf errno: %{public}d",
5184 task->name, errno);
5185 goto error;
5186 }
5187 Shdr *sh = (Shdr *)((char *)task->shdr_allocated_buf + task->eh->e_shoff - off_start);
5188 for (i = task->eh->e_shnum; i; i--, sh = (void *)((char *)sh + task->eh->e_shentsize)) {
5189 if (sh->sh_type != SHT_STRTAB || sh->sh_addr != str_table || sh->sh_size != str_size) {
5190 continue;
5191 }
5192 off_start = sh->sh_offset;
5193 off_start &= -PAGE_SIZE;
5194 task->str_map_len = sh->sh_size + (sh->sh_offset - off_start);
5195 task->str_map = mmap(0, task->str_map_len, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
5196 if (task->str_map == MAP_FAILED) {
5197 LD_LOGE("Error mapping section header %{public}s: failed to map string section errno: %{public}d",
5198 task->name, errno);
5199 goto error;
5200 }
5201 task->str_addr = (char *)task->str_map + sh->sh_offset - off_start;
5202 break;
5203 }
5204 if (!task->dyn) {
5205 LD_LOGE("Error mapping header %{public}s: dynamic section not found", task->name);
5206 goto noexec;
5207 }
5208 return true;
5209 noexec:
5210 errno = ENOEXEC;
5211 error:
5212 free(task->allocated_buf);
5213 task->allocated_buf = NULL;
5214 if (task->shdr_allocated_buf != MAP_FAILED) {
5215 munmap(task->shdr_allocated_buf, task->shsize);
5216 task->shdr_allocated_buf = MAP_FAILED;
5217 }
5218 return false;
5219 }
5220
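/* Second loading stage: reserve one contiguous region for all PT_LOAD
 * segments (honoring any reserved address range and huge-page alignment),
 * then map each segment with its own protections, zero-fill the bss tail,
 * and record the PT_GNU_RELRO range for later use. */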
5221 static bool task_map_library(struct loadtask *task, struct reserved_address_params *reserved_params)
5222 {
5223 size_t addr_min = SIZE_MAX, addr_max = 0, map_len;
5224 size_t this_min, this_max;
5225 size_t nsegs = 0;
5226 off_t off_start;
5227 Phdr *ph = task->ph0;
5228 unsigned prot;
5229 unsigned char *map = MAP_FAILED, *base;
5230 size_t i;
5231 int map_flags = MAP_PRIVATE;
5232 size_t start_addr;
5233 size_t start_alignment = PAGE_SIZE;
5234 bool hugepage_enabled = false;
5235
5236 for (i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
5237 if (ph->p_type == PT_GNU_RELRO) {
5238 task->p->relro_start = ph->p_vaddr & -PAGE_SIZE;
5239 task->p->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
5240 } else if (ph->p_type == PT_GNU_STACK) {
5241 if (!runtime && ph->p_memsz > __default_stacksize) {
5242 __default_stacksize =
5243 ph->p_memsz < DEFAULT_STACK_MAX ?
5244 ph->p_memsz : DEFAULT_STACK_MAX;
5245 }
5246 }
5247 if (ph->p_type != PT_LOAD) {
5248 continue;
5249 }
5250 nsegs++;
5251 if (ph->p_vaddr < addr_min) {
5252 addr_min = ph->p_vaddr;
5253 off_start = ph->p_offset;
5254 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
5255 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
5256 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
5257 }
5258 if (ph->p_vaddr + ph->p_memsz > addr_max) {
5259 addr_max = ph->p_vaddr + ph->p_memsz;
5260 }
5261 }
5262 if (!task->dyn) {
5263 LD_LOGE("Error mapping library: !task->dyn dynamic section not found task->name=%{public}s", task->name);
5264 goto noexec;
5265 }
5266 if (DL_FDPIC && !(task->eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
5267 task->p->loadmap = calloc(1, sizeof(struct fdpic_loadmap) + nsegs * sizeof(struct fdpic_loadseg));
5268 if (!task->p->loadmap) {
5269 LD_LOGE("Error mapping library: calloc failed errno=%{public}d nsegs=%{public}d", errno, nsegs);
5270 goto error;
5271 }
5272 task->p->loadmap->nsegs = nsegs;
5273 for (ph = task->ph0, i = 0; i < nsegs; ph = (void *)((char *)ph + task->eh->e_phentsize)) {
5274 if (ph->p_type != PT_LOAD) {
5275 continue;
5276 }
5277 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
5278 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
5279 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
5280 map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE - 1),
5281 prot, MAP_PRIVATE,
5282 task->fd, (ph->p_offset & -PAGE_SIZE) + task->file_offset);
5283 if (map == MAP_FAILED) {
5284 unmap_library(task->p);
5285 LD_LOGE("Error mapping library: PT_LOAD mmap failed task->name=%{public}s errno=%{public}d map_len=%{public}d",
5286 task->name, errno, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE - 1));
5287 goto error;
5288 }
5289 task->p->loadmap->segs[i].addr = (size_t)map +
5290 (ph->p_vaddr & PAGE_SIZE - 1);
5291 task->p->loadmap->segs[i].p_vaddr = ph->p_vaddr;
5292 task->p->loadmap->segs[i].p_memsz = ph->p_memsz;
5293 i++;
5294 if (prot & PROT_WRITE) {
5295 size_t brk = (ph->p_vaddr & PAGE_SIZE - 1) + ph->p_filesz;
5296 size_t pgbrk = (brk + PAGE_SIZE - 1) & -PAGE_SIZE;
5297 size_t pgend = (brk + ph->p_memsz - ph->p_filesz + PAGE_SIZE - 1) & -PAGE_SIZE;
5298 if (pgend > pgbrk && mmap_fixed(map + pgbrk,
5299 pgend - pgbrk, prot,
5300 MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
5301 -1, off_start) == MAP_FAILED) {
5302 LD_LOGE("Error mapping library: PROT_WRITE mmap_fixed failed errno=%{public}d", errno);
5303 goto error;
}
5304 memset(map + brk, 0, pgbrk - brk);
5305 }
5306 }
5307 map = (void *)task->p->loadmap->segs[0].addr;
5308 map_len = 0;
5309 goto done_mapping;
5310 }
5311 addr_max += PAGE_SIZE - 1;
5312 addr_max &= -PAGE_SIZE;
5313 addr_min &= -PAGE_SIZE;
5314 off_start &= -PAGE_SIZE;
5315 map_len = addr_max - addr_min + off_start;
5316 start_addr = addr_min;
5317
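/* If transparent hugepages are supported and a segment requests 2 MiB
 * (KPMD_SIZE) alignment, align the whole mapping accordingly so the
 * executable segment can be backed by huge pages (see the madvise below). */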
5318 hugepage_enabled = get_transparent_hugepages_supported();
5319 if (hugepage_enabled) {
5320 size_t maxinum_alignment = phdr_table_get_maxinum_alignment(task->ph0, task->eh->e_phnum);
5321
5322 start_alignment = maxinum_alignment == KPMD_SIZE ? KPMD_SIZE : PAGE_SIZE;
5323 }
5324
5325 if (reserved_params) {
5326 if (map_len > reserved_params->reserved_size) {
5327 if (reserved_params->must_use_reserved) {
5328 LD_LOGE("Error mapping library: map len is larger than reserved address task->name=%{public}s", task->name);
5329 goto error;
5330 }
5331 } else {
5332 start_addr = ((size_t)reserved_params->start_addr - 1 + PAGE_SIZE) & -PAGE_SIZE;
5333 map_flags |= MAP_FIXED;
5334 }
5335 }
5336
5337 /* We will pick a mapping_align-aligned address as the start of the dso,
5338 * so reserve tmp_map_len = map_len + mapping_align to make sure there is
5339 * enough space to shift the dso to the aligned location. */
5340 size_t mapping_align = start_alignment > LIBRARY_ALIGNMENT ? start_alignment : LIBRARY_ALIGNMENT;
5341 size_t tmp_map_len = ALIGN(map_len, mapping_align) + mapping_align - PAGE_SIZE;
5342
5343 /* map the whole load segments with PROT_READ first for security consideration. */
5344 prot = PROT_READ;
5345
5346 /* If reserved_params exists, use start_addr as the preferred address for the mmap operation. */
5347 if (reserved_params) {
5348 map = DL_NOMMU_SUPPORT
5349 ? mmap((void *)start_addr, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
5350 : mmap((void *)start_addr, map_len, prot, map_flags, task->fd, off_start + task->file_offset);
5351 if (map == MAP_FAILED) {
5352 LD_LOGE("Error mapping library: reserved_params mmap failed errno=%{public}d DL_NOMMU_SUPPORT=%{public}d"
5353 " task->fd=%{public}d task->name=%{public}s map_len=%{public}d",
5354 errno, DL_NOMMU_SUPPORT, task->fd, task->name, map_len);
5355 goto error;
5356 }
5357 if (reserved_params && map_len < reserved_params->reserved_size) {
5358 reserved_params->reserved_size -= (map_len + (start_addr - (size_t)reserved_params->start_addr));
5359 reserved_params->start_addr = (void *)((uint8_t *)map + map_len);
5360 }
5361 /* If reserved_params does not exist, use real_map as the preferred address for the mmap operation. */
5362 } else {
5363 /* use tmp_map_len to mmap enough space for the dso with anonymous mapping */
5364 unsigned char *temp_map = mmap((void *)NULL, tmp_map_len, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
5365 if (temp_map == MAP_FAILED) {
5366 LD_LOGE("Error mapping library: !reserved_params mmap failed errno=%{public}d tmp_map_len=%{public}d",
5367 errno, tmp_map_len);
5368 goto error;
5369 }
5370
5371 /* find the mapping_align aligned address */
5372 unsigned char *real_map = (unsigned char*)ALIGN((uintptr_t)temp_map, mapping_align);
5373
5374 map = DL_NOMMU_SUPPORT
5375 ? mmap(real_map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
5376 /* use map_len to mmap correct space for the dso with file mapping */
5377 : mmap(real_map, map_len, prot, map_flags | MAP_FIXED, task->fd, off_start + task->file_offset);
5378 if (map == MAP_FAILED || map != real_map) {
5379 LD_LOGE("Error mapping library: !reserved_params mmap failed errno=%{public}d DL_NOMMU_SUPPORT=%{public}d"
5380 "task->fd=%{public}d task->name=%{public}s map_len=%{public}d",
5381 errno, DL_NOMMU_SUPPORT, task->fd, task->name, map_len);
5382 goto error;
5383 }
5384
5385 /* Free unused memory.
5386 * |--------------------------tmp_map_len--------------------------|
5387 * ^ ^ ^ ^
5388 * |---unused_part_1---|---------map_len-------|---unused_part_2---|
5389 * temp_map real_map(aligned) temp_map_end
5390 */
5391 unsigned char *temp_map_end = temp_map + tmp_map_len;
5392 size_t unused_part_1 = real_map - temp_map;
5393 size_t unused_part_2 = temp_map_end - (real_map + map_len);
5394 if (unused_part_1 > 0) {
5395 int res1 = munmap(temp_map, unused_part_1);
5396 if (res1 == -1) {
5397 LD_LOGE("munmap unused part 1 failed, errno:%{public}d", errno);
5398 }
5399 }
5400
5401 if (unused_part_2 > 0) {
5402 int res2 = munmap(real_map + map_len, unused_part_2);
5403 if (res2 == -1) {
5404 LD_LOGE("munmap unused part 2 failed, errno:%{public}d", errno);
5405 }
5406 }
5407 }
5408 task->p->map = map;
5409 task->p->map_len = map_len;
5410 /* If the loaded file is not relocatable and the requested address is
5411 * not available, then the load operation must fail. */
5412 if (task->eh->e_type != ET_DYN && addr_min && map != (void *)addr_min) {
5413 LD_LOGE("Error mapping library: ET_DYN task->name=%{public}s", task->name);
5414 errno = EBUSY;
5415 goto error;
5416 }
5417 base = map - addr_min;
5418 task->p->phdr = 0;
5419 task->p->phnum = 0;
5420 for (ph = task->ph0, i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
5421 if (ph->p_type == PT_OHOS_RANDOMDATA) {
5422 fill_random_data((void *)(ph->p_vaddr + base), ph->p_memsz);
5423 continue;
5424 }
5425 if (ph->p_type != PT_LOAD) {
5426 continue;
5427 }
5428 /* Check if the programs headers are in this load segment, and
5429 * if so, record the address for use by dl_iterate_phdr. */
5430 if (!task->p->phdr && task->eh->e_phoff >= ph->p_offset
5431 && task->eh->e_phoff + task->phsize <= ph->p_offset + ph->p_filesz) {
5432 task->p->phdr = (void *)(base + ph->p_vaddr + (task->eh->e_phoff - ph->p_offset));
5433 task->p->phnum = task->eh->e_phnum;
5434 task->p->phentsize = task->eh->e_phentsize;
5435 }
5436 this_min = ph->p_vaddr & -PAGE_SIZE;
5437 this_max = ph->p_vaddr + ph->p_memsz + PAGE_SIZE - 1 & -PAGE_SIZE;
5438 off_start = ph->p_offset & -PAGE_SIZE;
5439 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
5440 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
5441 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
5442 /* Reuse the existing mapping for the lowest-address LOAD */
5443 if (mmap_fixed(
5444 base + this_min,
5445 this_max - this_min,
5446 prot, MAP_PRIVATE | MAP_FIXED,
5447 task->fd,
5448 off_start + task->file_offset) == MAP_FAILED) {
5449 LD_LOGE("Error mapping library: mmap fix failed task->name=%{public}s errno=%{public}d", task->name, errno);
5450 goto error;
5451 }
5452 if ((ph->p_flags & PF_X) && (ph->p_align == KPMD_SIZE) && hugepage_enabled)
5453 madvise(base + this_min, this_max - this_min, MADV_HUGEPAGE);
5454 if (ph->p_memsz > ph->p_filesz && (ph->p_flags & PF_W)) {
5455 size_t brk = (size_t)base + ph->p_vaddr + ph->p_filesz;
5456 size_t pgbrk = brk + PAGE_SIZE - 1 & -PAGE_SIZE;
5457 size_t zeromap_size = (size_t)base + this_max - pgbrk;
5458 memset((void *)brk, 0, pgbrk - brk & PAGE_SIZE - 1);
5459 if (pgbrk - (size_t)base < this_max && mmap_fixed(
5460 (void *)pgbrk,
5461 zeromap_size,
5462 prot,
5463 MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
5464 -1,
5465 0) == MAP_FAILED) {
5466 LD_LOGE("Error mapping library: PF_W mmap fix failed errno=%{public}d task->name=%{public}s zeromap_size=%{public}d",
5467 errno, task->name, zeromap_size);
5468 goto error;
5469 }
5470 set_bss_vma_name(task->p->name, (void *)pgbrk, zeromap_size);
5471 }
5472 }
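/* Legacy DT_TEXTREL objects patch their text during relocation, so the whole
 * image temporarily needs PROT_READ | PROT_WRITE | PROT_EXEC. */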
5473 for (i = 0; ((size_t *)(base + task->dyn))[i]; i += NEXT_DYNAMIC_INDEX) {
5474 if (((size_t *)(base + task->dyn))[i] == DT_TEXTREL) {
5475 if (mprotect(map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC) && errno != ENOSYS) {
5476 LD_LOGE("Error mapping library: mprotect failed task->name=%{public}s errno=%{public}d", task->name, errno);
5477 goto error;
5478 }
5479 break;
5480 }
5481 }
5482 done_mapping:
5483 #ifdef USE_ENCAPS
5484 clock_gettime(CLOCK_MONOTONIC, &encaps_time_start);
5485 (void)is_section_exist(task->eh, sizeof(Ehdr), task->fd, ".kernelpermission");
5486 clock_gettime(CLOCK_MONOTONIC, &encaps_time_end);
5487 encpas_cost_time = (encaps_time_end.tv_sec - encaps_time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
5488 + (encaps_time_end.tv_nsec - encaps_time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
5489 #endif
5490 task->p->base = base;
5491 task->p->dynv = laddr(task->p, task->dyn);
5492 if (task->p->tls.size) {
5493 task->p->tls.image = laddr(task->p, task->tls_image);
5494 }
5495 free(task->allocated_buf);
5496 task->allocated_buf = NULL;
5497 if (task->shdr_allocated_buf != MAP_FAILED) {
5498 munmap(task->shdr_allocated_buf, task->shsize);
5499 task->shdr_allocated_buf = MAP_FAILED;
5500 }
5501 return true;
5502 noexec:
5503 errno = ENOEXEC;
5504 error:
5505 if (map != MAP_FAILED) {
5506 unmap_library(task->p);
5507 }
5508 free(task->allocated_buf);
5509 task->allocated_buf = NULL;
5510 if (task->shdr_allocated_buf != MAP_FAILED) {
5511 munmap(task->shdr_allocated_buf, task->shsize);
5512 task->shdr_allocated_buf = MAP_FAILED;
5513 }
5514 return false;
5515 }
5516
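/* Resolve an already-opened fd back to its canonical path through
 * /proc/self/fd so the namespace accessibility check can run on the real path. */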
5517 static bool resolve_fd_to_realpath(struct loadtask *task)
5518 {
5519 char proc_self_fd[32];
5520 static char resolved_path[PATH_MAX];
5521
5522 int ret = snprintf(proc_self_fd, sizeof(proc_self_fd), "/proc/self/fd/%d", task->fd);
5523 if (ret < 0 || ret >= sizeof(proc_self_fd)) {
5524 return false;
5525 }
5526 ssize_t len = readlink(proc_self_fd, resolved_path, sizeof(resolved_path) - 1);
5527 if (len < 0) {
5528 return false;
5529 }
5530 resolved_path[len] = '\0';
5531 strncpy(task->buf, resolved_path, PATH_MAX);
5532
5533 return true;
5534 }
5535
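/* Resolve a library name inside the given namespace: reuse the dso if it is
 * already loaded, otherwise open it (possibly from an uncompressed zip entry
 * or an inherited namespace), map its headers, and allocate the struct dso,
 * including the TLS reservation needed by pre-existing threads. */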
5536 static bool load_library_header(struct loadtask *task)
5537 {
5538 const char *name = task->name;
5539 struct dso *needed_by = task->needed_by;
5540 ns_t *namespace = task->namespace;
5541 bool check_inherited = task->check_inherited;
5542 struct zip_info z_info;
5543
5544 bool map = false;
5545 struct stat st;
5546 size_t alloc_size;
5547 int n_th = 0;
5548 int is_self = 0;
5549
5550 if (!*name) {
5551 errno = EINVAL;
5552 return false;
5553 }
5554
5555 /* Catch and block attempts to reload the implementation itself */
5556 if (name[NAME_INDEX_ZERO] == 'l' && name[NAME_INDEX_ONE] == 'i' && name[NAME_INDEX_TWO] == 'b') {
5557 static const char reserved[] =
5558 "c.pthread.rt.m.dl.util.xnet.";
5559 const char *rp, *next;
5560 for (rp = reserved; *rp; rp = next) {
5561 next = strchr(rp, '.') + 1;
5562 if (strncmp(name + NAME_INDEX_THREE, rp, next - rp) == 0) {
5563 break;
5564 }
5565 }
5566 if (*rp) {
5567 if (ldd_mode) {
5568 /* Track which names have been resolved
5569 * and only report each one once. */
5570 static unsigned reported;
5571 unsigned mask = 1U << (rp - reserved);
5572 if (!(reported & mask)) {
5573 reported |= mask;
5574 dprintf(1, "\t%s => %s (%p)\n",
5575 name, ldso.name,
5576 ldso.base);
5577 }
5578 }
5579 is_self = 1;
5580 }
5581 }
5582 if (!strcmp(name, ldso.name)) {
5583 is_self = 1;
5584 }
5585 if (is_self) {
5586 if (!ldso.prev) {
5587 tail->next = &ldso;
5588 ldso.prev = tail;
5589 tail = &ldso;
5590 ldso.namespace = namespace;
5591 ns_add_dso(namespace, &ldso);
5592 }
5593 task->isloaded = true;
5594 task->p = &ldso;
5595 return true;
5596 }
5597 if (strchr(name, '/')) {
5598 char *separator = strstr(name, ZIP_FILE_PATH_SEPARATOR);
5599 if (separator != NULL) {
5600 int res = open_uncompressed_library_in_zipfile(name, &z_info, separator);
5601 if (!res) {
5602 task->pathname = name;
5603 if (!is_accessible(namespace, task->pathname, g_is_asan, check_inherited)) {
5604 LD_LOGE("Open uncompressed library: check ns accessible failed, pathname %{public}s namespace %{public}s.",
5605 task->pathname, namespace ? namespace->ns_name : "NULL");
5606 task->fd = -1;
5607 } else {
5608 task->fd = z_info.fd;
5609 task->file_offset = z_info.file_offset;
5610 }
5611 } else {
5612 LD_LOGE("Open uncompressed library in zip file failed, name:%{public}s res:%{public}d", name, res);
5613 return false;
5614 }
5615 } else {
5616 task->pathname = name;
5617 if (!is_accessible(namespace, task->pathname, g_is_asan, check_inherited)) {
5618 LD_LOGE("Open absolute_path library: check ns accessible failed, pathname %{public}s namespace %{public}s.",
5619 task->pathname, namespace ? namespace->ns_name : "NULL");
5620 task->fd = -1;
5621 } else {
5622 task->fd = open(name, O_RDONLY | O_CLOEXEC);
5623 }
5624 }
5625 } else {
5626 /* Search for the name to see if it's already loaded */
5627 /* Search in namespace */
5628 task->p = find_library_by_name(name, namespace, check_inherited);
5629 if (task->p) {
5630 task->isloaded = true;
5631 LD_LOGD("find_library_by_name(name=%{public}s ns=%{public}s) already loaded by %{public}s in %{public}s namespace ",
5632 name, namespace->ns_name, task->p->name, task->p->namespace->ns_name);
5633 return true;
5634 }
5635 if (strlen(name) > NAME_MAX) {
5636 LD_LOGE("load_library name length is larger than NAME_MAX:%{public}s.", name);
5637 return false;
5638 }
5639 task->fd = -1;
5640 if (namespace->env_paths) {
5641 open_library_by_path(name, namespace->env_paths, task, &z_info);
5642 }
5643 for (task->p = needed_by; task->fd == -1 && task->p; task->p = task->p->needed_by) {
5644 if (fixup_rpath(task->p, task->buf, sizeof task->buf) < 0) {
5645 task->fd = INVALID_FD_INHIBIT_FURTHER_SEARCH; /* Inhibit further search. */
5646 }
5647 if (task->p->rpath) {
5648 open_library_by_path(name, task->p->rpath, task, &z_info);
5649 if (task->fd != -1 && resolve_fd_to_realpath(task)) {
5650 if (!is_accessible(namespace, task->buf, g_is_asan, check_inherited)) {
5651 LD_LOGE("Open library: check ns accessible failed, name %{public}s namespace %{public}s.",
5652 name, namespace ? namespace->ns_name : "NULL");
5653 close(task->fd);
5654 task->fd = -1;
5655 }
5656 }
5657 }
5658 }
5659 if (g_is_asan) {
5660 handle_asan_path_open_by_task(task->fd, name, namespace, task, &z_info);
5661 LD_LOGD("load_library handle_asan_path_open_by_task fd:%{public}d.", task->fd);
5662 } else {
5663 if (task->fd == -1 && namespace->lib_paths) {
5664 open_library_by_path(name, namespace->lib_paths, task, &z_info);
5665 LD_LOGD("load_library no asan lib_paths path_open fd:%{public}d.", task->fd);
5666 }
5667 }
5668 task->pathname = task->buf;
5669 }
5670 if (task->fd < 0) {
5671 if (!check_inherited || !namespace->ns_inherits) {
5672 LD_LOGE("Error loading header %{public}s, namespace %{public}s has no inherits, errno=%{public}d",
5673 task->name, namespace->ns_name, errno);
5674 return false;
5675 }
5676 /* Load lib in inherited namespace. Do not check inherited again.*/
5677 for (size_t i = 0; i < namespace->ns_inherits->num; i++) {
5678 ns_inherit *inherit = namespace->ns_inherits->inherits[i];
5679 if (strchr(name, '/') == 0 && !is_sharable(inherit, name)) {
5680 continue;
5681 }
5682 task->namespace = inherit->inherited_ns;
5683 task->check_inherited = false;
5684 if (load_library_header(task)) {
5685 return true;
5686 }
5687 }
5688 LD_LOGE("Error loading header: can't find library %{public}s in namespace: %{public}s",
5689 task->name, namespace->ns_name);
5690 return false;
5691 }
5692
5693 if (fstat(task->fd, &st) < 0) {
5694 LD_LOGE("Error loading header %{public}s: failed to get file state errno=%{public}d", task->name, errno);
5695 close(task->fd);
5696 task->fd = -1;
5697 return false;
5698 }
5699 /* Search in namespace */
5700 task->p = find_library_by_fstat(&st, namespace, check_inherited, task->file_offset);
5701 if (task->p) {
5702 /* If this library was previously loaded with a
5703 * pathname but a search found the same inode,
5704 * setup its shortname so it can be found by name. */
5705 if (!task->p->shortname && task->pathname != name) {
5706 task->p->shortname = strrchr(task->p->name, '/') + 1;
5707 }
5708 close(task->fd);
5709 task->fd = -1;
5710 task->isloaded = true;
5711 LD_LOGD("find_library_by_fstat(name=%{public}s ns=%{public}s) already loaded by %{public}s in %{public}s namespace ",
5712 name, namespace->ns_name, task->p->name, task->p->namespace->ns_name);
5713 return true;
5714 }
5715
5716 map = noload ? 0 : map_library_header(task);
5717 if (!map) {
5718 LD_LOGE("Error loading header %{public}s: failed to map header", task->name);
5719 close(task->fd);
5720 task->fd = -1;
5721 return false;
5722 }
5723
5724 /* Allocate storage for the new DSO. When there is TLS, this
5725 * storage must include a reservation for all pre-existing
5726 * threads to obtain copies of both the new TLS, and an
5727 * extended DTV capable of storing an additional slot for
5728 * the newly-loaded DSO. */
5729 alloc_size = sizeof(struct dso) + strlen(task->pathname) + 1;
5730 if (runtime && task->tls.size) {
5731 size_t per_th = task->tls.size + task->tls.align + sizeof(void *) * (tls_cnt + TLS_CNT_INCREASE);
5732 n_th = libc.threads_minus_1 + 1;
5733 if (n_th > SSIZE_MAX / per_th) {
5734 alloc_size = SIZE_MAX;
5735 } else {
5736 alloc_size += n_th * per_th;
5737 }
5738 }
5739 task->p = calloc(1, alloc_size);
5740 if (!task->p) {
5741 LD_LOGE("Error loading header %{public}s: failed to allocate dso", task->name);
5742 close(task->fd);
5743 task->fd = -1;
5744 return false;
5745 }
5746 task->p->dev = st.st_dev;
5747 task->p->ino = st.st_ino;
5748 task->p->file_offset = task->file_offset;
5749 task->p->needed_by = needed_by;
5750 task->p->name = task->p->buf;
5751 strcpy(task->p->name, task->pathname);
5752 task->p->tls = task->tls;
5753 task->p->dynv = task->dyn_addr;
5754 task->p->strings = task->str_addr;
5755 size_t rpath_offset;
5756 size_t runpath_offset;
5757 if (search_vec(task->p->dynv, &rpath_offset, DT_RPATH))
5758 task->p->rpath_orig = task->p->strings + rpath_offset;
5759 if (search_vec(task->p->dynv, &runpath_offset, DT_RUNPATH))
5760 task->p->rpath_orig = task->p->strings + runpath_offset;
5761
5762 /* Add a shortname only if name arg was not an explicit pathname. */
5763 if (task->pathname != name) {
5764 task->p->shortname = strrchr(task->p->name, '/') + 1;
5765 }
5766
5767 if (task->p->tls.size) {
5768 task->p->tls_id = ++tls_cnt;
5769 task->p->new_dtv = (void *)(-sizeof(size_t) &
5770 (uintptr_t)(task->p->name + strlen(task->p->name) + sizeof(size_t)));
5771 task->p->new_tls = (void *)(task->p->new_dtv + n_th * (tls_cnt + 1));
5772 }
5773
5774 tail->next = task->p;
5775 task->p->prev = tail;
5776 tail = task->p;
5777
5778 /* Add dso to namespace */
5779 task->p->namespace = namespace;
5780 ns_add_dso(namespace, task->p);
5781 return true;
5782 }
5783
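/* Map the full library image for a task prepared by load_library_header(),
 * guarding against a second copy of libc being pulled in by pathname. */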
5784 static void task_load_library(struct loadtask *task, struct reserved_address_params *reserved_params)
5785 {
5786 LD_LOGD("load_library loading ns=%{public}s name=%{public}s by_dlopen=%{public}d", task->namespace->ns_name, task->p->name, runtime);
5787 bool map = noload ? 0 : task_map_library(task, reserved_params);
5788 __close(task->fd);
5789 task->fd = -1;
5790 if (!map) {
5791 LD_LOGE("Error loading library %{public}s: failed to map library noload=%{public}d errno=%{public}d",
5792 task->name, noload, errno);
5793 error("Error loading library %s: failed to map library noload=%d errno=%d", task->name, noload, errno);
5794 if (runtime) {
5795 longjmp(*rtld_fail, 1);
5796 }
5797 return;
5798 }
5799
5800 /* Avoid the danger of getting two versions of libc mapped into the
5801 * same process when an absolute pathname was used. The symbols
5802 * checked are chosen to catch both musl and glibc, and to avoid
5803 * false positives from interposition-hack libraries. */
5804 decode_dyn(task->p);
5805 if (find_sym(task->p, "__libc_start_main", 1).sym &&
5806 find_sym(task->p, "stdin", 1).sym) {
5807 do_dlclose(task->p, 0);
5808 task->p = NULL;
5809 free((void*)task->name);
5810 task->name = ld_strdup("libc.so");
5811 task->check_inherited = true;
5812 if (!load_library_header(task)) {
5813 LD_LOGE("Error loading library %{public}s: failed to load libc.so", task->name);
5814 error("Error loading library %s: failed to load libc.so", task->name);
5815 if (runtime) {
5816 longjmp(*rtld_fail, 1);
5817 }
5818 }
5819 return;
5820 }
5821 /* Past this point, if we haven't reached runtime yet, ldso has
5822 * committed either to use the mapped library or to abort execution.
5823 * Unmapping is not possible, so we can safely reclaim gaps. */
5824 if (!runtime) {
5825 reclaim_gaps(task->p);
5826 }
5827 task->p->runtime_loaded = runtime;
5828 if (runtime)
5829 task->p->by_dlopen = 1;
5830
5831 ++gencnt;
5832
5833 if (DL_FDPIC) {
5834 makefuncdescs(task->p);
5835 }
5836
5837 if (ldd_mode) {
5838 dprintf(1, "\t%s => %s (%p)\n", task->name, task->pathname, task->p->base);
5839 }
5840
5841 #ifdef ENABLE_HWASAN
5842 if (libc.load_hook) {
5843 libc.load_hook((long unsigned int)task->p->base, task->p->phdr, task->p->phnum);
5844 }
5845 #endif
5846 }
5847
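/* Fill p->deps with the dsos named by DT_NEEDED (plus every preload when p is
 * the main executable), creating a load task for each dependency that is not
 * loaded yet. */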
5848 static void preload_direct_deps(struct dso *p, ns_t *namespace, struct loadtasks *tasks)
5849 {
5850 size_t i, cnt = 0;
5851 if (p->deps) {
5852 return;
5853 }
5854 /* For head, all preloads are direct pseudo-dependencies.
5855 * Count and include them now to avoid realloc later. */
5856 if (p == head) {
5857 for (struct dso *q = p->next; q; q = q->next) {
5858 cnt++;
5859 }
5860 }
5861 for (i = 0; p->dynv[i]; i += NEXT_DYNAMIC_INDEX) {
5862 if (p->dynv[i] == DT_NEEDED) {
5863 cnt++;
5864 }
5865 }
5866 /* Use builtin buffer for apps with no external deps, to
5867 * preserve property of no runtime failure paths. */
5868 p->deps = (p == head && cnt < MIN_DEPS_COUNT) ? builtin_deps :
5869 calloc(cnt + 1, sizeof *p->deps);
5870 if (!p->deps) {
5871 LD_LOGE("Error loading dependencies for %{public}s", p->name);
5872 error("Error loading dependencies for %s", p->name);
5873 if (runtime) {
5874 longjmp(*rtld_fail, 1);
5875 }
5876 }
5877 cnt = 0;
5878 if (p == head) {
5879 for (struct dso *q = p->next; q; q = q->next) {
5880 p->deps[cnt++] = q;
5881 }
5882 }
5883 for (i = 0; p->dynv[i]; i += NEXT_DYNAMIC_INDEX) {
5884 if (p->dynv[i] != DT_NEEDED) {
5885 continue;
5886 }
5887 const char* dtneed_name = p->strings + p->dynv[i + 1];
5888 LD_LOGD("load_library %{public}s adding DT_NEEDED task %{public}s namespace(%{public}s)", p->name, dtneed_name, namespace->ns_name);
5889 struct loadtask *task = create_loadtask(dtneed_name, p, namespace, true);
5890 if (!task) {
5891 LD_LOGE("Error loading dependencies %{public}s : create load task failed", p->name);
5892 error("Error loading dependencies for %s : create load task failed", p->name);
5893 if (runtime) {
5894 longjmp(*rtld_fail, 1);
5895 }
5896 continue;
5897 }
5898 LD_LOGD("loading shared library %{public}s: (needed by %{public}s)", p->strings + p->dynv[i+1], p->name);
5899 if (!load_library_header(task)) {
5900 free_task(task);
5901 task = NULL;
5902 LD_LOGE("Error loading shared library %{public}s: (needed by %{public}s)",
5903 p->strings + p->dynv[i + 1],
5904 p->name);
5905 error("Error loading shared library %s: %m (needed by %s)",
5906 p->strings + p->dynv[i + 1], p->name);
5907 if (runtime) {
5908 longjmp(*rtld_fail, 1);
5909 }
5910 continue;
5911 }
5912 p->deps[cnt++] = task->p;
5913 if (task->isloaded) {
5914 free_task(task);
5915 task = NULL;
5916 } else {
5917 append_loadtasks(tasks, task);
5918 }
5919 }
5920 p->deps[cnt] = 0;
5921 p->ndeps_direct = cnt;
5922 for (i = 0; i < p->ndeps_direct; i++) {
5923 add_dso_parent(p->deps[i], p);
5924 }
5925 }
5926
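/* Drop the temporary header-stage mappings of the dynamic segment and string
 * table; the permanent copies live inside the fully mapped library images. */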
5927 static void unmap_preloaded_sections(struct loadtasks *tasks)
5928 {
5929 struct loadtask *task = NULL;
5930 for (size_t i = 0; i < tasks->length; i++) {
5931 task = get_loadtask(tasks, i);
5932 if (!task) {
5933 continue;
5934 }
5935 if (task->dyn_map_len) {
5936 munmap(task->dyn_map, task->dyn_map_len);
5937 task->dyn_map = NULL;
5938 task->dyn_map_len = 0;
5939 if (task->p) {
5940 task->p->dynv = NULL;
5941 }
5942 }
5943 if (task->str_map_len) {
5944 munmap(task->str_map, task->str_map_len);
5945 task->str_map = NULL;
5946 task->str_map_len = 0;
5947 if (task->p) {
5948 task->p->strings = NULL;
5949 }
5950 }
5951 }
5952 }
5953
5954 static void preload_deps(struct dso *p, struct loadtasks *tasks)
5955 {
5956 if (p->deps) {
5957 return;
5958 }
5959 for (; p; p = p->next) {
5960 preload_direct_deps(p, p->namespace, tasks);
5961 }
5962 }
5963
5964 static void run_loadtasks(struct loadtasks *tasks, struct reserved_address_params *reserved_params)
5965 {
5966 struct loadtask *task = NULL;
5967 bool reserved_address = false;
5968 for (size_t i = 0; i < tasks->length; i++) {
5969 task = get_loadtask(tasks, i);
5970 if (task) {
5971 if (reserved_params) {
5972 reserved_address = reserved_params->reserved_address_recursive || (reserved_params->target == task->p);
5973 }
5974 task_load_library(task, reserved_address ? reserved_params : NULL);
5975 }
5976 }
5977 }
5978
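/* Assign a static TLS offset to every module that carries a TLS image and
 * chain its descriptor into libc.tls_head; the layout direction depends on
 * whether the target places TLS above the thread pointer (TLS_ABOVE_TP). */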
5979 UT_STATIC void assign_tls(struct dso *p)
5980 {
5981 while (p) {
5982 if (p->tls.image) {
5983 tls_align = MAXP2(tls_align, p->tls.align);
5984 #ifdef TLS_ABOVE_TP
5985 p->tls.offset = tls_offset + ((p->tls.align - 1) &
5986 (-tls_offset + (uintptr_t)p->tls.image));
5987 tls_offset = p->tls.offset + p->tls.size;
5988 #else
5989 tls_offset += p->tls.size + p->tls.align - 1;
5990 tls_offset -= (tls_offset + (uintptr_t)p->tls.image)
5991 & (p->tls.align - 1);
5992 p->tls.offset = tls_offset;
5993 #endif
5994 if (tls_tail) {
5995 tls_tail->next = &p->tls;
5996 } else {
5997 libc.tls_head = &p->tls;
5998 }
5999 tls_tail = &p->tls;
6000 }
6001
6002 p = p->next;
6003 }
6004 }
6005
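/* Parse a preload list (entries separated by spaces or ':') and queue a load
 * task for each library named in it. */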
6006 UT_STATIC void load_preload(char *s, ns_t *ns, struct loadtasks *tasks)
6007 {
6008 int tmp;
6009 char *z;
6010
6011 struct loadtask *task = NULL;
6012 for (z = s; *z; s = z) {
6013 for (; *s && (isspace(*s) || *s == ':'); s++) {
6014 ;
6015 }
6016 for (z = s; *z && !isspace(*z) && *z != ':'; z++) {
6017 ;
6018 }
6019 tmp = *z;
6020 *z = 0;
6021 task = create_loadtask(s, NULL, ns, true);
6022 if (!task) {
6023 continue;
6024 }
6025 if (load_library_header(task)) {
6026 if (!task->isloaded) {
6027 append_loadtasks(tasks, task);
6028 task = NULL;
6029 }
6030 }
6031 if (task) {
6032 free_task(task);
6033 }
6034 *z = tmp;
6035 }
6036 }
6037 #endif
6038
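/* Write this dso's relocated GNU_RELRO pages into fd and replace them with a
 * read-only, file-backed mapping so identical RELRO pages can be shared
 * between processes (DL_EXT_WRITE_RELRO). */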
6039 static int serialize_gnu_relro(int fd, struct dso *dso, ssize_t *file_offset)
6040 {
6041 ssize_t count = dso->relro_end - dso->relro_start;
6042 ssize_t offset = 0;
6043 while (count > 0) {
6044 ssize_t write_size = TEMP_FAILURE_RETRY(write(fd, laddr(dso, dso->relro_start + offset), count));
6045 if (-1 == write_size) {
6046 LD_LOGE("Error serializing relro %{public}s: failed to write GNU_RELRO", dso->name);
6047 return -1;
6048 }
6049 offset += write_size;
6050 count -= write_size;
6051 }
6052
6053 ssize_t size = dso->relro_end - dso->relro_start;
6054 void *map = mmap(
6055 laddr(dso, dso->relro_start),
6056 size,
6057 PROT_READ,
6058 MAP_PRIVATE | MAP_FIXED,
6059 fd,
6060 *file_offset);
6061 if (map == MAP_FAILED) {
6062 LD_LOGE("Error serializing relro %{public}s: failed to map GNU_RELRO", dso->name);
6063 return -1;
6064 }
6065 *file_offset += size;
6066 return 0;
6067 }
6068
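/* Compare this dso's GNU_RELRO pages with the previously serialized copy in
 * fd and remap every run of identical pages from the file, avoiding private
 * dirty copies of those pages (DL_EXT_USE_RELRO). */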
6069 static int map_gnu_relro(int fd, struct dso *dso, ssize_t *file_offset)
6070 {
6071 ssize_t ext_fd_file_size = 0;
6072 struct stat ext_fd_file_stat;
6073 if (TEMP_FAILURE_RETRY(fstat(fd, &ext_fd_file_stat)) != 0) {
6074 LD_LOGE("Error mapping relro %{public}s: failed to get file state", dso->name);
6075 return -1;
6076 }
6077 ext_fd_file_size = ext_fd_file_stat.st_size;
6078
6079 void *ext_temp_map = MAP_FAILED;
6080 ext_temp_map = mmap(NULL, ext_fd_file_size, PROT_READ, MAP_PRIVATE, fd, 0);
6081 if (ext_temp_map == MAP_FAILED) {
6082 LD_LOGE("Error mapping relro %{public}s: failed to map fd", dso->name);
6083 return -1;
6084 }
6085
6086 char *file_base = (char *)(ext_temp_map) + *file_offset;
6087 char *mem_base = (char *)(laddr(dso, dso->relro_start));
6088 ssize_t start_offset = 0;
6089 ssize_t size = dso->relro_end - dso->relro_start;
6090
6091 if (size > ext_fd_file_size - *file_offset) {
6092 LD_LOGE("Error mapping relro %{public}s: invalid file size", dso->name);
6093 return -1;
6094 }
6095 while (start_offset < size) {
6096 // Find start location.
6097 while (start_offset < size) {
6098 if (memcmp(mem_base + start_offset, file_base + start_offset, PAGE_SIZE) == 0) {
6099 break;
6100 }
6101 start_offset += PAGE_SIZE;
6102 }
6103
6104 // Find end location.
6105 ssize_t end_offset = start_offset;
6106 while (end_offset < size) {
6107 if (memcmp(mem_base + end_offset, file_base + end_offset, PAGE_SIZE) != 0) {
6108 break;
6109 }
6110 end_offset += PAGE_SIZE;
6111 }
6112
6113 // Map pages.
6114 ssize_t map_length = end_offset - start_offset;
6115 ssize_t map_offset = *file_offset + start_offset;
6116 if (map_length > 0) {
6117 void *map = mmap(
6118 mem_base + start_offset, map_length, PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, map_offset);
6119 if (map == MAP_FAILED) {
6120 LD_LOGE("Error mapping relro %{public}s: failed to map GNU_RELRO", dso->name);
6121 munmap(ext_temp_map, ext_fd_file_size);
6122 return -1;
6123 }
6124 }
6125
6126 start_offset = end_offset;
6127 }
6128 *file_offset += size;
6129 munmap(ext_temp_map, ext_fd_file_size);
6130 return 0;
6131 }
6132
6133 static void handle_relro_sharing(struct dso *p, const dl_extinfo *extinfo, ssize_t *relro_fd_offset)
6134 {
6135 if (extinfo == NULL) {
6136 return;
6137 }
6138 if (extinfo->flag & DL_EXT_WRITE_RELRO) {
6139 LD_LOGD("Serializing GNU_RELRO %{public}s", p->name);
6140 if (serialize_gnu_relro(extinfo->relro_fd, p, relro_fd_offset) < 0) {
6141 LD_LOGE("Error serializing GNU_RELRO %{public}s", p->name);
6142 error("Error serializing GNU_RELRO");
6143 if (runtime) longjmp(*rtld_fail, 1);
6144 }
6145 } else if (extinfo->flag & DL_EXT_USE_RELRO) {
6146 LD_LOGD("Mapping GNU_RELRO %{public}s", p->name);
6147 if (map_gnu_relro(extinfo->relro_fd, p, relro_fd_offset) < 0) {
6148 LD_LOGE("Error mapping GNU_RELRO %{public}s", p->name);
6149 error("Error mapping GNU_RELRO");
6150 if (runtime) longjmp(*rtld_fail, 1);
6151 }
6152 }
6153 }
6154
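/* Label the anonymous zero-filled (.bss) part of a mapping with
 * PR_SET_VMA_ANON_NAME so it shows up as "<libname>.bss" in /proc/<pid>/maps. */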
6155 static void set_bss_vma_name(char *path_name, void *addr, size_t zeromap_size)
6156 {
6157 char so_bss_name[ANON_NAME_MAX_LEN];
6158 if (path_name == NULL) {
6159 snprintf(so_bss_name, ANON_NAME_MAX_LEN, ".bss");
6160 } else {
6161 char *t = strrchr(path_name, '/');
6162 if (t) {
6163 snprintf(so_bss_name, ANON_NAME_MAX_LEN, "%s.bss", ++t);
6164 } else {
6165 snprintf(so_bss_name, ANON_NAME_MAX_LEN, "%s.bss", path_name);
6166 }
6167 }
6168
6169 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, zeromap_size, so_bss_name);
6170 }
6171
6172 static void find_and_set_bss_name(struct dso *p)
6173 {
6174 size_t cnt;
6175 Phdr *ph = p->phdr;
6176 for (cnt = p->phnum; cnt--; ph = (void *)((char *)ph + p->phentsize)) {
6177 if (ph->p_type != PT_LOAD) continue;
6178 size_t seg_start = (size_t)p->base + ph->p_vaddr;
6179 size_t seg_file_end = seg_start + ph->p_filesz + PAGE_SIZE - 1 & -PAGE_SIZE;
6180 size_t seg_max_addr = seg_start + ph->p_memsz + PAGE_SIZE - 1 & -PAGE_SIZE;
6181 size_t zeromap_size = seg_max_addr - seg_file_end;
6182 if (zeromap_size > 0 && (ph->p_flags & PF_W)) {
6183 set_bss_vma_name(p->name, (void *)seg_file_end, zeromap_size);
6184 }
6185 }
6186 }
6187
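/* Publish the initial link map through the r_debug-style protocol: record
 * every loaded dso in the debug list and fire the _dl_debug_state() breakpoint
 * with state RT_CONSISTENT so an attached debugger can rescan. */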
6188 static void sync_with_debugger(void)
6189 {
6190 debug.ver = 1;
6191 debug.bp = dl_debug_state;
6192 debug.head = NULL;
6193 debug.base = ldso.base;
6194
6195 add_dso_info_to_debug_map(head);
6196
6197 debug.state = RT_CONSISTENT;
6198 _dl_debug_state();
6199 }
6200
6201 static void notify_addition_to_debugger(struct dso *p)
6202 {
6203 debug.state = RT_ADD;
6204 _dl_debug_state();
6205
6206 add_dso_info_to_debug_map(p);
6207
6208 debug.state = RT_CONSISTENT;
6209 _dl_debug_state();
6210 }
6211
6212 static void notify_remove_to_debugger(struct dso *p)
6213 {
6214 debug.state = RT_DELETE;
6215 _dl_debug_state();
6216
6217 remove_dso_info_from_debug_map(p);
6218
6219 debug.state = RT_CONSISTENT;
6220 _dl_debug_state();
6221 }
6222
6223 static void add_dso_info_to_debug_map(struct dso *p)
6224 {
6225 for (struct dso *so = p; so != NULL; so = so->next) {
6226 struct dso_debug_info *debug_info = malloc(sizeof(struct dso_debug_info));
6227 if (debug_info == NULL) {
6228 LD_LOGE("malloc error! dso name: %{public}s.", so->name);
6229 continue;
6230 }
6231 #if DL_FDPIC
6232 debug_info->loadmap = so->loadmap;
6233 #else
6234 debug_info->base = so->base;
6235 #endif
6236 debug_info->name = so->name;
6237 debug_info->dynv = so->dynv;
6238 if (debug.head == NULL) {
6239 debug_info->prev = NULL;
6240 debug_info->next = NULL;
6241 debug.head = debug_tail = debug_info;
6242 } else {
6243 debug_info->prev = debug_tail;
6244 debug_info->next = NULL;
6245 debug_tail->next = debug_info;
6246 debug_tail = debug_info;
6247 }
6248 so->debug_info = debug_info;
6249 }
6250 }
6251
6252 static void remove_dso_info_from_debug_map(struct dso *p)
6253 {
6254 struct dso_debug_info *debug_info = p->debug_info;
6255 if (debug_info == debug_tail) {
6256 debug_tail = debug_tail->prev;
6257 debug_tail->next = NULL;
6258 } else {
6259 debug_info->next->prev = debug_info->prev;
6260 debug_info->prev->next = debug_info->next;
6261 }
6262 free(debug_info);
6263 }
6264
6265 typedef struct dso_handle_node {
6266 void *dso_handle; // Used to locate the dso.
6267 uint32_t count;
6268 struct dso* dso;
6269 struct dso_handle_node* next;
6270 } dso_handle_node;
6271
6272 static dso_handle_node* dso_handle_list = NULL;
6273
6274 dso_handle_node* find_dso_handle_node(void *dso_handle)
6275 {
6276 dso_handle_node *cur = dso_handle_list;
6277 while (cur) {
6278 if (cur->dso_handle == dso_handle) {
6279 return cur;
6280 }
6281 cur = cur->next;
6282 }
6283 return NULL;
6284 }
6285
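/* Presumably used by the thread-local destructor (__cxa_thread_atexit)
 * machinery, as the [cxa_thread] logs suggest: each registered dso_handle
 * pins its dlopen-loaded dso and all of its dependencies with an extra
 * nr_dlopen reference so the code stays mapped while destructors may run. */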
6286 void add_dso_handle_node(void *dso_handle)
6287 {
6288 pthread_rwlock_wrlock(&lock);
6289 if (!dso_handle) {
6290 LD_LOGW("[cxa_thread] add_dso_handle_node return because dso_handle is null.\n");
6291 pthread_rwlock_unlock(&lock);
6292 return;
6293 }
6294
6295 dso_handle_node *node = find_dso_handle_node(dso_handle);
6296 if (node) {
6297 node->count++;
6298 LD_LOGD("[cxa_thread] increase dso node count of %{public}s, count:%{public}d ", node->dso->name, node->count);
6299 pthread_rwlock_unlock(&lock);
6300 return;
6301 }
6302 dso_handle_node *cur = __libc_malloc(sizeof(*cur));
6303 if (!cur) {
6304 pthread_rwlock_unlock(&lock);
6305 LD_LOGE("[cxa_thread] alloc dso_handle_node failed.");
6306 error("[cxa_thread]: alloc dso_handle_node failed.");
6307 return;
6308 }
6309
6310 struct dso* p = addr2dso(dso_handle);
6311 if (!p) {
6312 pthread_rwlock_unlock(&lock);
6313 LD_LOGE("[cxa_thread] can't find dso by dso_handle(%{public}p)", dso_handle);
6314 error("[cxa_thread] can't find dso by dso_handle(%p)", dso_handle);
6315 return;
6316 }
6317
6318 // We don't need to care about the so which by_dlopen is false because it will never be unload.
6319 if (p->by_dlopen) {
6320 p->nr_dlopen++;
6321 LD_LOGD("[cxa_thread] %{public}s nr_dlopen++ when add dso_handle for %{public}s, nr_dlopen:%{public}d",
6322 p->name, p->name, p->nr_dlopen);
6323 if (p->bfs_built) {
6324 for (size_t i = 0; p->deps[i]; i++) {
6325 p->deps[i]->nr_dlopen++;
6326 LD_LOGD("[cxa_thread] %{public}s nr_dlopen++ when add dso_handle for %{public}s, nr_dlopen:%{public}d",
6327 p->deps[i]->name, p->name, p->deps[i]->nr_dlopen);
6328 }
6329 } else {
6330 // Get all the direct and indirect deps.
6331 extend_bfs_deps(p, 1);
6332 for (size_t i = 0; p->deps_all[i]; i++) {
6333 p->deps_all[i]->nr_dlopen++;
6334 LD_LOGD("[cxa_thread] %{public}s nr_dlopen++ when add dso_handle for %{public}s, nr_dlopen:%{public}d",
6335 p->deps_all[i]->name, p->name, p->deps_all[i]->nr_dlopen);
6336 }
6337 }
6338 }
6339 cur->dso = p;
6340 cur->dso_handle = dso_handle;
6341 cur->count = 1;
6342 cur->next = dso_handle_list;
6343 dso_handle_list = cur;
6344 pthread_rwlock_unlock(&lock);
6345
6346 return;
6347 }
6348
6349 void remove_dso_handle_node(void *dso_handle)
6350 {
6351 pthread_rwlock_wrlock(&lock);
6352 if (dso_handle == NULL) {
6353 LD_LOGW("[cxa_thread] remove_dso_handle_node return because dso_handle is null.\n");
6354 pthread_rwlock_unlock(&lock);
6355 return;
6356 }
6357
6358 dso_handle_node *node = find_dso_handle_node(dso_handle);
6359 if (node && node->count) {
6360 LD_LOGD("[cxa_thread] decrease dso node count of %{public}s, count:%{public}d ", node->dso->name, node->count - 1);
6361 if ((--node->count) == 0) {
6362 LD_LOGD("[cxa_thread] call do_dlclose(%{public}s) when count is 0", node->dso->name);
6363 do_dlclose(node->dso, 1);
6364 // Invalidate current node.
6365 node->dso_handle = NULL;
6366 }
6367 pthread_rwlock_unlock(&lock);
6368 return;
6369 } else {
6370 LD_LOGE("[cxa_thread] can't find matched dso handle node by %{public}p, count:%{public}d", dso_handle, node->count);
6371 error("[cxa_thread] can't find matched dso handle node by %p, count:%d", dso_handle, node->count);
6372 }
6373 pthread_rwlock_unlock(&lock);
6374
6375 return;
6376 }