1 #define _GNU_SOURCE
2 #define SYSCALL_NO_TLS 1
3
4 #include "dynlink.h"
5
6 #include <stdbool.h>
7 #include <stdlib.h>
8 #include <stdarg.h>
9 #include <stddef.h>
10 #include <string.h>
11 #include <unistd.h>
12 #include <stdint.h>
13 #include <elf.h>
14 #include <sys/mman.h>
15 #include <limits.h>
16 #include <fcntl.h>
17 #include <sys/stat.h>
18 #include <errno.h>
19 #include <link.h>
20 #include <setjmp.h>
21 #include <pthread.h>
22 #include <ctype.h>
23 #include <dlfcn.h>
24 #include <semaphore.h>
25 #include <sys/membarrier.h>
26 #include <sys/time.h>
27 #include <time.h>
28 #include <sys/prctl.h>
29 #include <sys/queue.h>
30
31 #include "cfi.h"
32 #include "dlfcn_ext.h"
33 #include "dynlink_rand.h"
34 #include "ld_log.h"
35 #include "libc.h"
36 #include "musl_fdsan.h"
37 #include "namespace.h"
38 #include "ns_config.h"
39 #include "pthread_impl.h"
40 #include "fork_impl.h"
41 #include "strops.h"
42 #include "trace/trace_marker.h"
43
44 #ifdef IS_ASAN
45 #if defined (__arm__)
46 #define LIB "/lib/"
47 #elif defined (__aarch64__)
48 #define LIB "/lib64/"
49 #else
50 #error "unsupported arch"
51 #endif
52 #endif
53
54 #ifdef OHOS_ENABLE_PARAMETER
55 #include "sys_param.h"
56 #endif
57 #ifdef LOAD_ORDER_RANDOMIZATION
58 #include "zip_archive.h"
59 #endif
60
61 #ifdef USE_ENCAPS
62 #include <sys/ioctl.h>
63
64 #define OH_ENCAPS_MAGIC 'E'
65 #define OH_ENCAPS_SYNC_BASE 0x19
66 #define SYNC_ENCAPS_CMD _IO(OH_ENCAPS_MAGIC, OH_ENCAPS_SYNC_BASE)
67 static int encpas_cost_time = 0;
68 struct timespec encaps_time_start, encaps_time_end;
69 #endif
70
71 static size_t ldso_page_size;
72 #ifndef PAGE_SIZE
73 #define PAGE_SIZE ldso_page_size
74 #endif
75
76 #define malloc __libc_malloc
77 #define calloc __libc_calloc
78 #define realloc __libc_realloc
79 #define free __libc_free
80
81 static void error_impl(const char *, ...);
82 static void error_noop(const char *, ...);
83 static void (*error)(const char *, ...) = error_noop;
84
85 #define MAXP2(a,b) (-(-(a)&-(b)))
86 #define ALIGN(x,y) ((x)+(y)-1 & -(y))
87 #define GNU_HASH_FILTER(ght, ghm, gho) \
88 const size_t *bloomwords = (const void *)(ght + 4); \
89 size_t f = bloomwords[gho & (ght[2] - 1)]; \
90 if (!(f & ghm)) continue; \
91 f >>= (gh >> ght[3]) % (8 * sizeof f); \
92 if (!(f & 1)) continue;
93
94 #define container_of(p,t,m) ((t*)((char *)(p)-offsetof(t,m)))
95 #define countof(a) ((sizeof (a))/(sizeof (a)[0]))
96 #define DSO_FLAGS_NODELETE 0x1
97
98 #ifdef HANDLE_RANDOMIZATION
99 #define NEXT_DYNAMIC_INDEX 2
100 #define MIN_DEPS_COUNT 2
101 #define NAME_INDEX_ZERO 0
102 #define NAME_INDEX_ONE 1
103 #define NAME_INDEX_TWO 2
104 #define NAME_INDEX_THREE 3
105 #define TLS_CNT_INCREASE 3
106 #define INVALID_FD_INHIBIT_FURTHER_SEARCH (-2)
107 #endif
108
109 #define MAP_XPM 0x40
110 #define PARENTS_BASE_CAPACITY 8
111 #define RELOC_CAN_SEARCH_DSO_BASE_CAPACITY 32
112 #define ANON_NAME_MAX_LEN 70
113
114 #define KPMD_SIZE (1UL << 21)
115 #define HUGEPAGES_SUPPORTED_STR_SIZE (32)
116
117 #ifdef UNIT_TEST_STATIC
118 #define UT_STATIC
119 #else
120 #define UT_STATIC static
121 #endif
122
123 /* Used for dlclose */
124 #define UNLOAD_NR_DLOPEN_CHECK 1
125 #define UNLOAD_COMMON_CHECK 2
126 #define UNLOAD_ALL_CHECK 3
127 struct dso_entry {
128 struct dso *dso;
129 TAILQ_ENTRY(dso_entry) entries;
130 };
131
132 struct debug {
133 int ver;
134 void *head;
135 void (*bp)(void);
136 int state;
137 void *base;
138 };
139
140 struct reserved_address_params {
141 void* start_addr;
142 size_t reserved_size;
143 bool must_use_reserved;
144 bool reserved_address_recursive;
145 #ifdef LOAD_ORDER_RANDOMIZATION
146 struct dso *target;
147 #endif
148 };
149
150 typedef void (*stage3_func)(size_t *, size_t *, size_t *);
151
152 static struct builtin_tls {
153 char c[8];
154 struct pthread pt;
155 void *space[16];
156 } builtin_tls[1];
157 #define MIN_TLS_ALIGN offsetof(struct builtin_tls, pt)
158
159 #define ADDEND_LIMIT 4096
160 static size_t *saved_addends, *apply_addends_to;
161 static bool g_is_asan;
162 static struct dso ldso;
163 static struct dso *head, *tail, *fini_head, *syms_tail, *lazy_head;
164 static struct dso_debug_info *debug_tail = NULL;
165 static char *env_path, *sys_path;
166 static unsigned long long gencnt;
167 static unsigned long long subcnt;
168 static int runtime;
169 static int ldd_mode;
170 static int ldso_fail;
171 static int noload;
172 static int shutting_down;
173 static jmp_buf *rtld_fail;
174 static pthread_rwlock_t lock;
175 static pthread_mutex_t dlclose_lock = { { PTHREAD_MUTEX_RECURSIVE } }; // set mutex type to PTHREAD_MUTEX_RECURSIVE
176 static struct debug debug;
177 static struct tls_module *tls_tail;
178 static size_t tls_cnt, tls_offset, tls_align = MIN_TLS_ALIGN;
179 static size_t static_tls_cnt;
180 static pthread_mutex_t init_fini_lock;
181 static pthread_mutex_t dl_phdr_lock;
182 static pthread_cond_t ctor_cond;
183 static struct dso *builtin_deps[2];
184 static struct dso *const no_deps[1];
185 static struct dso *builtin_ctor_queue[4];
186 static struct dso **main_ctor_queue;
187 static struct fdpic_loadmap *app_loadmap;
188 static struct fdpic_dummy_loadmap app_dummy_loadmap;
189
190 struct debug *_dl_debug_addr = &debug;
191
192 extern weak hidden char __ehdr_start[];
193
194 extern hidden int __malloc_replaced;
195
196 hidden void (*const __init_array_start)(void)=0, (*const __fini_array_start)(void)=0;
197
198 extern hidden void (*const __init_array_end)(void), (*const __fini_array_end)(void);
199
200 #ifdef USE_GWP_ASAN
201 extern bool init_gwp_asan_by_libc(bool force_init);
202 #endif
203
204 weak_alias(__init_array_start, __init_array_end);
205 weak_alias(__fini_array_start, __fini_array_end);
206 #ifdef DFX_SIGNAL_LIBC
207 UT_STATIC void __InstallSignalHandler()
208 {
209 }
210 weak_alias(__InstallSignalHandler, DFX_InstallSignalHandler);
211 #endif
212
213 #ifdef HANDLE_RANDOMIZATION
214 static int do_dlclose(struct dso *p, bool check_deps_all);
215 #endif
216
217 #ifdef LOAD_ORDER_RANDOMIZATION
218 static bool task_check_xpm(struct loadtask *task);
219 static bool map_library_header(struct loadtask *task);
220 static bool task_map_library(struct loadtask *task, struct reserved_address_params *reserved_params);
221 static bool resolve_fd_to_realpath(struct loadtask *task);
222 static bool load_library_header(struct loadtask *task);
223 static void task_load_library(struct loadtask *task, struct reserved_address_params *reserved_params);
224 static void preload_direct_deps(struct dso *p, ns_t *namespace, struct loadtasks *tasks);
225 static void unmap_preloaded_sections(struct loadtasks *tasks);
226 static void preload_deps(struct dso *p, struct loadtasks *tasks);
227 static void run_loadtasks(struct loadtasks *tasks, struct reserved_address_params *reserved_params);
228 UT_STATIC void assign_tls(struct dso *p);
229 UT_STATIC void load_preload(char *s, ns_t *ns, struct loadtasks *tasks);
230 static void open_library_by_path(const char *name, const char *s, struct loadtask *task, struct zip_info *z_info);
231 static void handle_asan_path_open_by_task(int fd, const char *name, ns_t *namespace, struct loadtask *task, struct zip_info *z_info);
232 #endif
233
234 extern int __close(int fd);
235
236 /* Sharing relro */
237 static void handle_relro_sharing(struct dso *p, const dl_extinfo *extinfo, ssize_t *relro_fd_offset);
238
239 /* asan path open */
240 int handle_asan_path_open(int fd, const char *name, ns_t *namespace, char *buf, size_t buf_size);
241
242 static void set_bss_vma_name(char *path_name, void *addr, size_t zeromap_size);
243
244 static void find_and_set_bss_name(struct dso *p);
245
246 /* lldb debug function */
247 static void sync_with_debugger();
248 static void notify_addition_to_debugger(struct dso *p);
249 static void notify_remove_to_debugger(struct dso *p);
250 static void add_dso_info_to_debug_map(struct dso *p);
251 static void remove_dso_info_from_debug_map(struct dso *p);
252
253 /* add namespace function */
254 static void get_sys_path(ns_configor *conf);
255 static void dlclose_ns(struct dso *p);
256 static bool get_app_path(char *path, size_t size)
257 {
258 int l = 0;
259 l = readlink("/proc/self/exe", path, size);
260 if (l < 0 || l >= size) {
261 LD_LOGD("get_app_path readlink failed!");
262 return false;
263 }
264 path[l] = 0;
265 LD_LOGD("get_app_path path:%{public}s.", path);
266 return true;
267 }
268
269 static void init_default_namespace(struct dso *app)
270 {
271 ns_t *default_ns = get_default_ns();
272 memset(default_ns, 0, sizeof *default_ns);
273 ns_set_name(default_ns, NS_DEFAULT_NAME);
274 if (env_path) ns_set_env_paths(default_ns, env_path);
275 ns_set_lib_paths(default_ns, sys_path);
276 ns_set_separated(default_ns, false);
277 app->namespace = default_ns;
278 ns_add_dso(default_ns, app);
279 LD_LOGD("init_default_namespace default_namespace:"
280 "nsname: default ,"
281 "lib_paths:%{public}s ,"
282 "env_path:%{public}s ,"
283 "separated: false.",
284 sys_path, env_path);
285 return;
286 }
287
288 UT_STATIC void set_ns_attrs(ns_t *ns, ns_configor *conf)
289 {
290 if (!ns || !conf) {
291 return;
292 }
293
294 char *lib_paths, *asan_lib_paths, *permitted_paths, *asan_permitted_paths, *allowed_libs;
295
296 ns_set_separated(ns, conf->get_separated(ns->ns_name));
297
298 lib_paths = conf->get_lib_paths(ns->ns_name);
299 if (lib_paths) ns_set_lib_paths(ns, lib_paths);
300
301 asan_lib_paths = conf->get_asan_lib_paths(ns->ns_name);
302 if (asan_lib_paths) ns_set_asan_lib_paths(ns, asan_lib_paths);
303
304 permitted_paths = conf->get_permitted_paths(ns->ns_name);
305 if (permitted_paths) ns_set_permitted_paths(ns, permitted_paths);
306
307 asan_permitted_paths = conf->get_asan_permitted_paths(ns->ns_name);
308 if (asan_permitted_paths) ns_set_asan_permitted_paths(ns, asan_permitted_paths);
309
310 allowed_libs = conf->get_allowed_libs(ns->ns_name);
311 if (allowed_libs) ns_set_allowed_libs(ns, allowed_libs);
312
313 LD_LOGD("set_ns_attrs :"
314 "ns_name: %{public}s ,"
315 "separated:%{public}d ,"
316 "lib_paths:%{public}s ,"
317 "asan_lib_paths:%{public}s ,"
318 "permitted_paths:%{public}s ,"
319 "asan_permitted_paths:%{public}s ,"
320 "allowed_libs: %{public}s .",
321 ns->ns_name, ns->separated, ns->lib_paths, ns->asan_lib_paths, permitted_paths,
322 asan_permitted_paths, allowed_libs);
323 }
324
325 UT_STATIC void set_ns_inherits(ns_t *ns, ns_configor *conf)
326 {
327 if (!ns || !conf) {
328 return;
329 }
330
331 strlist *inherits = conf->get_inherits(ns->ns_name);
332 if (inherits) {
333 for (size_t i = 0; i < inherits->num; i++) {
334 ns_t *inherited_ns = find_ns_by_name(inherits->strs[i]);
335 if (inherited_ns) {
336 char *shared_libs = conf->get_inherit_shared_libs(ns->ns_name, inherited_ns->ns_name);
337 ns_add_inherit(ns, inherited_ns, shared_libs);
338 LD_LOGD("set_ns_inherits :"
339 "ns_name: %{public}s ,"
340 "separated:%{public}d ,"
341 "lib_paths:%{public}s ,"
342 "asan_lib_paths:%{public}s ,",
343 inherited_ns->ns_name, inherited_ns->separated, inherited_ns->lib_paths,
344 inherited_ns->asan_lib_paths);
345 }
346 }
347 strlist_free(inherits);
348 } else {
349 LD_LOGD("set_ns_inherits inherits is NULL!");
350 }
351 }
352
353 static void init_namespace(struct dso *app)
354 {
355 char app_path[PATH_MAX + 1];
356 if (!get_app_path(app_path, sizeof app_path)) {
357 strcpy(app_path, app->name);
358 }
359 char *t = strrchr(app_path, '/');
360 if (t) {
361 *t = 0;
362 } else {
363 app_path[0] = '.';
364 app_path[1] = 0;
365 }
366
367 nslist *nsl = nslist_init();
368 ns_configor *conf = configor_init();
369 char file_path[sizeof "/etc/ld-musl-namespace-" + sizeof (LDSO_ARCH) + sizeof ".ini" + 1] = {0};
370 (void)snprintf(file_path, sizeof file_path, "/etc/ld-musl-namespace-%s.ini", LDSO_ARCH);
371 LD_LOGI("init_namespace file_path:%{public}s", file_path);
372 trace_marker_reset();
373 trace_marker_begin(HITRACE_TAG_MUSL, "parse linker config", file_path);
374 int ret = conf->parse(file_path, app_path);
375 if (ret < 0) {
376 LD_LOGE("init_namespace ini file parse failed!");
377 /* init_default_namespace is still required even if parsing the ini file fails */
378 if (!sys_path) get_sys_path(conf);
379 init_default_namespace(app);
380 configor_free();
381 trace_marker_end(HITRACE_TAG_MUSL);
382 return;
383 }
384
385 /* sys_path needs to be parsed through ini file */
386 if (!sys_path) get_sys_path(conf);
387 init_default_namespace(app);
388
389 /* Init default namespace */
390 ns_t *d_ns = get_default_ns();
391 set_ns_attrs(d_ns, conf);
392
393 /* Init other namespace */
394 if (!nsl) {
395 LD_LOGE("init nslist fail!");
396 configor_free();
397 trace_marker_end(HITRACE_TAG_MUSL);
398 return;
399 }
400 strlist *s_ns = conf->get_namespaces();
401 if (s_ns) {
402 for (size_t i = 0; i < s_ns->num; i++) {
403 ns_t *ns = ns_alloc();
404 ns_set_name(ns, s_ns->strs[i]);
405 set_ns_attrs(ns, conf);
406 ns_add_dso(ns, app);
407 nslist_add_ns(ns);
408 }
409 strlist_free(s_ns);
410 }
411 /* Set inherited namespace */
412 set_ns_inherits(d_ns, conf);
413 for (size_t i = 0; i < nsl->num; i++) {
414 set_ns_inherits(nsl->nss[i], conf);
415 }
416 configor_free();
417 trace_marker_end(HITRACE_TAG_MUSL);
418 return;
419 }
420
421 /* Compute load address for a virtual address in a given dso. */
422 #if DL_FDPIC
423 void *laddr(const struct dso *p, size_t v)
424 {
425 size_t j=0;
426 if (!p->loadmap) return p->base + v;
427 for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);
428 return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
429 }
430 static void *laddr_pg(const struct dso *p, size_t v)
431 {
432 size_t j=0;
433 size_t pgsz = PAGE_SIZE;
434 if (!p->loadmap) return p->base + v;
435 for (j=0; ; j++) {
436 size_t a = p->loadmap->segs[j].p_vaddr;
437 size_t b = a + p->loadmap->segs[j].p_memsz;
438 a &= -pgsz;
439 b += pgsz-1;
440 b &= -pgsz;
441 if (v-a<b-a) break;
442 }
443 return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
444 }
445 static void (*fdbarrier(void *p))()
446 {
447 void (*fd)();
448 __asm__("" : "=r"(fd) : "0"(p));
449 return fd;
450 }
451 #define fpaddr(p, v) fdbarrier((&(struct funcdesc){ \
452 laddr(p, v), (p)->got }))
453 #else
454 #define laddr(p, v) (void *)((p)->base + (v))
455 #define laddr_pg(p, v) laddr(p, v)
456 #define fpaddr(p, v) ((void (*)())laddr(p, v))
457 #endif
458
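/* Decode the DT_* dynamic vector: zero the output array, store each tag's
 * value at a[tag], and set bit `tag` in a[0] for small tags so callers can
 * cheaply test whether a given tag was present. */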
459 static void decode_vec(size_t *v, size_t *a, size_t cnt)
460 {
461 size_t i;
462 for (i=0; i<cnt; i++) a[i] = 0;
463 for (; v[0]; v+=2) if (v[0]-1<cnt-1) {
464 if (v[0] < 8 * sizeof(long)) {
465 a[0] |= 1UL<<v[0];
466 }
467 a[v[0]] = v[1];
468 }
469 }
470
471 static int search_vec(size_t *v, size_t *r, size_t key)
472 {
473 for (; v[0]!=key; v+=2)
474 if (!v[0]) return 0;
475 *r = v[1];
476 return 1;
477 }
478
479 UT_STATIC int check_vna_hash(Verdef *def, int16_t vsym, uint32_t vna_hash)
480 {
481 int matched = 0;
482
483 vsym &= 0x7fff;
484 Verdef *verdef = def;
485 for (;;) {
486 if ((verdef->vd_ndx & 0x7fff) == vsym) {
487 if (vna_hash == verdef->vd_hash) {
488 matched = 1;
489 }
490 break;
491 }
492 if (matched) {
493 break;
494 }
495 if (verdef->vd_next == 0) {
496 break;
497 }
498 verdef = (Verdef *)((char *)verdef + verdef->vd_next);
499 }
500 #if (LD_LOG_LEVEL & LD_LOG_DEBUG)
501 if (!matched) {
502 LD_LOGD("check_vna_hash no matched found. vsym=%{public}d vna_hash=%{public}x", vsym, vna_hash);
503 }
504 #endif
505 return matched;
506 }
507
508 UT_STATIC int check_verinfo(Verdef *def, int16_t *versym, uint32_t index, struct verinfo *verinfo, char *strings)
509 {
510 /* If versym or the version definition table is null, no version check is needed. */
511 if (!versym || !def) {
512 if (strlen(verinfo->v) == 0) {
513 return 1;
514 } else {
515 LD_LOGD("check_verinfo versym or def is null and verinfo->v exist, s:%{public}s v:%{public}s.",
516 verinfo->s, verinfo->v);
517 return 0;
518 }
519 }
520
521 int16_t vsym = versym[index];
522
523 /* find the verneed symbol. */
524 if (verinfo->use_vna_hash) {
525 if (vsym != VER_NDX_LOCAL && vsym != VER_NDX_GLOBAL) {
526 return check_vna_hash(def, vsym, verinfo->vna_hash);
527 }
528 }
529
530 /* If the requested version string is empty and vsym is non-negative, the library has a default version symbol. */
531 if (strlen(verinfo->v) == 0) {
532 if (vsym >= 0) {
533 return 1;
534 } else {
535 LD_LOGD("check_verinfo not default version. vsym:%{public}d s:%{public}s", vsym, verinfo->s);
536 return 0;
537 }
538 }
539
540 /* find the version of symbol. */
541 vsym &= 0x7fff;
542 for (;;) {
543 if (!(def->vd_flags & VER_FLG_BASE) && (def->vd_ndx & 0x7fff) == vsym) {
544 break;
545 }
546 if (def->vd_next == 0) {
547 return 0;
548 }
549 def = (Verdef *)((char *)def + def->vd_next);
550 }
551
552 Verdaux *aux = (Verdaux *)((char *)def + def->vd_aux);
553
554 int ret = !strcmp(verinfo->v, strings + aux->vda_name);
555 #if (LD_LOG_LEVEL & LD_LOG_DEBUG)
556 if (!ret) {
557 LD_LOGD("check_verinfo version not match. s=%{public}s v=%{public}s vsym=%{public}d vda_name=%{public}s",
558 verinfo->s, verinfo->v, vsym, strings + aux->vda_name);
559 }
560 #endif
561 return ret;
562 }
563
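/* Classic ELF SysV hash of a symbol name; the name length is returned as
 * well so lookups can match with memcmp instead of strcmp. */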
564 static struct sym_info_pair sysv_hash(const char *s0)
565 {
566 struct sym_info_pair s_info_p;
567 const unsigned char *s = (void *)s0;
568 uint_fast32_t h = 0;
569 while (*s) {
570 h = 16*h + *s++;
571 h ^= h>>24 & 0xf0;
572 }
573 s_info_p.sym_h = h & 0xfffffff;
574 s_info_p.sym_l = (char *)s - s0;
575 return s_info_p;
576 }
577
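/* GNU hash of a symbol name (h = h*33 + c, seeded with 5381), returned
 * together with the name length for memcmp-based matching. */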
578 struct sym_info_pair gnu_hash(const char *s0)
579 {
580 struct sym_info_pair s_info_p;
581 const unsigned char *s = (void *)s0;
582 uint_fast32_t h = 5381;
583 for (; *s; s++)
584 h += h*32 + *s;
585 s_info_p.sym_h = h;
586 s_info_p.sym_l = (char *)s - s0;
587 return s_info_p;
588 }
589
590 static Sym *sysv_lookup(struct verinfo *verinfo, struct sym_info_pair s_info_p, struct dso *dso)
591 {
592 size_t i;
593 uint32_t h = s_info_p.sym_h;
594 Sym *syms = dso->syms;
595 Elf_Symndx *hashtab = dso->hashtab;
596 char *strings = dso->strings;
597 for (i=hashtab[2+h%hashtab[0]]; i; i=hashtab[2+hashtab[0]+i]) {
598 if ((!dso->versym || (dso->versym[i] & 0x7fff) >= 0)
599 && (!memcmp(verinfo->s, strings+syms[i].st_name, s_info_p.sym_l))) {
600 if (!check_verinfo(dso->verdef, dso->versym, i, verinfo, dso->strings)) {
601 continue;
602 }
603
604 return syms+i;
605 }
606
607 }
608 LD_LOGD("sysv_lookup not find the symbol, "
609 "so:%{public}s s:%{public}s v:%{public}s use_vna_hash:%{public}d vna_hash:%{public}x",
610 dso->name, verinfo->s, verinfo->v, verinfo->use_vna_hash, verinfo->vna_hash);
611 return 0;
612 }
613
614 static Sym *gnu_lookup(struct sym_info_pair s_info_p, uint32_t *hashtab, struct dso *dso, struct verinfo *verinfo)
615 {
616 uint32_t h1 = s_info_p.sym_h;
617 uint32_t nbuckets = hashtab[0];
618 uint32_t *buckets = hashtab + 4 + hashtab[2]*(sizeof(size_t)/4);
619 uint32_t i = buckets[h1 % nbuckets];
620
621 if (!i) {
622 LD_LOGD("gnu_lookup symbol not found (bloom filter), so:%{public}s s:%{public}s", dso->name, verinfo->s);
623 return 0;
624 }
625
626 uint32_t *hashval = buckets + nbuckets + (i - hashtab[1]);
627
628 for (h1 |= 1; ; i++) {
629 uint32_t h2 = *hashval++;
630 if ((h1 == (h2|1)) && (!dso->versym || (dso->versym[i] & 0x7fff) >= 0)
631 && !memcmp(verinfo->s, dso->strings + dso->syms[i].st_name, s_info_p.sym_l)) {
632 if (!check_verinfo(dso->verdef, dso->versym, i, verinfo, dso->strings)) {
633 continue;
634 }
635
636 return dso->syms+i;
637 }
638
639 if (h2 & 1) break;
640 }
641
642 LD_LOGD("gnu_lookup symbol not found, "
643 "so:%{public}s s:%{public}s v:%{public}s use_vna_hash:%{public}d vna_hash:%{public}x",
644 dso->name, verinfo->s, verinfo->v, verinfo->use_vna_hash, verinfo->vna_hash);
645 return 0;
646 }
647
648 static bool check_sym_accessible(struct dso *dso, ns_t *ns)
649 {
650 if (!dso || !dso->namespace || !ns) {
651 LD_LOGD("check_sym_accessible invalid parameter!");
652 return false;
653 }
654 if (dso->namespace == ns) {
655 return true;
656 }
657 for (int i = 0; i < dso->parents_count; i++) {
658 if (dso->parents[i]->namespace == ns) {
659 return true;
660 }
661 }
662 LD_LOGD(
663 "check_sym_accessible dso name [%{public}s] ns_name [%{public}s] not accessible!", dso->name, ns->ns_name);
664 return false;
665 }
666
667 static inline bool is_dso_accessible(struct dso *dso, ns_t *ns)
668 {
669 if (dso->namespace == ns) {
670 return true;
671 }
672 for (int i = 0; i < dso->parents_count; i++) {
673 if (dso->parents[i]->namespace == ns) {
674 return true;
675 }
676 }
677 LD_LOGD(
678 "check_sym_accessible dso name [%{public}s] ns_name [%{public}s] not accessible!", dso->name, ns->ns_name);
679 return false;
680 }
681
682 static int find_dso_parent(struct dso *p, struct dso *target)
683 {
684 int index = -1;
685 for (int i = 0; i < p->parents_count; i++) {
686 if (p->parents[i] == target) {
687 index = i;
688 break;
689 }
690 }
691 return index;
692 }
693
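/* Record `parent` as a dso that directly depends on `p`, growing the parents
 * array in PARENTS_BASE_CAPACITY steps; duplicate parents are ignored. */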
694 static void add_dso_parent(struct dso *p, struct dso *parent)
695 {
696 int index = find_dso_parent(p, parent);
697 if (index != -1) {
698 return;
699 }
700 if (p->parents_count + 1 > p->parents_capacity) {
701 if (p->parents_capacity == 0) {
702 p->parents = (struct dso **)malloc(sizeof(struct dso *) * PARENTS_BASE_CAPACITY);
703 if (!p->parents) {
704 return;
705 }
706 p->parents_capacity = PARENTS_BASE_CAPACITY;
707 } else {
708 struct dso ** realloced = (struct dso **)realloc(
709 p->parents, sizeof(struct dso *) * (p->parents_capacity + PARENTS_BASE_CAPACITY));
710 if (!realloced) {
711 return;
712 }
713 p->parents = realloced;
714 p->parents_capacity += PARENTS_BASE_CAPACITY;
715 }
716 }
717 p->parents[p->parents_count] = parent;
718 p->parents_count++;
719 }
720
721 static void remove_dso_parent(struct dso *p, struct dso *parent)
722 {
723 int index = find_dso_parent(p, parent);
724 if (index == -1) {
725 return;
726 }
727 int i;
728 for (i = 0; i < index; i++) {
729 p->parents[i] = p->parents[i];
730 }
731 for (i = index; i < p->parents_count - 1; i++) {
732 p->parents[i] = p->parents[i + 1];
733 }
734 p->parents_count--;
735 }
736
737 static void add_reloc_can_search_dso(struct dso *p, struct dso *can_search_so)
738 {
739 if (p->reloc_can_search_dso_count + 1 > p->reloc_can_search_dso_capacity) {
740 if (p->reloc_can_search_dso_capacity == 0) {
741 p->reloc_can_search_dso_list =
742 (struct dso **)malloc(sizeof(struct dso *) * RELOC_CAN_SEARCH_DSO_BASE_CAPACITY);
743 if (!p->reloc_can_search_dso_list) {
744 return;
745 }
746 p->reloc_can_search_dso_capacity = RELOC_CAN_SEARCH_DSO_BASE_CAPACITY;
747 } else {
748 struct dso ** realloced = (struct dso **)realloc(
749 p->reloc_can_search_dso_list,
750 sizeof(struct dso *) * (p->reloc_can_search_dso_capacity + RELOC_CAN_SEARCH_DSO_BASE_CAPACITY));
751 if (!realloced) {
752 return;
753 }
754 p->reloc_can_search_dso_list = realloced;
755 p->reloc_can_search_dso_capacity += RELOC_CAN_SEARCH_DSO_BASE_CAPACITY;
756 }
757 }
758 p->reloc_can_search_dso_list[p->reloc_can_search_dso_count] = can_search_so;
759 p->reloc_can_search_dso_count++;
760 }
761
762 static void free_reloc_can_search_dso(struct dso *p)
763 {
764 if (p->reloc_can_search_dso_list) {
765 free(p->reloc_can_search_dso_list);
766 p->reloc_can_search_dso_list = NULL;
767 p->reloc_can_search_dso_count = 0;
768 p->reloc_can_search_dso_capacity = 0;
769 }
770 }
771
772 /* The list of sos that can be searched during relocation includes:
773  * - Any so whose is_global flag is true, which means it is accessible by default.
774  *   Global sos include the exe, LD_PRELOAD sos and ldso.
775  * - Sos with is_reloc_head_so_dep set, for which we additionally check that
776  *   the namespace is accessible.
777  *
778  * How is_reloc_head_so_dep is set:
779  * when dlopening A, it is set to true for all direct and indirect dependencies of A, including A itself. */
780 static void add_can_search_so_list_in_dso(struct dso *dso_relocating, struct dso *start_check_dso) {
781 struct dso *p = start_check_dso;
782 for (; p; p = p->syms_next) {
783 if (p->is_global) {
784 add_reloc_can_search_dso(dso_relocating, p);
785 continue;
786 }
787
788 if (p->is_reloc_head_so_dep) {
789 if (dso_relocating->namespace && check_sym_accessible(p, dso_relocating->namespace)) {
790 add_reloc_can_search_dso(dso_relocating, p);
791 }
792 }
793 }
794
795 return;
796 }
797
798 #define OK_TYPES (1<<STT_NOTYPE | 1<<STT_OBJECT | 1<<STT_FUNC | 1<<STT_COMMON | 1<<STT_TLS)
799 #define OK_BINDS (1<<STB_GLOBAL | 1<<STB_WEAK | 1<<STB_GNU_UNIQUE)
800
801 #ifndef ARCH_SYM_REJECT_UND
802 #define ARCH_SYM_REJECT_UND(s) 0
803 #endif
804
805 #if defined(__GNUC__)
806 __attribute__((always_inline))
807 #endif
808
809 struct symdef find_sym_impl(
810 struct dso *dso, struct verinfo *verinfo, struct sym_info_pair s_info_g, int need_def, ns_t *ns)
811 {
812 Sym *sym;
813 struct sym_info_pair s_info_s = {0, 0};
814 uint32_t *ght;
815 uint32_t gh = s_info_g.sym_h;
816 uint32_t gho = gh / (8 * sizeof(size_t));
817 size_t ghm = 1ul << gh % (8 * sizeof(size_t));
818 struct symdef def = {0};
819 if (ns && !check_sym_accessible(dso, ns))
820 return def;
821
822 if ((ght = dso->ghashtab)) {
823 const size_t *bloomwords = (const void *)(ght + 4);
824 size_t f = bloomwords[gho & (ght[2] - 1)];
825 if (!(f & ghm))
826 return def;
827
828 f >>= (gh >> ght[3]) % (8 * sizeof f);
829 if (!(f & 1))
830 return def;
831
832 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
833 } else {
834 if (!s_info_s.sym_h)
835 s_info_s = sysv_hash(verinfo->s);
836
837 sym = sysv_lookup(verinfo, s_info_s, dso);
838 }
839
840 if (!sym)
841 return def;
842
843 if (!sym->st_shndx)
844 if (need_def || (sym->st_info & 0xf) == STT_TLS || ARCH_SYM_REJECT_UND(sym))
845 return def;
846
847 if (!sym->st_value)
848 if ((sym->st_info & 0xf) != STT_TLS)
849 return def;
850
851 if (!(1 << (sym->st_info & 0xf) & OK_TYPES))
852 return def;
853
854 if (!(1 << (sym->st_info >> 4) & OK_BINDS))
855 return def;
856
857 def.sym = sym;
858 def.dso = dso;
859 return def;
860 }
861
862 static inline struct symdef find_sym2(struct dso *dso, struct verinfo *verinfo, int need_def, int use_deps, ns_t *ns)
863 {
864 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
865 struct sym_info_pair s_info_s = {0, 0};
866 uint32_t gh = s_info_g.sym_h, gho = gh / (8 * sizeof(size_t)), *ght;
867 size_t ghm = 1ul << gh % (8*sizeof(size_t));
868 struct symdef def = {0};
869 struct dso **deps = use_deps ? dso->deps : 0;
870 for (; dso; dso=use_deps ? *deps++ : dso->syms_next) {
871 Sym *sym;
872 // ldso, the app and LD_PRELOAD sos are global and must be accessible from every existing namespace
873 if (!dso->is_preload && ns && !check_sym_accessible(dso, ns)) {
874 continue;
875 }
876 if ((ght = dso->ghashtab)) {
877 GNU_HASH_FILTER(ght, ghm, gho)
878 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
879 } else {
880 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
881 sym = sysv_lookup(verinfo, s_info_s, dso);
882 }
883
884 if (!sym) continue;
885 if (!sym->st_shndx)
886 if (need_def || (sym->st_info&0xf) == STT_TLS
887 || ARCH_SYM_REJECT_UND(sym))
888 continue;
889 if (!sym->st_value)
890 if ((sym->st_info&0xf) != STT_TLS)
891 continue;
892 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
893 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
894 def.sym = sym;
895 def.dso = dso;
896 break;
897 }
898 return def;
899 }
900
901 static inline struct symdef find_sym_by_deps(struct dso *dso, struct verinfo *verinfo, int need_def, ns_t *ns)
902 {
903 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
904 struct sym_info_pair s_info_s = {0, 0};
905 uint32_t h = 0, gh = s_info_g.sym_h, gho = gh / (8 * sizeof(size_t)), *ght;
906 size_t ghm = 1ul << gh % (8 * sizeof(size_t));
907 struct symdef def = {0};
908 struct dso **deps = dso->deps;
909 for (; dso; dso = *deps++) {
910 Sym *sym;
911 if (!is_dso_accessible(dso, ns)) {
912 continue;
913 }
914 if ((ght = dso->ghashtab)) {
915 GNU_HASH_FILTER(ght, ghm, gho)
916 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
917 } else {
918 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
919 sym = sysv_lookup(verinfo, s_info_s, dso);
920 }
921
922 if (!sym) continue;
923 if (!sym->st_shndx)
924 if (need_def || (sym->st_info&0xf) == STT_TLS
925 || ARCH_SYM_REJECT_UND(sym))
926 continue;
927 if (!sym->st_value)
928 if ((sym->st_info&0xf) != STT_TLS)
929 continue;
930 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
931 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
932 def.sym = sym;
933 def.dso = dso;
934 break;
935 }
936 return def;
937 }
938
939 static inline struct symdef find_sym_by_saved_so_list(
940 int sym_type, struct dso *dso, struct verinfo *verinfo, int need_def, struct dso *dso_relocating)
941 {
942 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
943 struct sym_info_pair s_info_s = {0, 0};
944 uint32_t gh = s_info_g.sym_h, gho = gh / (8 * sizeof(size_t)), *ght;
945 size_t ghm = 1ul << gh % (8 * sizeof(size_t));
946 struct symdef def = {0};
947 // skip head dso.
948 int start_search_index = sym_type==REL_COPY ? 1 : 0;
949 struct dso *dso_searching = 0;
950 for (int i = start_search_index; i < dso_relocating->reloc_can_search_dso_count; i++) {
951 dso_searching = dso_relocating->reloc_can_search_dso_list[i];
952 Sym *sym;
953 if ((ght = dso_searching->ghashtab)) {
954 GNU_HASH_FILTER(ght, ghm, gho)
955 sym = gnu_lookup(s_info_g, ght, dso_searching, verinfo);
956 } else {
957 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
958 sym = sysv_lookup(verinfo, s_info_s, dso_searching);
959 }
960 if (!sym) continue;
961 if (!sym->st_shndx)
962 if (need_def || (sym->st_info&0xf) == STT_TLS
963 || ARCH_SYM_REJECT_UND(sym))
964 continue;
965 if (!sym->st_value)
966 if ((sym->st_info&0xf) != STT_TLS)
967 continue;
968 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
969 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
970 def.sym = sym;
971 def.dso = dso_searching;
972 break;
973 }
974 return def;
975 }
976
977 static struct symdef find_sym(struct dso *dso, const char *s, int need_def)
978 {
979 struct verinfo verinfo = { .s = s, .v = "", .use_vna_hash = false };
980 return find_sym2(dso, &verinfo, need_def, 0, NULL);
981 }
982
983 static bool get_vna_hash(struct dso *dso, int sym_index, uint32_t *vna_hash)
984 {
985 if (!dso->versym || !dso->verneed) {
986 return false;
987 }
988
989 uint16_t vsym = dso->versym[sym_index];
990 if (vsym == VER_NDX_LOCAL || vsym == VER_NDX_GLOBAL) {
991 return false;
992 }
993
994 bool result = false;
995 Verneed *verneed = dso->verneed;
996 Vernaux *vernaux;
997 vsym &= 0x7fff;
998
999 for (;;) {
1000 vernaux = (Vernaux *)((char *)verneed + verneed->vn_aux);
1001
1002 for (size_t cnt = 0; cnt < verneed->vn_cnt; cnt++) {
1003 if ((vernaux->vna_other & 0x7fff) == vsym) {
1004 result = true;
1005 *vna_hash = vernaux->vna_hash;
1006 break;
1007 }
1008
1009 vernaux = (Vernaux *)((char *)vernaux + vernaux->vna_next);
1010 }
1011
1012 if (result) {
1013 break;
1014 }
1015
1016 if (verneed->vn_next == 0) {
1017 break;
1018 }
1019
1020 verneed = (Verneed *)((char *)verneed + verneed->vn_next);
1021 }
1022 return result;
1023 }
1024
1025 static void get_verinfo(struct dso *dso, int sym_index, struct verinfo *vinfo)
1026 {
1027 char *strings = dso->strings;
1028 // try to get version number from .gnu.version
1029 int16_t vsym = dso->versym[sym_index];
1030 Verdef *verdef = dso->verdef;
1031 vsym &= 0x7fff;
1032 if (!verdef) {
1033 return;
1034 }
1035 int version_found = 0;
1036 for (;;) {
1037 if (!verdef) {
1038 break;
1039 }
1040 if (!(verdef->vd_flags & VER_FLG_BASE) && (verdef->vd_ndx & 0x7fff) == vsym) {
1041 version_found = 1;
1042 break;
1043 }
1044 if (verdef->vd_next == 0) {
1045 break;
1046 }
1047 verdef = (Verdef *)((char *)verdef + verdef->vd_next);
1048 }
1049 if (version_found) {
1050 Verdaux *aux = (Verdaux *)((char *)verdef + verdef->vd_aux);
1051 if (aux && aux->vda_name && strings && (dso->strings + aux->vda_name)) {
1052 vinfo->v = dso->strings + aux->vda_name;
1053 }
1054 }
1055 }
1056
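/* Process one relocation table (REL, RELA or previously saved lazy records):
 * resolve each referenced symbol within the allowed lookup scope, honoring
 * symbol versioning, then patch the relocated address according to the
 * arch-specific relocation type. */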
1057 static void do_relocs(struct dso *dso, size_t *rel, size_t rel_size, size_t stride)
1058 {
1059 unsigned char *base = dso->base;
1060 Sym *syms = dso->syms;
1061 char *strings = dso->strings;
1062 Sym *sym;
1063 const char *name;
1064 void *ctx;
1065 int type;
1066 int sym_index;
1067 struct symdef def;
1068 size_t *reloc_addr;
1069 size_t sym_val;
1070 size_t tls_val;
1071 size_t addend;
1072 int skip_relative = 0, reuse_addends = 0, save_slot = 0;
1073
1074 if (dso == &ldso) {
1075 /* Only ldso's REL table needs addend saving/reuse. */
1076 if (rel == apply_addends_to)
1077 reuse_addends = 1;
1078 skip_relative = 1;
1079 }
1080
1081 for (; rel_size; rel+=stride, rel_size-=stride*sizeof(size_t)) {
1082 if (skip_relative && IS_RELATIVE(rel[1], dso->syms)) continue;
1083 type = R_TYPE(rel[1]);
1084 if (type == REL_NONE) continue;
1085 reloc_addr = laddr(dso, rel[0]);
1086
1087 if (stride > 2) {
1088 addend = rel[2];
1089 } else if (type==REL_GOT || type==REL_PLT|| type==REL_COPY) {
1090 addend = 0;
1091 } else if (reuse_addends) {
1092 /* Save original addend in stage 2 where the dso
1093 * chain consists of just ldso; otherwise read back
1094 * saved addend since the inline one was clobbered. */
1095 if (head==&ldso)
1096 saved_addends[save_slot] = *reloc_addr;
1097 addend = saved_addends[save_slot++];
1098 } else {
1099 addend = *reloc_addr;
1100 }
1101
1102 sym_index = R_SYM(rel[1]);
1103 if (sym_index) {
1104 sym = syms + sym_index;
1105 name = strings + sym->st_name;
1106 ctx = type==REL_COPY ? head->syms_next : head;
1107 struct verinfo vinfo = { .s = name, .v = ""};
1108
1109 vinfo.use_vna_hash = get_vna_hash(dso, sym_index, &vinfo.vna_hash);
1110 if (!vinfo.use_vna_hash && dso->versym && (dso->versym[sym_index] & 0x7fff) >= 0) {
1111 get_verinfo(dso, sym_index, &vinfo);
1112 }
1113 if (dso->cache_sym_index == sym_index) {
1114 def = (struct symdef){ .dso = dso->cache_dso, .sym = dso->cache_sym };
1115 } else {
1116 def = (sym->st_info>>4) == STB_LOCAL
1117 ? (struct symdef){ .dso = dso, .sym = sym }
1118 : dso != &ldso ? find_sym_by_saved_so_list(type, ctx, &vinfo, type==REL_PLT, dso)
1119 : find_sym2(ctx, &vinfo, type==REL_PLT, 0, dso->namespace);
1120 dso->cache_sym_index = sym_index;
1121 dso->cache_dso = def.dso;
1122 dso->cache_sym = def.sym;
1123 }
1124
1125 if (!def.sym && (sym->st_shndx != SHN_UNDEF
1126 || sym->st_info>>4 != STB_WEAK)) {
1127 if (dso->lazy && (type==REL_PLT || type==REL_GOT)) {
1128 dso->lazy[3*dso->lazy_cnt+0] = rel[0];
1129 dso->lazy[3*dso->lazy_cnt+1] = rel[1];
1130 dso->lazy[3*dso->lazy_cnt+2] = addend;
1131 dso->lazy_cnt++;
1132 continue;
1133 }
1134 LD_LOGE("relocating failed: symbol not found. "
1135 "dso=%{public}s s=%{public}s use_vna_hash=%{public}d van_hash=%{public}x",
1136 dso->name, name, vinfo.use_vna_hash, vinfo.vna_hash);
1137 error("Error relocating %s: %s: symbol not found",
1138 dso->name, name);
1139 if (runtime) longjmp(*rtld_fail, 1);
1140 continue;
1141 }
1142 } else {
1143 sym = 0;
1144 def.sym = 0;
1145 def.dso = dso;
1146 }
1147
1148 sym_val = def.sym ? (size_t)laddr(def.dso, def.sym->st_value) : 0;
1149 tls_val = def.sym ? def.sym->st_value : 0;
1150
1151 if ((type == REL_TPOFF || type == REL_TPOFF_NEG)
1152 && def.dso->tls_id > static_tls_cnt) {
1153 error("Error relocating %s: %s: initial-exec TLS "
1154 "resolves to dynamic definition in %s",
1155 dso->name, name, def.dso->name);
1156 longjmp(*rtld_fail, 1);
1157 }
1158
1159 switch(type) {
1160 case REL_OFFSET:
1161 addend -= (size_t)reloc_addr;
1162 case REL_SYMBOLIC:
1163 case REL_GOT:
1164 case REL_PLT:
1165 *reloc_addr = sym_val + addend;
1166 break;
1167 case REL_USYMBOLIC:
1168 memcpy(reloc_addr, &(size_t){sym_val + addend}, sizeof(size_t));
1169 break;
1170 case REL_RELATIVE:
1171 *reloc_addr = (size_t)base + addend;
1172 break;
1173 case REL_SYM_OR_REL:
1174 if (sym) *reloc_addr = sym_val + addend;
1175 else *reloc_addr = (size_t)base + addend;
1176 break;
1177 case REL_COPY:
1178 memcpy(reloc_addr, (void *)sym_val, sym->st_size);
1179 break;
1180 case REL_OFFSET32:
1181 *(uint32_t *)reloc_addr = sym_val + addend
1182 - (size_t)reloc_addr;
1183 break;
1184 case REL_FUNCDESC:
1185 *reloc_addr = def.sym ? (size_t)(def.dso->funcdescs
1186 + (def.sym - def.dso->syms)) : 0;
1187 break;
1188 case REL_FUNCDESC_VAL:
1189 if ((sym->st_info&0xf) == STT_SECTION) *reloc_addr += sym_val;
1190 else *reloc_addr = sym_val;
1191 reloc_addr[1] = def.sym ? (size_t)def.dso->got : 0;
1192 break;
1193 case REL_DTPMOD:
1194 *reloc_addr = def.dso->tls_id;
1195 break;
1196 case REL_DTPOFF:
1197 *reloc_addr = tls_val + addend - DTP_OFFSET;
1198 break;
1199 #ifdef TLS_ABOVE_TP
1200 case REL_TPOFF:
1201 *reloc_addr = tls_val + def.dso->tls.offset + TPOFF_K + addend;
1202 break;
1203 #else
1204 case REL_TPOFF:
1205 *reloc_addr = tls_val - def.dso->tls.offset + addend;
1206 break;
1207 case REL_TPOFF_NEG:
1208 *reloc_addr = def.dso->tls.offset - tls_val + addend;
1209 break;
1210 #endif
1211 case REL_TLSDESC:
1212 if (stride<3) addend = reloc_addr[!TLSDESC_BACKWARDS];
1213 if (def.dso->tls_id > static_tls_cnt) {
1214 struct td_index *new = malloc(sizeof *new);
1215 if (!new) {
1216 error(
1217 "Error relocating %s: cannot allocate TLSDESC for %s",
1218 dso->name, sym ? name : "(local)" );
1219 longjmp(*rtld_fail, 1);
1220 }
1221 new->next = dso->td_index;
1222 dso->td_index = new;
1223 new->args[0] = def.dso->tls_id;
1224 new->args[1] = tls_val + addend - DTP_OFFSET;
1225 reloc_addr[0] = (size_t)__tlsdesc_dynamic;
1226 reloc_addr[1] = (size_t)new;
1227 } else {
1228 reloc_addr[0] = (size_t)__tlsdesc_static;
1229 #ifdef TLS_ABOVE_TP
1230 reloc_addr[1] = tls_val + def.dso->tls.offset
1231 + TPOFF_K + addend;
1232 #else
1233 reloc_addr[1] = tls_val - def.dso->tls.offset
1234 + addend;
1235 #endif
1236 }
1237 /* Some archs (32-bit ARM at least) invert the order of
1238 * the descriptor members. Fix them up here. */
1239 if (TLSDESC_BACKWARDS) {
1240 size_t tmp = reloc_addr[0];
1241 reloc_addr[0] = reloc_addr[1];
1242 reloc_addr[1] = tmp;
1243 }
1244 break;
1245 default:
1246 error("Error relocating %s: unsupported relocation type %d",
1247 dso->name, type);
1248 if (runtime) longjmp(*rtld_fail, 1);
1249 continue;
1250 }
1251 }
1252 }
1253
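/* Retry relocations that were deferred because their symbols were not yet
 * resolvable; dsos that still have unresolved entries are kept on lazy_head. */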
1254 static void redo_lazy_relocs()
1255 {
1256 struct dso *p = lazy_head, *next;
1257 lazy_head = 0;
1258 for (; p; p=next) {
1259 next = p->lazy_next;
1260 size_t size = p->lazy_cnt*3*sizeof(size_t);
1261 p->lazy_cnt = 0;
1262 do_relocs(p, p->lazy, size, 3);
1263 if (p->lazy_cnt) {
1264 p->lazy_next = lazy_head;
1265 lazy_head = p;
1266 } else {
1267 free(p->lazy);
1268 p->lazy = 0;
1269 p->lazy_next = 0;
1270 }
1271 }
1272 }
1273
1274 /* A huge hack: to make up for the wastefulness of shared libraries
1275 * needing at least a page of dirty memory even if they have no global
1276 * data, we reclaim the gaps at the beginning and end of writable maps
1277 * and "donate" them to the heap. */
1278
1279 static void reclaim(struct dso *dso, size_t start, size_t end)
1280 {
1281 if (start >= dso->relro_start && start < dso->relro_end) start = dso->relro_end;
1282 if (end >= dso->relro_start && end < dso->relro_end) end = dso->relro_start;
1283 if (start >= end) return;
1284 char *base = laddr_pg(dso, start);
1285 __malloc_donate(base, base+(end-start));
1286 }
1287
1288 static void reclaim_gaps(struct dso *dso)
1289 {
1290 Phdr *ph = dso->phdr;
1291 size_t phcnt = dso->phnum;
1292
1293 for (; phcnt--; ph=(void *)((char *)ph+dso->phentsize)) {
1294 if (ph->p_type!=PT_LOAD) continue;
1295 if ((ph->p_flags&(PF_R|PF_W))!=(PF_R|PF_W)) continue;
1296 reclaim(dso, ph->p_vaddr & -PAGE_SIZE, ph->p_vaddr);
1297 reclaim(dso, ph->p_vaddr+ph->p_memsz,
1298 ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE);
1299 }
1300 }
1301
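/* Read exactly n bytes unless EOF is reached, retrying on EINTR; returns the
 * number of bytes read, or -1 on error. */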
1302 static ssize_t read_loop(int fd, void *p, size_t n)
1303 {
1304 for (size_t i=0; i<n; ) {
1305 ssize_t l = read(fd, (char *)p+i, n-i);
1306 if (l<0) {
1307 if (errno==EINTR) continue;
1308 else return -1;
1309 }
1310 if (l==0) return i;
1311 i += l;
1312 }
1313 return n;
1314 }
1315
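/* mmap with MAP_FIXED, falling back to emulating the mapping (memset for
 * anonymous maps, read into the existing mapping otherwise) on NOMMU kernels
 * where MAP_FIXED fails with EINVAL. */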
1316 UT_STATIC void *mmap_fixed(void *p, size_t n, int prot, int flags, int fd, off_t off)
1317 {
1318 static int no_map_fixed;
1319 char *q;
1320 if (!n) return p;
1321 if (!no_map_fixed) {
1322 q = mmap(p, n, prot, flags|MAP_FIXED, fd, off);
1323 if (!DL_NOMMU_SUPPORT || q != MAP_FAILED || errno != EINVAL)
1324 return q;
1325 no_map_fixed = 1;
1326 }
1327 /* Fallbacks for MAP_FIXED failure on NOMMU kernels. */
1328 if (flags & MAP_ANONYMOUS) {
1329 memset(p, 0, n);
1330 return p;
1331 }
1332 ssize_t r;
1333 if (lseek(fd, off, SEEK_SET) < 0) return MAP_FAILED;
1334 for (q=p; n; q+=r, off+=r, n-=r) {
1335 r = read(fd, q, n);
1336 if (r < 0 && errno != EINTR) return MAP_FAILED;
1337 if (!r) {
1338 memset(q, 0, n);
1339 break;
1340 }
1341 }
1342 return p;
1343 }
1344
1345 UT_STATIC void unmap_library(struct dso *dso)
1346 {
1347 if (dso->loadmap) {
1348 size_t i;
1349 for (i=0; i<dso->loadmap->nsegs; i++) {
1350 if (!dso->loadmap->segs[i].p_memsz)
1351 continue;
1352 if (!is_dlclose_debug_enable()) {
1353 munmap((void *)dso->loadmap->segs[i].addr,
1354 dso->loadmap->segs[i].p_memsz);
1355 } else {
1356 (void)mprotect((void *)dso->loadmap->segs[i].addr,
1357 dso->loadmap->segs[i].p_memsz, PROT_NONE);
1358 }
1359 }
1360 free(dso->loadmap);
1361 } else if (dso->map && dso->map_len) {
1362 if (!is_dlclose_debug_enable()) {
1363 munmap(dso->map, dso->map_len);
1364 } else {
1365 mprotect(dso->map, dso->map_len, PROT_NONE);
1366 }
1367 }
1368 }
1369
1370 UT_STATIC bool get_random(void *buf, size_t buflen)
1371 {
1372 int ret;
1373 int fd = open("/dev/urandom", O_RDONLY);
1374 if (fd < 0) {
1375 return false;
1376 }
1377
1378 ret = read(fd, buf, buflen);
1379 if (ret < 0) {
1380 close(fd);
1381 return false;
1382 }
1383
1384 close(fd);
1385 return true;
1386 }
1387
1388 UT_STATIC void fill_random_data(void *buf, size_t buflen)
1389 {
1390 uint64_t x;
1391 int i;
1392 int pos = 0;
1393 struct timespec ts;
1394 /* Try to use urandom to get the random number first */
1395 if (!get_random(buf, buflen)) {
1396 /* Couldn't read /dev/urandom; derive pseudo-random data from ASLR-randomized function addresses and the current time */
1397 for (i = 1; i <= (buflen / sizeof(x)); i++) {
1398 (void)clock_gettime(CLOCK_REALTIME, &ts);
1399 x = (((uint64_t)get_random) << 32) ^ (uint64_t)fill_random_data ^ ts.tv_nsec;
1400 memcpy((char *)buf + pos, &x, sizeof(x));
1401 pos += sizeof(x);
1402 }
1403 }
1404 return;
1405 }
1406
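/* Transparent huge pages are considered available unless the kernel reports
 * "[never]" in /sys/kernel/mm/transparent_hugepage/enabled. */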
1407 static bool get_transparent_hugepages_supported(void)
1408 {
1409 int fd = -1;
1410 ssize_t read_size = 0;
1411 bool enable = false;
1412 char buf[HUGEPAGES_SUPPORTED_STR_SIZE] = {'0'};
1413
1414 fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
1415 if (fd < 0)
1416 goto done;
1417
1418 read_size = read(fd, buf, HUGEPAGES_SUPPORTED_STR_SIZE - 1);
1419 if (read_size < 0)
1420 goto close_fd;
1421
1422 buf[HUGEPAGES_SUPPORTED_STR_SIZE - 1] = '\0';
1423 if (strstr(buf, "[never]") == NULL)
1424 enable = true;
1425
1426 close_fd:
1427 close(fd);
1428 done:
1429 return enable;
1430 }
1431
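/* Return the largest p_align among PT_LOAD headers (LP64 only, PAGE_SIZE
 * otherwise); used later to decide whether the mapping should be 2MB-aligned
 * so the text can be backed by huge pages. */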
1432 static size_t phdr_table_get_maxinum_alignment(Phdr *phdr_table, size_t phdr_count)
1433 {
1434 #if defined(__LP64__)
1435 size_t maxinum_alignment = PAGE_SIZE;
1436 size_t i = 0;
1437
1438 for (i = 0; i < phdr_count; ++i) {
1439 const Phdr *phdr = &phdr_table[i];
1440
1441 /* p_align must be 0, 1, or a positive, integral power of two */
1442 if ((phdr->p_type != PT_LOAD) || ((phdr->p_align & (phdr->p_align - 1)) != 0))
1443 continue;
1444
1445 if (phdr->p_align > maxinum_alignment)
1446 maxinum_alignment = phdr->p_align;
1447 }
1448
1449 return maxinum_alignment;
1450 #else
1451 return PAGE_SIZE;
1452 #endif
1453 }
1454
1455 #ifdef USE_ENCAPS
1456 static int do_sync_to_other()
1457 {
1458 int fd;
1459 int ret;
1460
1461 fd = open("/dev/encaps", O_RDONLY);
1462 if (fd < 0) {
1463 LD_LOGE("open encaps failed, %{public}s", strerror(errno));
1464 return -1;
1465 }
1466
1467 ret = ioctl(fd, SYNC_ENCAPS_CMD);
1468 if (ret != 0) {
1469 LD_LOGE("ioctl encaps failed, %{public}s", strerror(errno));
1470 close(fd);
1471 return -1;
1472 }
1473
1474 close(fd);
1475 return 0;
1476 }
1477
1478 static void sync_to_other()
1479 {
1480 __synccall(do_sync_to_other, NULL);
1481 }
1482
1483 static bool is_section_exist(Ehdr *eh_buf, uint32_t en_size, int fd, char *section_name)
1484 {
1485 char *shstrtab_content = NULL;
1486 size_t i, len;
1487 size_t shsize;
1488 uint16_t index;
1489 void *sh_buf = NULL;
1490 Shdr *sh, *sh0, shstrtab;
1491
1492 if (eh_buf == NULL) {
1493 return false;
1494 }
1495
1496 if (eh_buf->e_type != ET_DYN) {
1497 goto error_without_free;
1498 }
1499
1500 shsize = eh_buf->e_shentsize * eh_buf->e_shnum;
1501 index = eh_buf->e_shstrndx;
1502 if (index >= eh_buf->e_shnum) {
1503 goto error_without_free;
1504 }
1505
1506 if (shsize > en_size - sizeof(Ehdr)) {
1507 sh_buf = malloc(shsize);
1508 if (!sh_buf) {
1509 goto error_without_free;
1510 }
1511 len = pread(fd, sh_buf, shsize, eh_buf->e_shoff);
1512 if (len != shsize) {
1513 free(sh_buf);
1514 goto error_without_free;
1515 }
1516 sh = sh0 = sh_buf;
1517 } else if (eh_buf->e_shoff + shsize > en_size) {
1518 len = pread(fd, eh_buf + 1, shsize, eh_buf->e_shoff);
1519 if (len != shsize) {
1520 goto error_without_free;
1521 }
1522 sh = sh0 = (void *)(eh_buf + 1);
1523 } else {
1524 sh = sh0 = (void *)((char *)eh_buf + eh_buf->e_shoff);
1525 }
1526
1527 shstrtab = sh[index];
1528 shstrtab_content = (char *)malloc(shstrtab.sh_size);
1529 if (!shstrtab_content) {
1530 free(sh_buf);
1531 goto error_without_free;
1532 }
1533
1534 len = pread(fd, shstrtab_content, shstrtab.sh_size, shstrtab.sh_offset);
1535 if (len != shstrtab.sh_size) {
1536 goto error;
1537 }
1538 for (i = eh_buf->e_shnum; i != 0; i--) {
1539 char *shname = shstrtab_content + sh0[i - 1].sh_name; // sh_name is an offset into shstrtab
1540 if ((shname == NULL) || (sh0[i - 1].sh_name > shstrtab.sh_size)) {
1541 continue;
1542 }
1543 if (strcmp(shname, section_name) == 0) {
1544 goto done_search;
1545 }
1546 }
1547
1548 error:
1549 free(shstrtab_content);
1550 if (sh_buf != NULL) {
1551 free(sh_buf);
1552 }
1553 error_without_free:
1554 errno = ENOEXEC;
1555 return false;
1556 done_search:
1557 free(shstrtab_content);
1558 if (sh_buf != NULL) {
1559 free(sh_buf);
1560 }
1561 sync_to_other();
1562 return true;
1563 }
1564 #endif
1565
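/* Probe the kernel's XPM check for this file by mapping its ELF header with
 * MAP_XPM; if the mapping is refused, loading the library is rejected. */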
1566 static bool check_xpm(int fd)
1567 {
1568 size_t mapLen = sizeof(Ehdr);
1569 void *map = mmap(0, mapLen, PROT_READ, MAP_PRIVATE | MAP_XPM, fd, 0);
1570 if (map == MAP_FAILED) {
1571 LD_LOGE("Xpm check failed for so file, errno for mmap is: %{public}d", errno);
1572 return false;
1573 }
1574 munmap(map, mapLen);
1575 return true;
1576 }
1577
1578 UT_STATIC void *map_library(int fd, struct dso *dso, struct reserved_address_params *reserved_params)
1579 {
1580 Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
1581 void *allocated_buf=0;
1582 size_t phsize;
1583 size_t addr_min=SIZE_MAX, addr_max=0, map_len;
1584 size_t this_min, this_max;
1585 size_t nsegs = 0;
1586 off_t off_start;
1587 Ehdr *eh;
1588 Phdr *ph, *ph0;
1589 unsigned prot;
1590 unsigned char *map=MAP_FAILED, *base;
1591 size_t dyn=0;
1592 size_t tls_image=0;
1593 size_t i;
1594 int map_flags = MAP_PRIVATE;
1595 size_t start_addr;
1596 size_t start_alignment = PAGE_SIZE;
1597 bool hugepage_enabled = false;
1598 if (!check_xpm(fd)) {
1599 return 0;
1600 }
1601
1602 ssize_t l = read(fd, buf, sizeof buf);
1603 eh = buf;
1604 if (l<0) return 0;
1605 if (l<sizeof *eh || (eh->e_type != ET_DYN && eh->e_type != ET_EXEC))
1606 goto noexec;
1607 phsize = eh->e_phentsize * eh->e_phnum;
1608 if (phsize > sizeof buf - sizeof *eh) {
1609 allocated_buf = malloc(phsize);
1610 if (!allocated_buf) return 0;
1611 l = pread(fd, allocated_buf, phsize, eh->e_phoff);
1612 if (l < 0) goto error;
1613 if (l != phsize) goto noexec;
1614 ph = ph0 = allocated_buf;
1615 } else if (eh->e_phoff + phsize > l) {
1616 l = pread(fd, buf+1, phsize, eh->e_phoff);
1617 if (l < 0) goto error;
1618 if (l != phsize) goto noexec;
1619 ph = ph0 = (void *)(buf + 1);
1620 } else {
1621 ph = ph0 = (void *)((char *)buf + eh->e_phoff);
1622 }
1623 for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
1624 if (ph->p_type == PT_DYNAMIC) {
1625 dyn = ph->p_vaddr;
1626 } else if (ph->p_type == PT_TLS) {
1627 tls_image = ph->p_vaddr;
1628 dso->tls.align = ph->p_align;
1629 dso->tls.len = ph->p_filesz;
1630 dso->tls.size = ph->p_memsz;
1631 } else if (ph->p_type == PT_GNU_RELRO) {
1632 dso->relro_start = ph->p_vaddr & -PAGE_SIZE;
1633 dso->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
1634 } else if (ph->p_type == PT_GNU_STACK) {
1635 if (!runtime && ph->p_memsz > __default_stacksize) {
1636 __default_stacksize =
1637 ph->p_memsz < DEFAULT_STACK_MAX ?
1638 ph->p_memsz : DEFAULT_STACK_MAX;
1639 }
1640 }
1641 if (ph->p_type != PT_LOAD) continue;
1642 nsegs++;
1643 if (ph->p_vaddr < addr_min) {
1644 addr_min = ph->p_vaddr;
1645 off_start = ph->p_offset;
1646 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1647 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1648 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1649 }
1650 if (ph->p_vaddr+ph->p_memsz > addr_max) {
1651 addr_max = ph->p_vaddr+ph->p_memsz;
1652 }
1653 }
1654 if (!dyn) goto noexec;
1655 if (DL_FDPIC && !(eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
1656 dso->loadmap = calloc(1, sizeof *dso->loadmap
1657 + nsegs * sizeof *dso->loadmap->segs);
1658 if (!dso->loadmap) goto error;
1659 dso->loadmap->nsegs = nsegs;
1660 for (ph=ph0, i=0; i<nsegs; ph=(void *)((char *)ph+eh->e_phentsize)) {
1661 if (ph->p_type != PT_LOAD) continue;
1662 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1663 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1664 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1665 map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE-1),
1666 prot, MAP_PRIVATE,
1667 fd, ph->p_offset & -PAGE_SIZE);
1668 if (map == MAP_FAILED) {
1669 unmap_library(dso);
1670 goto error;
1671 }
1672 dso->loadmap->segs[i].addr = (size_t)map +
1673 (ph->p_vaddr & PAGE_SIZE-1);
1674 dso->loadmap->segs[i].p_vaddr = ph->p_vaddr;
1675 dso->loadmap->segs[i].p_memsz = ph->p_memsz;
1676 i++;
1677 if (prot & PROT_WRITE) {
1678 size_t brk = (ph->p_vaddr & PAGE_SIZE-1)
1679 + ph->p_filesz;
1680 size_t pgbrk = brk + PAGE_SIZE-1 & -PAGE_SIZE;
1681 size_t pgend = brk + ph->p_memsz - ph->p_filesz
1682 + PAGE_SIZE-1 & -PAGE_SIZE;
1683 if (pgend > pgbrk && mmap_fixed(map+pgbrk,
1684 pgend-pgbrk, prot,
1685 MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
1686 -1, off_start) == MAP_FAILED)
1687 goto error;
1688 memset(map + brk, 0, pgbrk-brk);
1689 }
1690 }
1691 map = (void *)dso->loadmap->segs[0].addr;
1692 map_len = 0;
1693 goto done_mapping;
1694 }
1695 addr_max += PAGE_SIZE-1;
1696 addr_max &= -PAGE_SIZE;
1697 addr_min &= -PAGE_SIZE;
1698 off_start &= -PAGE_SIZE;
1699 map_len = addr_max - addr_min + off_start;
1700 start_addr = addr_min;
1701
1702 hugepage_enabled = get_transparent_hugepages_supported();
1703 if (hugepage_enabled) {
1704 size_t maxinum_alignment = phdr_table_get_maxinum_alignment(ph0, eh->e_phnum);
1705
1706 start_alignment = maxinum_alignment == KPMD_SIZE ? KPMD_SIZE : PAGE_SIZE;
1707 }
1708
1709 if (reserved_params) {
1710 if (map_len > reserved_params->reserved_size) {
1711 if (reserved_params->must_use_reserved) {
1712 goto error;
1713 }
1714 } else {
1715 start_addr = ((size_t)reserved_params->start_addr - 1 + PAGE_SIZE) & -PAGE_SIZE;
1716 map_flags |= MAP_FIXED;
1717 }
1718 }
1719
1720 /* We will pick a mapping_align-aligned address as the start of the dso,
1721  * so reserve a larger tmp_map_len (roughly map_len + mapping_align) to
1722  * make sure there is enough room to slide the dso to an aligned address. */
1723 size_t mapping_align = start_alignment > LIBRARY_ALIGNMENT ? start_alignment : LIBRARY_ALIGNMENT;
1724 size_t tmp_map_len = ALIGN(map_len, mapping_align) + mapping_align - PAGE_SIZE;
1725
1726 /* if reserved_params exists, we should use start_addr as the preferred address for the mmap operation */
1727 if (reserved_params) {
1728 map = DL_NOMMU_SUPPORT
1729 ? mmap((void *)start_addr, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
1730 : mmap((void *)start_addr, map_len, prot, map_flags, fd, off_start);
1731 if (map == MAP_FAILED) {
1732 goto error;
1733 }
1734 if (reserved_params && map_len < reserved_params->reserved_size) {
1735 reserved_params->reserved_size -= (map_len + (start_addr - (size_t)reserved_params->start_addr));
1736 reserved_params->start_addr = (void *)((uint8_t *)map + map_len);
1737 }
1738 /* if reserved_params does not exist, we should use real_map as the preferred address for the mmap operation */
1739 } else {
1740 /* use tmp_map_len to mmap enough space for the dso with anonymous mapping */
1741 unsigned char *temp_map = mmap((void *)NULL, tmp_map_len, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1742 if (temp_map == MAP_FAILED) {
1743 goto error;
1744 }
1745
1746 /* find the mapping_align aligned address */
1747 unsigned char *real_map = (unsigned char*)ALIGN((uintptr_t)temp_map, mapping_align);
1748 map = DL_NOMMU_SUPPORT
1749 ? mmap(real_map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
1750 /* use map_len to mmap correct space for the dso with file mapping */
1751 : mmap(real_map, map_len, prot, map_flags | MAP_FIXED, fd, off_start);
1752 if (map == MAP_FAILED || map != real_map) {
1753 LD_LOGE("mmap MAP_FIXED failed");
1754 goto error;
1755 }
1756
1757 /* Free unused memory.
1758 * |--------------------------tmp_map_len--------------------------|
1759 * ^ ^ ^ ^
1760 * |---unused_part_1---|---------map_len-------|---unused_part_2---|
1761 * temp_map real_map(aligned) temp_map_end
1762 */
1763 unsigned char *temp_map_end = temp_map + tmp_map_len;
1764 size_t unused_part_1 = real_map - temp_map;
1765 size_t unused_part_2 = temp_map_end - (real_map + map_len);
1766 if (unused_part_1 > 0) {
1767 int res1 = munmap(temp_map, unused_part_1);
1768 if (res1 == -1) {
1769 LD_LOGE("munmap unused part 1 failed, errno:%{public}d", errno);
1770 }
1771 }
1772
1773 if (unused_part_2 > 0) {
1774 int res2 = munmap(real_map + map_len, unused_part_2);
1775 if (res2 == -1) {
1776 LD_LOGE("munmap unused part 2 failed, errno:%{public}d", errno);
1777 }
1778 }
1779 }
1780 dso->map = map;
1781 dso->map_len = map_len;
1782 /* If the loaded file is not relocatable and the requested address is
1783 * not available, then the load operation must fail. */
1784 if (eh->e_type != ET_DYN && addr_min && map!=(void *)addr_min) {
1785 errno = EBUSY;
1786 goto error;
1787 }
1788 base = map - addr_min;
1789 dso->phdr = 0;
1790 dso->phnum = 0;
1791 for (ph=ph0, i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
1792 if (ph->p_type == PT_OHOS_RANDOMDATA) {
1793 fill_random_data((void *)(ph->p_vaddr + base), ph->p_memsz);
1794 continue;
1795 }
1796 if (ph->p_type != PT_LOAD) continue;
1797 /* Check if the program headers are in this load segment, and
1798 * if so, record the address for use by dl_iterate_phdr. */
1799 if (!dso->phdr && eh->e_phoff >= ph->p_offset
1800 && eh->e_phoff+phsize <= ph->p_offset+ph->p_filesz) {
1801 dso->phdr = (void *)(base + ph->p_vaddr
1802 + (eh->e_phoff-ph->p_offset));
1803 dso->phnum = eh->e_phnum;
1804 dso->phentsize = eh->e_phentsize;
1805 }
1806 this_min = ph->p_vaddr & -PAGE_SIZE;
1807 this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
1808 off_start = ph->p_offset & -PAGE_SIZE;
1809 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1810 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1811 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1812 /* Reuse the existing mapping for the lowest-address LOAD */
1813 if ((ph->p_vaddr & -PAGE_SIZE) != addr_min || DL_NOMMU_SUPPORT)
1814 if (mmap_fixed(base+this_min, this_max-this_min, prot, MAP_PRIVATE|MAP_FIXED, fd, off_start) == MAP_FAILED)
1815 goto error;
1816 if ((ph->p_flags & PF_X) && (ph->p_align == KPMD_SIZE) && hugepage_enabled)
1817 madvise(base + this_min, this_max - this_min, MADV_HUGEPAGE);
1818 if (ph->p_memsz > ph->p_filesz && (ph->p_flags&PF_W)) {
1819 size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
1820 size_t pgbrk = brk+PAGE_SIZE-1 & -PAGE_SIZE;
1821 size_t zeromap_size = (size_t)base + this_max - pgbrk;
1822 memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE-1);
1823 if (pgbrk - (size_t)base < this_max && mmap_fixed((void *)pgbrk, zeromap_size, prot, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
1824 goto error;
1825 set_bss_vma_name(dso->name, (void *)pgbrk, zeromap_size);
1826 }
1827 }
1828 for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
1829 if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
1830 if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC)
1831 && errno != ENOSYS)
1832 goto error;
1833 break;
1834 }
1835 done_mapping:
1836 #ifdef USE_ENCAPS
1837 clock_gettime(CLOCK_MONOTONIC, &encaps_time_start);
1838 (void)is_section_exist(buf, sizeof(buf), fd, ".kernelpermission");
1839 clock_gettime(CLOCK_MONOTONIC, &encaps_time_end);
1840 encpas_cost_time = (encaps_time_end.tv_sec - encaps_time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
1841 + (encaps_time_end.tv_nsec - encaps_time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
1842 #endif
1843 dso->base = base;
1844 dso->dynv = laddr(dso, dyn);
1845 if (dso->tls.size) dso->tls.image = laddr(dso, tls_image);
1846 free(allocated_buf);
1847 return map;
1848 noexec:
1849 errno = ENOEXEC;
1850 error:
1851 if (map!=MAP_FAILED) unmap_library(dso);
1852 free(allocated_buf);
1853 return 0;
1854 }
1855
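/* Walk a ':' (or newline) separated search-path string, trying to open
 * "<dir>/<name>" for each component. Returns an open fd on success, -1 if
 * the name was not found in any component, and -2 (any negative value other
 * than -1) to tell the caller to stop searching further paths. */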
1856 static int path_open(const char *name, const char *s, char *buf, size_t buf_size)
1857 {
1858 size_t l;
1859 int fd;
1860 for (;;) {
1861 s += strspn(s, ":\n");
1862 l = strcspn(s, ":\n");
1863 if (l-1 >= INT_MAX) return -1;
1864 if (snprintf(buf, buf_size, "%.*s/%s", (int)l, s, name) < buf_size) {
1865 if ((fd = open(buf, O_RDONLY|O_CLOEXEC))>=0) return fd;
1866 switch (errno) {
1867 case ENOENT:
1868 case ENOTDIR:
1869 case EACCES:
1870 case ENAMETOOLONG:
1871 break;
1872 default:
1873 /* Any negative value but -1 will inhibit
1874 * further path search. */
1875 return -2;
1876 }
1877 }
1878 s += l;
1879 }
1880 }
1881
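/* Expand $ORIGIN / ${ORIGIN} references in the DT_RPATH/DT_RUNPATH string of
 * p into the directory containing the dso (or the main executable), storing
 * the result in p->rpath. Expansion is skipped for suid/sgid/AT_SECURE
 * programs, where the origin cannot be trusted. */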
1882 UT_STATIC int fixup_rpath(struct dso *p, char *buf, size_t buf_size)
1883 {
1884 size_t n, l;
1885 const char *s, *t, *origin;
1886 char *d;
1887 if (p->rpath || !p->rpath_orig) return 0;
1888 if (!strchr(p->rpath_orig, '$')) {
1889 p->rpath = ld_strdup(p->rpath_orig);
1890 return 0;
1891 }
1892 n = 0;
1893 s = p->rpath_orig;
1894 while ((t=strchr(s, '$'))) {
1895 if (strncmp(t, "$ORIGIN", 7) && strncmp(t, "${ORIGIN}", 9))
1896 return 0;
1897 s = t+1;
1898 n++;
1899 }
1900 if (n > SSIZE_MAX/PATH_MAX) return 0;
1901
1902 if (p->kernel_mapped) {
1903 /* $ORIGIN searches cannot be performed for the main program
1904 * when it is suid/sgid/AT_SECURE. This is because the
1905 * pathname is under the control of the caller of execve.
1906 * For libraries, however, $ORIGIN can be processed safely
1907 * since the library's pathname came from a trusted source
1908 * (either system paths or a call to dlopen). */
1909 if (libc.secure)
1910 return 0;
1911 l = readlink("/proc/self/exe", buf, buf_size);
1912 if (l == -1) switch (errno) {
1913 case ENOENT:
1914 case ENOTDIR:
1915 case EACCES:
1916 return 0;
1917 default:
1918 return -1;
1919 }
1920 if (l >= buf_size)
1921 return 0;
1922 buf[l] = 0;
1923 origin = buf;
1924 } else {
1925 origin = p->name;
1926 }
1927 t = strrchr(origin, '/');
1928 if (t) {
1929 l = t-origin;
1930 } else {
1931 /* Normally p->name will always be an absolute or relative
1932 * pathname containing at least one '/' character, but in the
1933 * case where ldso was invoked as a command to execute a
1934 * program in the working directory, app.name may not; fall back to ".". */
1935 origin = ".";
1936 l = 1;
1937 }
1938 /* Disallow non-absolute origins for suid/sgid/AT_SECURE. */
1939 if (libc.secure && *origin != '/')
1940 return 0;
1941 p->rpath = malloc(strlen(p->rpath_orig) + n*l + 1);
1942 if (!p->rpath) return -1;
1943
1944 d = p->rpath;
1945 s = p->rpath_orig;
1946 while ((t=strchr(s, '$'))) {
1947 memcpy(d, s, t-s);
1948 d += t-s;
1949 memcpy(d, origin, l);
1950 d += l;
1951 /* It was determined previously that the '$' is followed
1952 * either by "ORIGIN" or "{ORIGIN}". */
1953 s = t + 7 + 2*(t[1]=='{');
1954 }
1955 strcpy(d, s);
1956 return 0;
1957 }
1958
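/* Cache frequently used entries of the dynamic section: symbol/string tables,
 * hash tables, PLT GOT and version info, plus the DF_1_GLOBAL and
 * DF_1_NODELETE flags used by this loader. */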
1959 static void decode_dyn(struct dso *p)
1960 {
1961 size_t dyn[DYN_CNT];
1962 size_t flags1 = 0;
1963 decode_vec(p->dynv, dyn, DYN_CNT);
1964 search_vec(p->dynv, &flags1, DT_FLAGS_1);
1965 if (flags1 & DF_1_GLOBAL) {
1966 LD_LOGI("Add DF_1_GLOBAL for %{public}s", p->name);
1967 p->is_global = true;
1968 }
1969 if (flags1 & DF_1_NODELETE) {
1970 p->flags |= DSO_FLAGS_NODELETE;
1971 }
1972 p->syms = laddr(p, dyn[DT_SYMTAB]);
1973 p->strings = laddr(p, dyn[DT_STRTAB]);
1974 if (dyn[0]&(1<<DT_HASH))
1975 p->hashtab = laddr(p, dyn[DT_HASH]);
1976 if (dyn[0]&(1<<DT_RPATH))
1977 p->rpath_orig = p->strings + dyn[DT_RPATH];
1978 if (dyn[0]&(1<<DT_RUNPATH))
1979 p->rpath_orig = p->strings + dyn[DT_RUNPATH];
1980 if (dyn[0]&(1<<DT_PLTGOT))
1981 p->got = laddr(p, dyn[DT_PLTGOT]);
1982 if (search_vec(p->dynv, dyn, DT_GNU_HASH))
1983 p->ghashtab = laddr(p, *dyn);
1984 if (search_vec(p->dynv, dyn, DT_VERSYM))
1985 p->versym = laddr(p, *dyn);
1986 if (search_vec(p->dynv, dyn, DT_VERDEF))
1987 p->verdef = laddr(p, *dyn);
1988 if (search_vec(p->dynv, dyn, DT_VERNEED))
1989 p->verneed = laddr(p, *dyn);
1990 }
1991
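/* Number of dynamic symbols: taken from the sysv hash table if present,
 * otherwise derived from the GNU hash table by taking the largest bucket
 * value and walking that chain until the end-of-chain bit is set. */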
1992 UT_STATIC size_t count_syms(struct dso *p)
1993 {
1994 if (p->hashtab) return p->hashtab[1];
1995
1996 size_t nsym, i;
1997 uint32_t *buckets = p->ghashtab + 4 + (p->ghashtab[2]*sizeof(size_t)/4);
1998 uint32_t *hashval;
1999 for (i = nsym = 0; i < p->ghashtab[0]; i++) {
2000 if (buckets[i] > nsym)
2001 nsym = buckets[i];
2002 }
2003 if (nsym) {
2004 hashval = buckets + p->ghashtab[0] + (nsym - p->ghashtab[1]);
2005 do nsym++;
2006 while (!(*hashval++ & 1));
2007 }
2008 return nsym;
2009 }
2010
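/* Anonymous read/write mapping obtained with a raw syscall so it can be used
 * before malloc is usable; returns 0 on failure. */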
2011 static void *dl_mmap(size_t n)
2012 {
2013 void *p;
2014 int prot = PROT_READ|PROT_WRITE, flags = MAP_ANONYMOUS|MAP_PRIVATE;
2015 #ifdef SYS_mmap2
2016 p = (void *)__syscall(SYS_mmap2, 0, n, prot, flags, -1, 0);
2017 #else
2018 p = (void *)__syscall(SYS_mmap, 0, n, prot, flags, -1, 0);
2019 #endif
2020 return (unsigned long)p > -4096UL ? 0 : p;
2021 }
2022
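/* FDPIC only: build an array of function descriptors (entry address plus GOT
 * pointer) for every defined STT_FUNC symbol of the dso. */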
2023 static void makefuncdescs(struct dso *p)
2024 {
2025 static int self_done;
2026 size_t nsym = count_syms(p);
2027 size_t i, size = nsym * sizeof(*p->funcdescs);
2028
2029 if (!self_done) {
2030 p->funcdescs = dl_mmap(size);
2031 self_done = 1;
2032 } else {
2033 p->funcdescs = malloc(size);
2034 }
2035 if (!p->funcdescs) {
2036 if (!runtime) a_crash();
2037 error("Error allocating function descriptors for %s", p->name);
2038 longjmp(*rtld_fail, 1);
2039 }
2040 for (i=0; i<nsym; i++) {
2041 if ((p->syms[i].st_info&0xf)==STT_FUNC && p->syms[i].st_shndx) {
2042 p->funcdescs[i].addr = laddr(p, p->syms[i].st_value);
2043 p->funcdescs[i].got = p->got;
2044 } else {
2045 p->funcdescs[i].addr = 0;
2046 p->funcdescs[i].got = 0;
2047 }
2048 }
2049 }
2050
2051 static void get_sys_path(ns_configor *conf)
2052 {
2053 LD_LOGD("get_sys_path g_is_asan:%{public}d", g_is_asan);
2054 /* Use the ini file's system paths when ASan is not enabled */
2055 if (!g_is_asan) {
2056 sys_path = conf->get_sys_paths();
2057 } else {
2058 /* Use the ini file's ASan system paths when ASan is enabled.
2059 * Merge the two strings when both sys_paths and asan_sys_paths are valid. */
2060 sys_path = conf->get_asan_sys_paths();
2061 char *sys_path_default = conf->get_sys_paths();
2062 if (!sys_path) {
2063 sys_path = sys_path_default;
2064 } else if (sys_path_default) {
2065 size_t newlen = strlen(sys_path) + strlen(sys_path_default) + 2;
2066 char *new_syspath = malloc(newlen);
2067 if (!new_syspath) return; /* on allocation failure, keep the ASan paths already selected */
2068 strcpy(new_syspath, sys_path);
2069 strcat(new_syspath, ":");
2070 strcat(new_syspath, sys_path_default);
2071 sys_path = new_syspath;
2072 }
2073 }
2074 if (!sys_path) sys_path = "/lib:/usr/local/lib:/usr/lib:/lib64";
2075 LD_LOGD("get_sys_path sys_path:%{public}s", sys_path);
2076 }
2077
2078 static struct dso *search_dso_by_name(const char *name, const ns_t *ns) {
2079 LD_LOGD("search_dso_by_name name:%{public}s, ns_name:%{public}s", name, ns ? ns->ns_name: "NULL");
2080 for (size_t i = 0; i < ns->ns_dsos->num; i++) {
2081 struct dso *p = ns->ns_dsos->dsos[i];
2082 if (p->shortname && !strcmp(p->shortname, name)) {
2083 LD_LOGD("search_dso_by_name found name:%{public}s, ns_name:%{public}s", name, ns ? ns->ns_name: "NULL");
2084 return p;
2085 }
2086 }
2087 return NULL;
2088 }
2089
2090 static struct dso *search_dso_by_fstat(const struct stat *st, const ns_t *ns, uint64_t file_offset) {
2091 LD_LOGD("search_dso_by_fstat ns_name:%{public}s", ns ? ns->ns_name : "NULL");
2092 for (size_t i = 0; i < ns->ns_dsos->num; i++) {
2093 struct dso *p = ns->ns_dsos->dsos[i];
2094 if (p->dev == st->st_dev && p->ino == st->st_ino && p->file_offset == file_offset) {
2095 LD_LOGD("search_dso_by_fstat found dev:%{public}lu, ino:%{public}lu, ns_name:%{public}s",
2096 st->st_dev, st->st_ino, ns ? ns->ns_name : "NULL");
2097 return p;
2098 }
2099 }
2100 return NULL;
2101 }
2102
2103 static inline int app_has_same_name_so(const char *so_name, const ns_t *ns)
2104 {
2105 int fd = -1;
2106 /* Only check system app. */
2107 if (((ns->flag & LOCAL_NS_PREFERED) != 0) && ns->lib_paths) {
2108 char tmp_buf[PATH_MAX + 1];
2109 fd = path_open(so_name, ns->lib_paths, tmp_buf, sizeof tmp_buf);
2110 }
2111 return fd;
2112 }
2113
2114 /* Find loaded so by name */
2115 static struct dso *find_library_by_name(const char *name, const ns_t *ns, bool check_inherited)
2116 {
2117 LD_LOGD("find_library_by_name name:%{public}s, ns_name:%{public}s, check_inherited:%{public}d",
2118 name,
2119 ns ? ns->ns_name : "NULL",
2120 !!check_inherited);
2121 struct dso *p = search_dso_by_name(name, ns);
2122 if (p) return p;
2123 if (check_inherited && ns->ns_inherits) {
2124 for (size_t i = 0; i < ns->ns_inherits->num; i++) {
2125 ns_inherit * inherit = ns->ns_inherits->inherits[i];
2126 p = search_dso_by_name(name, inherit->inherited_ns);
2127 if (p && is_sharable(inherit, name)) {
2128 if (app_has_same_name_so(name, ns) != -1) {
2129 return NULL;
2130 }
2131 return p;
2132 }
2133 }
2134 }
2135 return NULL;
2136 }
2137 /* Find loaded so by file stat */
2138 UT_STATIC struct dso *find_library_by_fstat(const struct stat *st, const ns_t *ns, bool check_inherited, uint64_t file_offset) {
2139 LD_LOGD("find_library_by_fstat ns_name:%{public}s, check_inherited :%{public}d",
2140 ns ? ns->ns_name : "NULL",
2141 !!check_inherited);
2142 struct dso *p = search_dso_by_fstat(st, ns, file_offset);
2143 if (p) return p;
2144 if (check_inherited && ns->ns_inherits) {
2145 for (size_t i = 0; i < ns->ns_inherits->num; i++) {
2146 ns_inherit *inherit = ns->ns_inherits->inherits[i];
2147 p = search_dso_by_fstat(st, inherit->inherited_ns, file_offset);
2148 if (p && is_sharable(inherit, p->shortname)) return p;
2149 }
2150 }
2151 return NULL;
2152 }
2153
2154 #ifndef LOAD_ORDER_RANDOMIZATION
2155 /* add namespace function */
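/* Load (or find, if already loaded) a library in the given namespace.
 * Roughly: names containing '/' are opened directly if the namespace permits;
 * otherwise the search order is LD_LIBRARY_PATH, the DT_RPATH/DT_RUNPATH of
 * the requesting dsos, the namespace library paths (or ASan paths), and
 * finally any inherited namespaces. */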
2156 struct dso *load_library(
2157 const char *name, struct dso *needed_by, ns_t *namespace, bool check_inherited, struct reserved_address_params *reserved_params)
2158 {
2159 char buf[PATH_MAX + 1];
2160 const char *pathname;
2161 unsigned char *map;
2162 struct dso *p, temp_dso = {0};
2163 int fd;
2164 struct stat st;
2165 size_t alloc_size;
2166 int n_th = 0;
2167 int is_self = 0;
2168
2169 if (!*name) {
2170 errno = EINVAL;
2171 return 0;
2172 }
2173
2174 /* Catch and block attempts to reload the implementation itself */
2175 if (name[0]=='l' && name[1]=='i' && name[2]=='b') {
2176 static const char reserved[] =
2177 "c.pthread.rt.m.dl.util.xnet.";
2178 const char *rp, *next;
2179 for (rp=reserved; *rp; rp=next) {
2180 next = strchr(rp, '.') + 1;
2181 if (strncmp(name+3, rp, next-rp) == 0)
2182 break;
2183 }
2184 if (*rp) {
2185 if (ldd_mode) {
2186 /* Track which names have been resolved
2187 * and only report each one once. */
2188 static unsigned reported;
2189 unsigned mask = 1U<<(rp-reserved);
2190 if (!(reported & mask)) {
2191 reported |= mask;
2192 dprintf(1, "\t%s => %s (%p)\n",
2193 name, ldso.name,
2194 ldso.base);
2195 }
2196 }
2197 is_self = 1;
2198 }
2199 }
2200 if (!strcmp(name, ldso.name)) is_self = 1;
2201 if (is_self) {
2202 if (!ldso.prev) {
2203 tail->next = &ldso;
2204 ldso.prev = tail;
2205 tail = &ldso;
2206 ldso.namespace = namespace;
2207 ns_add_dso(namespace, &ldso);
2208 }
2209 return &ldso;
2210 }
2211 if (strchr(name, '/')) {
2212 pathname = name;
2213
2214 if (!is_accessible(namespace, pathname, g_is_asan, check_inherited)) {
2215 fd = -1;
2216 LD_LOGD("load_library is_accessible return false,fd = -1");
2217 } else {
2218 fd = open(name, O_RDONLY|O_CLOEXEC);
2219 LD_LOGD("load_library is_accessible return true, open file fd:%{public}d .", fd);
2220 }
2221 } else {
2222 /* Search for the name to see if it's already loaded */
2223 /* Search in namespace */
2224 p = find_library_by_name(name, namespace, check_inherited);
2225 if (p) {
2226 LD_LOGD("load_library find_library_by_name found p, return it!");
2227 return p;
2228 }
2229 if (strlen(name) > NAME_MAX) {
2230 LD_LOGE("load_library name exceeding the maximum length, return 0!");
2231 return 0;
2232 }
2233 fd = -1;
2234 if (namespace->env_paths) fd = path_open(name, namespace->env_paths, buf, sizeof buf);
2235 for (p = needed_by; fd == -1 && p; p = p->needed_by) {
2236 if (fixup_rpath(p, buf, sizeof buf) < 0) {
2237 LD_LOGD("load_library Inhibit further search,fd = -2.");
2238 fd = -2; /* Inhibit further search. */
2239 }
2240 if (p->rpath) {
2241 fd = path_open(name, p->rpath, buf, sizeof buf);
2242 LD_LOGD("load_library p->rpath path_open fd:%{public}d.", fd);
2243 }
2244
2245 }
2246 if (g_is_asan) {
2247 fd = handle_asan_path_open(fd, name, namespace, buf, sizeof buf);
2248 LD_LOGD("load_library handle_asan_path_open fd:%{public}d.", fd);
2249 } else {
2250 if (fd == -1 && namespace->lib_paths) {
2251 fd = path_open(name, namespace->lib_paths, buf, sizeof buf);
2252 LD_LOGD("load_library no asan lib_paths path_open fd:%{public}d.", fd);
2253 }
2254 }
2255 pathname = buf;
2256 LD_LOGD("load_library lib_paths pathname:%{public}s.", pathname);
2257 }
2258 if (fd < 0) {
2259 if (!check_inherited || !namespace->ns_inherits) return 0;
2260 /* Load lib in the inherited namespace. Do not check inherited namespaces again. */
2261 for (size_t i = 0; i < namespace->ns_inherits->num; i++) {
2262 ns_inherit *inherit = namespace->ns_inherits->inherits[i];
2263 if (strchr(name, '/') == 0 && !is_sharable(inherit, name)) continue;
2264 p = load_library(name, needed_by, inherit->inherited_ns, false, reserved_params);
2265 if (p) {
2266 LD_LOGD("load_library search in inherited, found p ,inherited_ns name:%{public}s",
2267 inherit->inherited_ns->ns_name);
2268 return p;
2269 }
2270 }
2271 return 0;
2272 }
2273 if (fstat(fd, &st) < 0) {
2274 close(fd);
2275 LD_LOGE("load_library fstat < 0,return 0!");
2276 return 0;
2277 }
2278 /* Search in namespace */
2279 p = find_library_by_fstat(&st, namespace, check_inherited, 0);
2280 if (p) {
2281 /* If this library was previously loaded with a
2282 * pathname but a search found the same inode,
2283 * setup its shortname so it can be found by name. */
2284 if (!p->shortname && pathname != name)
2285 p->shortname = strrchr(p->name, '/')+1;
2286 close(fd);
2287 LD_LOGD("load_library find_library_by_fstat, found p and return it!");
2288 return p;
2289 }
2290 map = noload ? 0 : map_library(fd, &temp_dso, reserved_params);
2291 close(fd);
2292 if (!map) return 0;
2293
2294 /* Avoid the danger of getting two versions of libc mapped into the
2295 * same process when an absolute pathname was used. The symbols
2296 * checked are chosen to catch both musl and glibc, and to avoid
2297 * false positives from interposition-hack libraries. */
2298 decode_dyn(&temp_dso);
2299 if (find_sym(&temp_dso, "__libc_start_main", 1).sym &&
2300 find_sym(&temp_dso, "stdin", 1).sym) {
2301 unmap_library(&temp_dso);
2302 return load_library("libc.so", needed_by, namespace, true, reserved_params);
2303 }
2304 /* Past this point, if we haven't reached runtime yet, ldso has
2305 * committed either to use the mapped library or to abort execution.
2306 * Unmapping is not possible, so we can safely reclaim gaps. */
2307 if (!runtime) reclaim_gaps(&temp_dso);
2308
2309 /* Allocate storage for the new DSO. When there is TLS, this
2310 * storage must include a reservation for all pre-existing
2311 * threads to obtain copies of both the new TLS, and an
2312 * extended DTV capable of storing an additional slot for
2313 * the newly-loaded DSO. */
2314 alloc_size = sizeof *p + strlen(pathname) + 1;
2315 if (runtime && temp_dso.tls.image) {
2316 size_t per_th = temp_dso.tls.size + temp_dso.tls.align
2317 + sizeof(void *) * (tls_cnt+3);
2318 n_th = libc.threads_minus_1 + 1;
2319 if (n_th > SSIZE_MAX / per_th) alloc_size = SIZE_MAX;
2320 else alloc_size += n_th * per_th;
2321 }
2322 p = calloc(1, alloc_size);
2323 if (!p) {
2324 unmap_library(&temp_dso);
2325 return 0;
2326 }
2327 memcpy(p, &temp_dso, sizeof temp_dso);
2328 p->dev = st.st_dev;
2329 p->ino = st.st_ino;
2330 p->needed_by = needed_by;
2331 p->name = p->buf;
2332 p->runtime_loaded = runtime;
2333 strcpy(p->name, pathname);
2334 /* Add a shortname only if name arg was not an explicit pathname. */
2335 if (pathname != name) p->shortname = strrchr(p->name, '/')+1;
2336 if (p->tls.image) {
2337 p->tls_id = ++tls_cnt;
2338 tls_align = MAXP2(tls_align, p->tls.align);
2339 #ifdef TLS_ABOVE_TP
2340 p->tls.offset = tls_offset + ( (p->tls.align-1) &
2341 (-tls_offset + (uintptr_t)p->tls.image) );
2342 tls_offset = p->tls.offset + p->tls.size;
2343 #else
2344 tls_offset += p->tls.size + p->tls.align - 1;
2345 tls_offset -= (tls_offset + (uintptr_t)p->tls.image)
2346 & (p->tls.align-1);
2347 p->tls.offset = tls_offset;
2348 #endif
2349 p->new_dtv = (void *)(-sizeof(size_t) &
2350 (uintptr_t)(p->name+strlen(p->name)+sizeof(size_t)));
2351 p->new_tls = (void *)(p->new_dtv + n_th*(tls_cnt+1));
2352 if (tls_tail) tls_tail->next = &p->tls;
2353 else libc.tls_head = &p->tls;
2354 tls_tail = &p->tls;
2355 }
2356
2357 tail->next = p;
2358 p->prev = tail;
2359 tail = p;
2360
2361 /* Add dso to namespace */
2362 p->namespace = namespace;
2363 ns_add_dso(namespace, p);
2364 if (runtime)
2365 p->by_dlopen = 1;
2366
2367 if (DL_FDPIC) makefuncdescs(p);
2368
2369 if (ldd_mode) dprintf(1, "\t%s => %s (%p)\n", name, pathname, p->base);
2370
2371 return p;
2372 }
2373
2374 static void load_direct_deps(struct dso *p, ns_t *namespace, struct reserved_address_params *reserved_params)
2375 {
2376 size_t i, cnt=0;
2377
2378 if (p->deps) return;
2379 /* For head, all preloads are direct pseudo-dependencies.
2380 * Count and include them now to avoid realloc later. */
2381 if (p==head) for (struct dso *q=p->next; q; q=q->next)
2382 cnt++;
2383 for (i=0; p->dynv[i]; i+=2)
2384 if (p->dynv[i] == DT_NEEDED) cnt++;
2385 /* Use builtin buffer for apps with no external deps, to
2386 * preserve property of no runtime failure paths. */
2387 p->deps = (p==head && cnt<2) ? builtin_deps :
2388 calloc(cnt+1, sizeof *p->deps);
2389 if (!p->deps) {
2390 error("Error loading dependencies for %s", p->name);
2391 if (runtime) longjmp(*rtld_fail, 1);
2392 }
2393 cnt=0;
2394 if (p==head) for (struct dso *q=p->next; q; q=q->next)
2395 p->deps[cnt++] = q;
2396 for (i=0; p->dynv[i]; i+=2) {
2397 if (p->dynv[i] != DT_NEEDED) continue;
2398 struct dso *dep = load_library(p->strings + p->dynv[i + 1], p, namespace, true, reserved_params);
2399 LD_LOGD("loading shared library %{public}s: (needed by %{public}s)", p->strings + p->dynv[i+1], p->name);
2400 if (!dep) {
2401 error("Error loading shared library %s: %m (needed by %s)",
2402 p->strings + p->dynv[i+1], p->name);
2403 if (runtime) longjmp(*rtld_fail, 1);
2404 continue;
2405 }
2406 p->deps[cnt++] = dep;
2407 }
2408 p->deps[cnt] = 0;
2409 p->ndeps_direct = cnt;
2410 for (i = 0; i < p->ndeps_direct; i++) {
2411 add_dso_parent(p->deps[i], p);
2412 }
2413 }
2414
2415 static void load_deps(struct dso *p, struct reserved_address_params *reserved_params)
2416 {
2417 if (p->deps) return;
2418 for (; p; p = p->next)
2419 load_direct_deps(p, p->namespace, reserved_params);
2420 }
2421 #endif
2422
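/* Extend p->deps (or p->deps_all when to_deps_all is set) from the direct
 * dependencies into a breadth-first list of all transitive dependencies,
 * using the per-dso mark flag to avoid duplicate entries. */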
2423 static void extend_bfs_deps(struct dso *p, bool to_deps_all)
2424 {
2425 size_t i, j, cnt, ndeps_all;
2426 struct dso **tmp;
2427
2428 /* Can't use realloc if the original p->deps was allocated at
2429 * program entry and malloc has been replaced, or if it's
2430 * the builtin non-allocated trivial main program deps array. */
2431 int no_realloc = (__malloc_replaced && !p->runtime_loaded)
2432 || p->deps == builtin_deps;
2433
2434 if (p->bfs_built) return;
2435 if (to_deps_all && p->deps_all_built) {
2436 return;
2437 }
2438
2439 ndeps_all = p->ndeps_direct;
2440 if (to_deps_all) {
2441 // Allocate one extra slot because the deps list is NULL-terminated.
2442 p->deps_all = calloc(ndeps_all + 1, sizeof *p->deps);
2443 }
2444
2445 /* Mark existing (direct) deps so they won't be duplicated. */
2446 for (i=0; p->deps[i]; i++) {
2447 if (to_deps_all) {
2448 p->deps_all[i] = p->deps[i];
2449 }
2450 p->deps[i]->mark = 1;
2451 }
2452
2453 /* For each dependency already in the list, copy its list of direct
2454 * dependencies to the list, excluding any items already in the
2455 * list. Note that the list this loop iterates over will grow during
2456 * the loop, but since duplicates are excluded, growth is bounded. */
2457 if (to_deps_all) {
2458 for (i=0; p->deps_all[i]; i++) {
2459 struct dso *dep = p->deps_all[i];
2460 for (j=cnt=0; j<dep->ndeps_direct; j++)
2461 if (!dep->deps[j]->mark) cnt++;
2462 tmp = no_realloc ?
2463 malloc(sizeof(*tmp) * (ndeps_all+cnt+1)) :
2464 realloc(p->deps_all, sizeof(*tmp) * (ndeps_all+cnt+1));
2465 if (!tmp) {
2466 error("Error recording dependencies for %s", p->name);
2467 if (runtime) longjmp(*rtld_fail, 1);
2468 continue;
2469 }
2470 if (no_realloc) {
2471 memcpy(tmp, p->deps_all, sizeof(*tmp) * (ndeps_all+1));
2472 no_realloc = 0;
2473 }
2474 p->deps_all = tmp;
2475 for (j=0; j<dep->ndeps_direct; j++) {
2476 if (dep->deps[j]->mark) continue;
2477 dep->deps[j]->mark = 1;
2478 p->deps_all[ndeps_all++] = dep->deps[j];
2479 }
2480 p->deps_all[ndeps_all] = 0;
2481 }
2482 p->deps_all_built = 1;
2483 } else {
2484 for (i=0; p->deps[i]; i++) {
2485 struct dso *dep = p->deps[i];
2486 for (j=cnt=0; j<dep->ndeps_direct; j++)
2487 if (!dep->deps[j]->mark) cnt++;
2488 tmp = no_realloc ?
2489 malloc(sizeof(*tmp) * (ndeps_all+cnt+1)) :
2490 realloc(p->deps, sizeof(*tmp) * (ndeps_all+cnt+1));
2491 if (!tmp) {
2492 error("Error recording dependencies for %s", p->name);
2493 if (runtime) longjmp(*rtld_fail, 1);
2494 continue;
2495 }
2496 if (no_realloc) {
2497 memcpy(tmp, p->deps, sizeof(*tmp) * (ndeps_all+1));
2498 no_realloc = 0;
2499 }
2500 p->deps = tmp;
2501 for (j=0; j<dep->ndeps_direct; j++) {
2502 if (dep->deps[j]->mark) continue;
2503 dep->deps[j]->mark = 1;
2504 p->deps[ndeps_all++] = dep->deps[j];
2505 }
2506 p->deps[ndeps_all] = 0;
2507 }
2508 p->bfs_built = 1;
2509 }
2510 for (p=head; p; p=p->next)
2511 p->mark = 0;
2512 }
2513
2514 #ifndef LOAD_ORDER_RANDOMIZATION
2515 static void load_preload(char *s, ns_t *ns)
2516 {
2517 int tmp;
2518 char *z;
2519 for (z=s; *z; s=z) {
2520 for ( ; *s && (isspace(*s) || *s==':'); s++);
2521 for (z=s; *z && !isspace(*z) && *z!=':'; z++);
2522 tmp = *z;
2523 *z = 0;
2524 load_library(s, 0, ns, true, NULL);
2525 *z = tmp;
2526 }
2527 }
2528 #endif
2529
2530 static void add_syms(struct dso *p)
2531 {
2532 if (!p->syms_next && syms_tail != p) {
2533 syms_tail->syms_next = p;
2534 syms_tail = p;
2535 }
2536 }
2537
2538 static void revert_syms(struct dso *old_tail)
2539 {
2540 struct dso *p, *next;
2541 /* Chop off the tail of the list of dsos that participate in
2542 * the global symbol table, reverting them to RTLD_LOCAL. */
2543 for (p=old_tail; p; p=next) {
2544 next = p->syms_next;
2545 p->syms_next = 0;
2546 }
2547 syms_tail = old_tail;
2548 }
2549
2550 static void do_mips_relocs(struct dso *p, size_t *got)
2551 {
2552 size_t i, j, rel[2];
2553 unsigned char *base = p->base;
2554 i=0; search_vec(p->dynv, &i, DT_MIPS_LOCAL_GOTNO);
2555 if (p==&ldso) {
2556 got += i;
2557 } else {
2558 while (i--) *got++ += (size_t)base;
2559 }
2560 j=0; search_vec(p->dynv, &j, DT_MIPS_GOTSYM);
2561 i=0; search_vec(p->dynv, &i, DT_MIPS_SYMTABNO);
2562 Sym *sym = p->syms + j;
2563 rel[0] = (unsigned char *)got - base;
2564 for (i-=j; i; i--, sym++, rel[0]+=sizeof(size_t)) {
2565 rel[1] = R_INFO(sym-p->syms, R_MIPS_JUMP_SLOT);
2566 do_relocs(p, rel, sizeof rel, 2);
2567 }
2568 }
2569
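/* Decode one SLEB128 value from [current, end): 7 data bits per byte, the
 * high bit marks continuation, with sign extension from bit 6 of the final
 * byte. Crashes rather than reading past end. */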
2570 static uint8_t* sleb128_decoder(uint8_t* current, uint8_t* end, size_t* value)
2571 {
2572 size_t result = 0;
2573 static const size_t size = CHAR_BIT * sizeof(result);
2574
2575 size_t shift = 0;
2576 uint8_t byte;
2577
2578 do {
2579 if (current >= end) {
2580 a_crash();
2581 }
2582
2583 byte = *current++;
2584 result |= ((size_t)(byte & 127) << shift);
2585 shift += 7;
2586 } while (byte & 128);
2587
2588 if (shift < size && (byte & 64)) {
2589 result |= -((size_t)(1) << shift);
2590 }
2591
2592 *value = result;
2593
2594 return current;
2595 }
2596
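/* Apply Android packed relocations (DT_ANDROID_REL[A]): after the "APS2"
 * signature the section is a SLEB128 stream holding a relocation count and
 * groups whose flags say whether the offset delta, r_info and addend are
 * shared by the whole group or encoded per relocation. */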
2597 static void do_android_relocs(struct dso *p, size_t dt_name, size_t dt_size)
2598 {
2599 size_t android_rel_addr = 0, android_rel_size = 0;
2600 uint8_t *android_rel_curr, *android_rel_end;
2601
2602 search_vec(p->dynv, &android_rel_addr, dt_name);
2603 search_vec(p->dynv, &android_rel_size, dt_size);
2604
2605 if (!android_rel_addr || (android_rel_size < 4)) {
2606 return;
2607 }
2608
2609 android_rel_curr = laddr(p, android_rel_addr);
2610 if (memcmp(android_rel_curr, "APS2", ANDROID_REL_SIGN_SIZE)) {
2611 return;
2612 }
2613
2614 android_rel_curr += ANDROID_REL_SIGN_SIZE;
2615 android_rel_size -= ANDROID_REL_SIGN_SIZE;
2616
2617 android_rel_end = android_rel_curr + android_rel_size;
2618
2619 size_t relocs_num;
2620 size_t rel[3] = {0};
2621
2622 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &relocs_num);
2623 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[0]);
2624
2625 for (size_t i = 0; i < relocs_num;) {
2626
2627 size_t group_size, group_flags;
2628
2629 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_size);
2630 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_flags);
2631
2632 size_t group_r_offset_delta = 0;
2633
2634 if (group_flags & RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG) {
2635 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_r_offset_delta);
2636 }
2637
2638 if (group_flags & RELOCATION_GROUPED_BY_INFO_FLAG) {
2639 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[1]);
2640 }
2641
2642 const size_t addend_flags = group_flags & (RELOCATION_GROUP_HAS_ADDEND_FLAG | RELOCATION_GROUPED_BY_ADDEND_FLAG);
2643
2644 if (addend_flags == RELOCATION_GROUP_HAS_ADDEND_FLAG) {
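/* Addend present but not grouped: it is decoded per relocation in the inner loop below. */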
2645 } else if (addend_flags == (RELOCATION_GROUP_HAS_ADDEND_FLAG | RELOCATION_GROUPED_BY_ADDEND_FLAG)) {
2646 size_t addend;
2647 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &addend);
2648 rel[2] += addend;
2649 } else {
2650 rel[2] = 0;
2651 }
2652
2653 for (size_t j = 0; j < group_size; j++) {
2654 if (group_flags & RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG) {
2655 rel[0] += group_r_offset_delta;
2656 } else {
2657 size_t offset_delta;
2658 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &offset_delta);
2659
2660 rel[0] += offset_delta;
2661 }
2662
2663 if ((group_flags & RELOCATION_GROUPED_BY_INFO_FLAG) == 0) {
2664 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[1]);
2665 }
2666
2667 if (addend_flags == RELOCATION_GROUP_HAS_ADDEND_FLAG) {
2668 size_t addend;
2669 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &addend);
2670 rel[2] += addend;
2671 }
2672
2673 if (dt_name == DT_ANDROID_REL) {
2674 do_relocs(p, rel, sizeof(size_t) * 2, 2);
2675 } else {
2676 do_relocs(p, rel, sizeof(size_t) * 3, 3);
2677 }
2678 }
2679
2680 i += group_size;
2681 }
2682 }
2683
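/* Apply DT_RELR relative relocations: even entries give the address of the
 * next relocation target, odd entries are bitmaps covering the following
 * (8 * sizeof(size_t) - 1) words. */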
2684 static void do_relr_relocs(struct dso *dso, size_t *relr, size_t relr_size)
2685 {
2686 if (dso == &ldso) return; /* self-relocation was done in _dlstart */
2687 unsigned char *base = dso->base;
2688 size_t *reloc_addr;
2689 for (; relr_size; relr++, relr_size -= sizeof(size_t))
2690 if ((relr[0] & 1) == 0) {
2691 reloc_addr = laddr(dso, relr[0]);
2692 *reloc_addr++ += (size_t)base;
2693 } else {
2694 int i = 0;
2695 for (size_t bitmap = relr[0]; (bitmap >>= 1); i++)
2696 if (bitmap & 1)
2697 reloc_addr[i] += (size_t)base;
2698 reloc_addr += 8 * sizeof(size_t) - 1;
2699 }
2700 }
2701
2702 static void reloc_all(struct dso *p, const dl_extinfo *extinfo)
2703 {
2704 ssize_t relro_fd_offset = 0;
2705 size_t dyn[DYN_CNT];
2706 for (; p; p=p->next) {
2707 if (p->relocated) continue;
2708 if (p != &ldso) {
2709 add_can_search_so_list_in_dso(p, head);
2710 }
2711 decode_vec(p->dynv, dyn, DYN_CNT);
2712 if (NEED_MIPS_GOT_RELOCS)
2713 do_mips_relocs(p, laddr(p, dyn[DT_PLTGOT]));
2714 do_relocs(p, laddr(p, dyn[DT_JMPREL]), dyn[DT_PLTRELSZ],
2715 2+(dyn[DT_PLTREL]==DT_RELA));
2716 do_relocs(p, laddr(p, dyn[DT_REL]), dyn[DT_RELSZ], 2);
2717 do_relocs(p, laddr(p, dyn[DT_RELA]), dyn[DT_RELASZ], 3);
2718 if (!DL_FDPIC)
2719 do_relr_relocs(p, laddr(p, dyn[DT_RELR]), dyn[DT_RELRSZ]);
2720
2721 do_android_relocs(p, DT_ANDROID_REL, DT_ANDROID_RELSZ);
2722 do_android_relocs(p, DT_ANDROID_RELA, DT_ANDROID_RELASZ);
2723
2724 if (head != &ldso && p->relro_start != p->relro_end &&
2725 mprotect(laddr(p, p->relro_start), p->relro_end-p->relro_start, PROT_READ)
2726 && errno != ENOSYS) {
2727 error("Error relocating %s: RELRO protection failed: %m",
2728 p->name);
2729 if (runtime) longjmp(*rtld_fail, 1);
2730 }
2731 /* Handle serializing/mapping the RELRO segment */
2732 handle_relro_sharing(p, extinfo, &relro_fd_offset);
2733
2734 p->relocated = 1;
2735 free_reloc_can_search_dso(p);
2736 }
2737 }
2738
2739 static void kernel_mapped_dso(struct dso *p)
2740 {
2741 size_t min_addr = -1, max_addr = 0, cnt;
2742 Phdr *ph = p->phdr;
2743 for (cnt = p->phnum; cnt--; ph = (void *)((char *)ph + p->phentsize)) {
2744 if (ph->p_type == PT_DYNAMIC) {
2745 p->dynv = laddr(p, ph->p_vaddr);
2746 } else if (ph->p_type == PT_GNU_RELRO) {
2747 p->relro_start = ph->p_vaddr & -PAGE_SIZE;
2748 p->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
2749 } else if (ph->p_type == PT_GNU_STACK) {
2750 if (!runtime && ph->p_memsz > __default_stacksize) {
2751 __default_stacksize =
2752 ph->p_memsz < DEFAULT_STACK_MAX ?
2753 ph->p_memsz : DEFAULT_STACK_MAX;
2754 }
2755 }
2756 if (ph->p_type != PT_LOAD) continue;
2757 if (ph->p_vaddr < min_addr)
2758 min_addr = ph->p_vaddr;
2759 if (ph->p_vaddr+ph->p_memsz > max_addr)
2760 max_addr = ph->p_vaddr+ph->p_memsz;
2761 }
2762 min_addr &= -PAGE_SIZE;
2763 max_addr = (max_addr + PAGE_SIZE-1) & -PAGE_SIZE;
2764 p->map = p->base + min_addr;
2765 p->map_len = max_addr - min_addr;
2766 p->kernel_mapped = 1;
2767 }
2768
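/* Run destructors (DT_FINI_ARRAY in reverse order, then legacy DT_FINI) for
 * every constructed dso on the fini_head list, waiting first for any
 * constructor still running in another thread to finish. */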
2769 void __libc_exit_fini()
2770 {
2771 struct dso *p;
2772 size_t dyn[DYN_CNT];
2773 pthread_t self = __pthread_self();
2774
2775 /* Take both locks before setting shutting_down, so that
2776 * either lock is sufficient to read its value. The lock
2777 * order matches that in dlopen to avoid deadlock. */
2778 pthread_rwlock_wrlock(&lock);
2779 pthread_mutex_lock(&init_fini_lock);
2780 shutting_down = 1;
2781 pthread_rwlock_unlock(&lock);
2782 for (p=fini_head; p; p=p->fini_next) {
2783 while (p->ctor_visitor && p->ctor_visitor!=self)
2784 pthread_cond_wait(&ctor_cond, &init_fini_lock);
2785 if (!p->constructed) continue;
2786 decode_vec(p->dynv, dyn, DYN_CNT);
2787 if (dyn[0] & (1<<DT_FINI_ARRAY)) {
2788 size_t n = dyn[DT_FINI_ARRAYSZ]/sizeof(size_t);
2789 size_t *fn = (size_t *)laddr(p, dyn[DT_FINI_ARRAY])+n;
2790 while (n--) ((void (*)(void))*--fn)();
2791 }
2792 #ifndef NO_LEGACY_INITFINI
2793 if ((dyn[0] & (1<<DT_FINI)) && dyn[DT_FINI])
2794 fpaddr(p, dyn[DT_FINI])();
2795 #endif
2796 }
2797 }
2798
2799 void __pthread_mutex_unlock_atfork(int who)
2800 {
2801 if (who == 0) {
2802 // If a multithreaded process holds dlclose_lock and calls fork,
2803 // dlclose_lock would never be unlocked in the child before it calls execve,
2804 // so reset dlclose_lock to make sure the child process can still call dlclose after fork.
2805 __pthread_mutex_unlock_recursive_inner(&dlclose_lock);
2806 }
2807 }
2808
2809 void __ldso_atfork(int who)
2810 {
2811 if (who<0) {
2812 pthread_rwlock_wrlock(&lock);
2813 pthread_mutex_lock(&init_fini_lock);
2814 } else {
2815 pthread_mutex_unlock(&init_fini_lock);
2816 pthread_rwlock_unlock(&lock);
2817 }
2818 }
2819
2820 static struct dso **queue_ctors(struct dso *dso)
2821 {
2822 size_t cnt, qpos, spos, i;
2823 struct dso *p, **queue, **stack;
2824
2825 if (ldd_mode) return 0;
2826
2827 /* Bound on queue size is the total number of indirect deps.
2828 * If a bfs deps list was built, we can use it. Otherwise,
2829 * bound by the total number of DSOs, which is always safe and
2830 * reasonable to use (for the main app at startup). */
2831 if (dso->bfs_built) {
2832 for (cnt=0; dso->deps[cnt]; cnt++)
2833 dso->deps[cnt]->mark = 0;
2834 cnt++; /* self, not included in deps */
2835 } else {
2836 for (cnt=0, p=head; p; cnt++, p=p->next)
2837 p->mark = 0;
2838 }
2839 cnt++; /* termination slot */
2840 if (dso==head && cnt <= countof(builtin_ctor_queue))
2841 queue = builtin_ctor_queue;
2842 else
2843 queue = calloc(cnt, sizeof *queue);
2844
2845 if (!queue) {
2846 error("Error allocating constructor queue: %m\n");
2847 if (runtime) longjmp(*rtld_fail, 1);
2848 return 0;
2849 }
2850
2851 /* Opposite ends of the allocated buffer serve as an output queue
2852 * and a working stack. Setup initial stack with just the argument
2853 * dso and initial queue empty... */
2854 stack = queue;
2855 qpos = 0;
2856 spos = cnt;
2857 stack[--spos] = dso;
2858 dso->next_dep = 0;
2859 dso->mark = 1;
2860
2861 /* Then perform pseudo-DFS sort, but ignoring circular deps. */
2862 while (spos<cnt) {
2863 p = stack[spos++];
2864 while (p->next_dep < p->ndeps_direct) {
2865 if (p->deps[p->next_dep]->mark) {
2866 p->next_dep++;
2867 } else {
2868 stack[--spos] = p;
2869 p = p->deps[p->next_dep];
2870 p->next_dep = 0;
2871 p->mark = 1;
2872 }
2873 }
2874 queue[qpos++] = p;
2875 }
2876 queue[qpos] = 0;
2877 for (i=0; i<qpos; i++) queue[i]->mark = 0;
2878
2879 return queue;
2880 }
2881
2882 static void do_init_fini(struct dso **queue)
2883 {
2884 struct dso *p;
2885 size_t dyn[DYN_CNT], i;
2886 pthread_t self = __pthread_self();
2887
2888 pthread_mutex_lock(&init_fini_lock);
2889 for (i=0; (p=queue[i]); i++) {
2890 while ((p->ctor_visitor && p->ctor_visitor!=self) || shutting_down)
2891 pthread_cond_wait(&ctor_cond, &init_fini_lock);
2892 if (p->ctor_visitor || p->constructed)
2893 continue;
2894 p->ctor_visitor = self;
2895
2896 decode_vec(p->dynv, dyn, DYN_CNT);
2897 if (dyn[0] & ((1<<DT_FINI) | (1<<DT_FINI_ARRAY))) {
2898 p->fini_next = fini_head;
2899 fini_head = p;
2900 }
2901
2902 pthread_mutex_unlock(&init_fini_lock);
2903
2904 #ifndef NO_LEGACY_INITFINI
2905 if ((dyn[0] & (1<<DT_INIT)) && dyn[DT_INIT])
2906 fpaddr(p, dyn[DT_INIT])();
2907 #endif
2908 if (dyn[0] & (1<<DT_INIT_ARRAY)) {
2909 size_t n = dyn[DT_INIT_ARRAYSZ]/sizeof(size_t);
2910 size_t *fn = laddr(p, dyn[DT_INIT_ARRAY]);
2911 if (p != &ldso) {
2912 trace_marker_begin(HITRACE_TAG_MUSL, "calling constructors: ", p->name);
2913 }
2914 while (n--) ((void (*)(void))*fn++)();
2915 if (p != &ldso) {
2916 trace_marker_end(HITRACE_TAG_MUSL);
2917 }
2918 }
2919
2920 pthread_mutex_lock(&init_fini_lock);
2921 p->ctor_visitor = 0;
2922 p->constructed = 1;
2923 pthread_cond_broadcast(&ctor_cond);
2924 }
2925 pthread_mutex_unlock(&init_fini_lock);
2926 }
2927
2928 void __libc_start_init(void)
2929 {
2930 do_init_fini(main_ctor_queue);
2931 if (!__malloc_replaced && main_ctor_queue != builtin_ctor_queue)
2932 free(main_ctor_queue);
2933 main_ctor_queue = 0;
2934 }
2935
2936 static void dl_debug_state(void)
2937 {
2938 }
2939
2940 weak_alias(dl_debug_state, _dl_debug_state);
2941
2942 void __init_tls(size_t *auxv)
2943 {
2944 }
2945
2946 static void update_tls_size()
2947 {
2948 libc.tls_cnt = tls_cnt;
2949 libc.tls_align = tls_align;
2950 libc.tls_size = ALIGN(
2951 (1+tls_cnt) * sizeof(void *) +
2952 tls_offset +
2953 sizeof(struct pthread) +
2954 tls_align * 2,
2955 tls_align);
2956 }
2957
2958 static void install_new_tls(void)
2959 {
2960 sigset_t set;
2961 pthread_t self = __pthread_self(), td;
2962 struct dso *dtv_provider = container_of(tls_tail, struct dso, tls);
2963 uintptr_t (*newdtv)[tls_cnt+1] = (void *)dtv_provider->new_dtv;
2964 struct dso *p;
2965 size_t i, j;
2966 size_t old_cnt = self->dtv[0];
2967
2968 __block_app_sigs(&set);
2969 __tl_lock();
2970 if (get_tl_lock_caller_count()) {
2971 get_tl_lock_caller_count()->install_new_tls_tl_lock++;
2972 }
2973 /* Copy existing dtv contents from all existing threads. */
2974 for (i=0, td=self; !i || td!=self; i++, td=td->next) {
2975 memcpy(newdtv+i, td->dtv,
2976 (old_cnt+1)*sizeof(uintptr_t));
2977 newdtv[i][0] = tls_cnt;
2978 }
2979 /* Install new dtls into the enlarged, uninstalled dtv copies. */
2980 for (p=head; ; p=p->next) {
2981 if (p->tls_id <= old_cnt) continue;
2982 unsigned char *mem = p->new_tls;
2983 for (j=0; j<i; j++) {
2984 unsigned char *new = mem;
2985 new += ((uintptr_t)p->tls.image - (uintptr_t)mem)
2986 & (p->tls.align-1);
2987 memcpy(new, p->tls.image, p->tls.len);
2988 newdtv[j][p->tls_id] =
2989 (uintptr_t)new + DTP_OFFSET;
2990 mem += p->tls.size + p->tls.align;
2991 }
2992 if (p->tls_id == tls_cnt) break;
2993 }
2994
2995 /* Broadcast barrier to ensure contents of new dtv is visible
2996 * if the new dtv pointer is. The __membarrier function has a
2997 * fallback emulation using signals for kernels that lack the
2998 * feature at the syscall level. */
2999
3000 __membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
3001
3002 /* Install new dtv for each thread. */
3003 for (j=0, td=self; !j || td!=self; j++, td=td->next) {
3004 td->dtv = newdtv[j];
3005 }
3006
3007 if (get_tl_lock_caller_count()) {
3008 get_tl_lock_caller_count()->install_new_tls_tl_lock--;
3009 }
3010 __tl_unlock();
3011 __restore_sigs(&set);
3012 }
3013
3014 /* Stage 1 of the dynamic linker is defined in dlstart.c. It calls the
3015 * following stage 2 and stage 3 functions via primitive symbolic lookup
3016 * since it does not have access to their addresses to begin with. */
3017
3018 /* Stage 2 of the dynamic linker is called after relative relocations
3019 * have been processed. It can make function calls to static functions
3020 * and access string literals and static data, but cannot use extern
3021 * symbols. Its job is to perform symbolic relocations on the dynamic
3022 * linker itself, but some of the relocations performed may need to be
3023 * replaced later due to copy relocations in the main program. */
3024
3025 hidden void __dls2(unsigned char *base, size_t *sp)
3026 {
3027 size_t *auxv;
3028 for (auxv=sp+1+*sp+1; *auxv; auxv++);
3029 auxv++;
3030 if (DL_FDPIC) {
3031 void *p1 = (void *)sp[-2];
3032 void *p2 = (void *)sp[-1];
3033 if (!p1) {
3034 size_t aux[AUX_CNT];
3035 decode_vec(auxv, aux, AUX_CNT);
3036 if (aux[AT_BASE]) ldso.base = (void *)aux[AT_BASE];
3037 else ldso.base = (void *)(aux[AT_PHDR] & -4096);
3038 }
3039 app_loadmap = p2 ? p1 : 0;
3040 ldso.loadmap = p2 ? p2 : p1;
3041 ldso.base = laddr(&ldso, 0);
3042 } else {
3043 ldso.base = base;
3044 }
3045 size_t aux[AUX_CNT];
3046 decode_vec(auxv, aux, AUX_CNT);
3047 libc.page_size = aux[AT_PAGESZ];
3048 Ehdr *ehdr = __ehdr_start ? (void *)__ehdr_start : (void *)ldso.base;
3049 ldso.name = ldso.shortname = "libc.so";
3050 ldso.phnum = ehdr->e_phnum;
3051 ldso.phdr = laddr(&ldso, ehdr->e_phoff);
3052 ldso.phentsize = ehdr->e_phentsize;
3053 ldso.is_global = true;
3054 search_vec(auxv, &ldso_page_size, AT_PAGESZ);
3055 kernel_mapped_dso(&ldso);
3056 decode_dyn(&ldso);
3057
3058 if (DL_FDPIC) makefuncdescs(&ldso);
3059
3060 /* Prepare storage to save clobbered REL addends so they
3061 * can be reused in stage 3. There should be very few. If
3062 * something goes wrong and there are a huge number, abort
3063 * instead of risking stack overflow. */
3064 size_t dyn[DYN_CNT];
3065 decode_vec(ldso.dynv, dyn, DYN_CNT);
3066 size_t *rel = laddr(&ldso, dyn[DT_REL]);
3067 size_t rel_size = dyn[DT_RELSZ];
3068 size_t symbolic_rel_cnt = 0;
3069 apply_addends_to = rel;
3070 for (; rel_size; rel+=2, rel_size-=2*sizeof(size_t))
3071 if (!IS_RELATIVE(rel[1], ldso.syms)) symbolic_rel_cnt++;
3072 if (symbolic_rel_cnt >= ADDEND_LIMIT) a_crash();
3073 size_t addends[symbolic_rel_cnt+1];
3074 saved_addends = addends;
3075
3076 head = &ldso;
3077 reloc_all(&ldso, NULL);
3078
3079 ldso.relocated = 0;
3080
3081 /* Call dynamic linker stage-2b, __dls2b, looking it up
3082 * symbolically as a barrier against moving the address
3083 * load across the above relocation processing. */
3084 struct symdef dls2b_def = find_sym(&ldso, "__dls2b", 0);
3085 if (DL_FDPIC) ((stage3_func)&ldso.funcdescs[dls2b_def.sym-ldso.syms])(sp, auxv, aux);
3086 else ((stage3_func)laddr(&ldso, dls2b_def.sym->st_value))(sp, auxv, aux);
3087 }
3088
3089 /* Stage 2b sets up a valid thread pointer, which requires relocations
3090 * completed in stage 2, and on which stage 3 is permitted to depend.
3091 * This is done as a separate stage, with symbolic lookup as a barrier,
3092 * so that loads of the thread pointer and &errno can be pure/const and
3093 * thereby hoistable. */
3094
3095 void __dls2b(size_t *sp, size_t *auxv, size_t *aux)
3096 {
3097 /* Setup early thread pointer in builtin_tls for ldso/libc itself to
3098 * use during dynamic linking. If possible it will also serve as the
3099 * thread pointer at runtime. */
3100 search_vec(auxv, &__hwcap, AT_HWCAP);
3101 libc.auxv = auxv;
3102 libc.tls_size = sizeof builtin_tls;
3103 libc.tls_align = tls_align;
3104 if (__init_tp(__copy_tls((void *)builtin_tls)) < 0) {
3105 a_crash();
3106 }
3107 __pthread_self()->stack = (void *)(sp + 1);
3108 struct symdef dls3_def = find_sym(&ldso, "__dls3", 0);
3109 if (DL_FDPIC) ((stage3_func)&ldso.funcdescs[dls3_def.sym-ldso.syms])(sp, auxv, aux);
3110 else ((stage3_func)laddr(&ldso, dls3_def.sym->st_value))(sp, auxv, aux);
3111 }
3112
3113 /* Stage 3 of the dynamic linker is called with the dynamic linker/libc
3114 * fully functional. Its job is to load (if not already loaded) and
3115 * process dependencies and relocations for the main application and
3116 * transfer control to its entry point. */
3117
3118 void __dls3(size_t *sp, size_t *auxv, size_t *aux)
3119 {
3120 static struct dso app, vdso;
3121 size_t i;
3122 char *env_preload=0;
3123 char *replace_argv0=0;
3124 size_t vdso_base;
3125 int argc = *sp;
3126 char **argv = (void *)(sp+1);
3127 char **argv_orig = argv;
3128 char **envp = argv+argc+1;
3129
3130 /* Find aux vector just past environ[] and use it to initialize
3131 * global data that may be needed before we can make syscalls. */
3132 __environ = envp;
3133 search_vec(auxv, &__sysinfo, AT_SYSINFO);
3134 __pthread_self()->sysinfo = __sysinfo;
3135 libc.secure = ((aux[0]&0x7800)!=0x7800 || aux[AT_UID]!=aux[AT_EUID]
3136 || aux[AT_GID]!=aux[AT_EGID] || aux[AT_SECURE]);
3137
3138 /* Only trust user/env if kernel says we're not suid/sgid */
3139 if (!libc.secure) {
3140 env_path = getenv("LD_LIBRARY_PATH");
3141 env_preload = getenv("LD_PRELOAD");
3142 }
3143
3144 /* Activate error handler function */
3145 error = error_impl;
3146
3147 #ifdef OHOS_ENABLE_PARAMETER
3148 InitParameterClient();
3149 #endif
3150 // We may abort while linking other libs, so install the signal handler before this stage starts
3151 #ifdef DFX_SIGNAL_LIBC
3152 DFX_InstallSignalHandler();
3153 #endif
3154 InitHilogSocketFd();
3155 __init_fdsan();
3156 /* If the main program was already loaded by the kernel,
3157 * AT_PHDR will point to some location other than the dynamic
3158 * linker's program headers. */
3159 if (aux[AT_PHDR] != (size_t)ldso.phdr) {
3160 size_t interp_off = 0;
3161 size_t tls_image = 0;
3162 /* Find load address of the main program, via AT_PHDR vs PT_PHDR. */
3163 Phdr *phdr = app.phdr = (void *)aux[AT_PHDR];
3164 app.phnum = aux[AT_PHNUM];
3165 app.phentsize = aux[AT_PHENT];
3166 for (i = aux[AT_PHNUM]; i; i--, phdr = (void *)((char *)phdr + aux[AT_PHENT])) {
3167 if (phdr->p_type == PT_PHDR)
3168 app.base = (void *)(aux[AT_PHDR] - phdr->p_vaddr);
3169 else if (phdr->p_type == PT_INTERP)
3170 interp_off = (size_t)phdr->p_vaddr;
3171 else if (phdr->p_type == PT_TLS) {
3172 tls_image = phdr->p_vaddr;
3173 app.tls.len = phdr->p_filesz;
3174 app.tls.size = phdr->p_memsz;
3175 app.tls.align = phdr->p_align;
3176 }
3177 }
3178 if (DL_FDPIC) app.loadmap = app_loadmap;
3179 if (app.tls.size) app.tls.image = laddr(&app, tls_image);
3180 if (interp_off) ldso.name = laddr(&app, interp_off);
3181 if ((aux[0] & (1UL<<AT_EXECFN))
3182 && strncmp((char *)aux[AT_EXECFN], "/proc/", 6))
3183 app.name = (char *)aux[AT_EXECFN];
3184 else
3185 app.name = argv[0];
3186 kernel_mapped_dso(&app);
3187 } else {
3188 int fd;
3189 char *ldname = argv[0];
3190 size_t l = strlen(ldname);
3191 if (l >= 3 && !strcmp(ldname+l-3, "ldd")) ldd_mode = 1;
3192 argv++;
3193 while (argv[0] && argv[0][0]=='-' && argv[0][1]=='-') {
3194 char *opt = argv[0]+2;
3195 *argv++ = (void *)-1;
3196 if (!*opt) {
3197 break;
3198 } else if (!memcmp(opt, "list", 5)) {
3199 ldd_mode = 1;
3200 } else if (!memcmp(opt, "library-path", 12)) {
3201 if (opt[12]=='=') env_path = opt+13;
3202 else if (opt[12]) *argv = 0;
3203 else if (*argv) env_path = *argv++;
3204 } else if (!memcmp(opt, "preload", 7)) {
3205 if (opt[7]=='=') env_preload = opt+8;
3206 else if (opt[7]) *argv = 0;
3207 else if (*argv) env_preload = *argv++;
3208 } else if (!memcmp(opt, "argv0", 5)) {
3209 if (opt[5]=='=') replace_argv0 = opt+6;
3210 else if (opt[5]) *argv = 0;
3211 else if (*argv) replace_argv0 = *argv++;
3212 } else {
3213 argv[0] = 0;
3214 }
3215 }
3216 argv[-1] = (void *)(argc - (argv-argv_orig));
3217 if (!argv[0]) {
3218 dprintf(2, "musl libc (" LDSO_ARCH ")\n"
3219 "Version %s\n"
3220 "Dynamic Program Loader\n"
3221 "Usage: %s [options] [--] pathname%s\n",
3222 __libc_version, ldname,
3223 ldd_mode ? "" : " [args]");
3224 _exit(1);
3225 }
3226 fd = open(argv[0], O_RDONLY);
3227 if (fd < 0) {
3228 dprintf(2, "%s: cannot load %s: %s\n", ldname, argv[0], strerror(errno));
3229 _exit(1);
3230 }
3231 Ehdr *ehdr = map_library(fd, &app, NULL);
3232 if (!ehdr) {
3233 dprintf(2, "%s: %s: Not a valid dynamic program\n", ldname, argv[0]);
3234 _exit(1);
3235 }
3236 close(fd);
3237 ldso.name = ldname;
3238 app.name = argv[0];
3239 aux[AT_ENTRY] = (size_t)laddr(&app, ehdr->e_entry);
3240 /* Find the name that would have been used for the dynamic
3241 * linker had ldd not taken its place. */
3242 if (ldd_mode) {
3243 for (i=0; i<app.phnum; i++) {
3244 if (app.phdr[i].p_type == PT_INTERP)
3245 ldso.name = laddr(&app, app.phdr[i].p_vaddr);
3246 }
3247 dprintf(1, "\t%s (%p)\n", ldso.name, ldso.base);
3248 }
3249 }
3250 if (app.tls.size) {
3251 libc.tls_head = tls_tail = &app.tls;
3252 app.tls_id = tls_cnt = 1;
3253 #ifdef TLS_ABOVE_TP
3254 app.tls.offset = GAP_ABOVE_TP;
3255 app.tls.offset += (-GAP_ABOVE_TP + (uintptr_t)app.tls.image)
3256 & (app.tls.align-1);
3257 tls_offset = app.tls.offset + app.tls.size;
3258 #else
3259 tls_offset = app.tls.offset = app.tls.size
3260 + ( -((uintptr_t)app.tls.image + app.tls.size)
3261 & (app.tls.align-1) );
3262 #endif
3263 tls_align = MAXP2(tls_align, app.tls.align);
3264 }
3265 decode_dyn(&app);
3266 if (DL_FDPIC) {
3267 makefuncdescs(&app);
3268 if (!app.loadmap) {
3269 app.loadmap = (void *)&app_dummy_loadmap;
3270 app.loadmap->nsegs = 1;
3271 app.loadmap->segs[0].addr = (size_t)app.map;
3272 app.loadmap->segs[0].p_vaddr = (size_t)app.map
3273 - (size_t)app.base;
3274 app.loadmap->segs[0].p_memsz = app.map_len;
3275 }
3276 argv[-3] = (void *)app.loadmap;
3277 }
3278 app.is_global = true;
3279
3280 /* Initial dso chain consists only of the app. */
3281 head = tail = syms_tail = &app;
3282
3283 /* Donate unused parts of app and library mapping to malloc */
3284 reclaim_gaps(&app);
3285 reclaim_gaps(&ldso);
3286
3287 find_and_set_bss_name(&app);
3288 find_and_set_bss_name(&ldso);
3289
3290 /* Load preload/needed libraries, add symbols to global namespace. */
3291 ldso.deps = (struct dso **)no_deps;
3292 /* Init g_is_asan */
3293 g_is_asan = false;
3294 LD_LOGD("__dls3 ldso.name:%{public}s.", ldso.name);
3295 /* Use the ldso name to determine whether ASan is enabled */
3296 if (strstr(ldso.name, "-asan")) {
3297 g_is_asan = true;
3298 LD_LOGD("__dls3 g_is_asan is true.");
3299 }
3300 /* Init all namespaces from the config file. There is always a default namespace. */
3301 init_namespace(&app);
3302
3303 #ifdef LOAD_ORDER_RANDOMIZATION
3304 struct loadtasks *tasks = create_loadtasks();
3305 if (!tasks) {
3306 _exit(1);
3307 }
3308 if (env_preload) {
3309 load_preload(env_preload, get_default_ns(), tasks);
3310 }
3311 for (struct dso *q = head; q; q = q->next) {
3312 q->is_global = true;
3313 q->is_preload = true;
3314 }
3315 preload_deps(&app, tasks);
3316 unmap_preloaded_sections(tasks);
3317 shuffle_loadtasks(tasks);
3318 run_loadtasks(tasks, NULL);
3319 free_loadtasks(tasks);
3320 assign_tls(app.next);
3321 #else
3322 if (env_preload) load_preload(env_preload, get_default_ns());
3323 for (struct dso *q = head; q; q = q->next) {
3324 q->is_global = true;
3325 q->is_preload = true;
3326 }
3327 load_deps(&app, NULL);
3328 #endif
3329
3330 /* Set is_reloc_head_so_dep to true for all direct and indirect dependencies of app, including app itself. */
3331 for (struct dso *p = head; p; p = p->next) {
3332 p->is_reloc_head_so_dep = true;
3333 add_syms(p);
3334 }
3335
3336 /* Attach to vdso, if provided by the kernel, last so that it does
3337 * not become part of the global namespace. */
3338 if (search_vec(auxv, &vdso_base, AT_SYSINFO_EHDR) && vdso_base) {
3339 Ehdr *ehdr = (void *)vdso_base;
3340 Phdr *phdr = vdso.phdr = (void *)(vdso_base + ehdr->e_phoff);
3341 vdso.phnum = ehdr->e_phnum;
3342 vdso.phentsize = ehdr->e_phentsize;
3343 for (i=ehdr->e_phnum; i; i--, phdr=(void *)((char *)phdr + ehdr->e_phentsize)) {
3344 if (phdr->p_type == PT_DYNAMIC)
3345 vdso.dynv = (void *)(vdso_base + phdr->p_offset);
3346 if (phdr->p_type == PT_LOAD)
3347 vdso.base = (void *)(vdso_base - phdr->p_vaddr + phdr->p_offset);
3348 }
3349 vdso.name = "";
3350 vdso.shortname = "linux-gate.so.1";
3351 vdso.relocated = 1;
3352 vdso.deps = (struct dso **)no_deps;
3353 decode_dyn(&vdso);
3354 vdso.prev = tail;
3355 tail->next = &vdso;
3356 tail = &vdso;
3357 vdso.namespace = get_default_ns();
3358 ns_add_dso(vdso.namespace, &vdso);
3359 }
3360
3361 for (i=0; app.dynv[i]; i+=2) {
3362 if (!DT_DEBUG_INDIRECT && app.dynv[i]==DT_DEBUG)
3363 app.dynv[i+1] = (size_t)&debug;
3364 if (DT_DEBUG_INDIRECT && app.dynv[i]==DT_DEBUG_INDIRECT) {
3365 size_t *ptr = (size_t *) app.dynv[i+1];
3366 *ptr = (size_t)&debug;
3367 }
3368 if (app.dynv[i]==DT_DEBUG_INDIRECT_REL) {
3369 size_t *ptr = (size_t *)((size_t)&app.dynv[i] + app.dynv[i+1]);
3370 *ptr = (size_t)&debug;
3371 }
3372 }
3373
3374 /* This must be done before final relocations, since it calls
3375 * malloc, which may be provided by the application. Calling any
3376 * application code prior to the jump to its entry point is not
3377 * valid in our model and does not work with FDPIC, where there
3378 * are additional relocation-like fixups that only the entry point
3379 * code can see to perform. */
3380 main_ctor_queue = queue_ctors(&app);
3381
3382 /* Initial TLS must also be allocated before final relocations
3383 * might result in calloc being a call to application code. */
3384 update_tls_size();
3385 void *initial_tls = builtin_tls;
3386 if (libc.tls_size > sizeof builtin_tls || tls_align > MIN_TLS_ALIGN) {
3387 initial_tls = calloc(libc.tls_size, 1);
3388 if (!initial_tls) {
3389 dprintf(2, "%s: Error getting %zu bytes thread-local storage: %m\n",
3390 argv[0], libc.tls_size);
3391 _exit(127);
3392 }
3393 }
3394 static_tls_cnt = tls_cnt;
3395
3396 /* The main program must be relocated LAST since it may contain
3397 * copy relocations which depend on libraries' relocations. */
3398 reloc_all(app.next, NULL);
3399 reloc_all(&app, NULL);
3400 for (struct dso *q = head; q; q = q->next) {
3401 q->is_reloc_head_so_dep = false;
3402 }
3403
3404 /* Actual copying to new TLS needs to happen after relocations,
3405 * since the TLS images might have contained relocated addresses. */
3406 if (initial_tls != builtin_tls) {
3407 pthread_t self = __pthread_self();
3408 pthread_t td = __copy_tls(initial_tls);
3409 if (__init_tp(td) < 0) {
3410 a_crash();
3411 }
3412 td->tsd = self->tsd;
3413 } else {
3414 size_t tmp_tls_size = libc.tls_size;
3415 pthread_t self = __pthread_self();
3416 /* Temporarily set the tls size to the full size of
3417 * builtin_tls so that __copy_tls will use the same layout
3418 * as it did before. Then check, just to be safe. */
3419 libc.tls_size = sizeof builtin_tls;
3420 if (__copy_tls((void*)builtin_tls) != self) a_crash();
3421 libc.tls_size = tmp_tls_size;
3422 }
3423
3424 if (init_cfi_shadow(head, &ldso) == CFI_FAILED) {
3425 error("[%s] init_cfi_shadow failed: %m", __FUNCTION__);
3426 }
3427
3428 if (ldso_fail) _exit(127);
3429 if (ldd_mode) _exit(0);
3430
3431 /* Determine if malloc was interposed by a replacement implementation
3432 * so that calloc and the memalign family can harden against the
3433 * possibility of incomplete replacement. */
3434 if (find_sym(head, "malloc", 1).dso != &ldso)
3435 __malloc_replaced = 1;
3436 if (find_sym(head, "aligned_alloc", 1).dso != &ldso)
3437 __aligned_alloc_replaced = 1;
3438
3439 /* Switch to runtime mode: any further failures in the dynamic
3440 * linker are a reportable failure rather than a fatal startup
3441 * error. */
3442 runtime = 1;
3443
3444 sync_with_debugger();
3445
3446 if (replace_argv0) argv[0] = replace_argv0;
3447
3448 #ifdef USE_GWP_ASAN
3449 init_gwp_asan_by_libc(false);
3450 #endif
3451
3452 errno = 0;
3453
3454 CRTJMP((void *)aux[AT_ENTRY], argv - 1);
3455 for(;;);
3456 }
3457
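/* Reserve space for relocations that will be deferred until first use.  The
 * buffer is sized generously (three words per possible relocation, derived
 * from DT_RELSZ/DT_RELASZ/DT_PLTRELSZ) and the dso is chained onto lazy_head;
 * DSOs linked with BIND_NOW never defer relocations. */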
3458 static void prepare_lazy(struct dso *p)
3459 {
3460 size_t dyn[DYN_CNT], n, flags1=0;
3461 decode_vec(p->dynv, dyn, DYN_CNT);
3462 search_vec(p->dynv, &flags1, DT_FLAGS_1);
3463 if (dyn[DT_BIND_NOW] || (dyn[DT_FLAGS] & DF_BIND_NOW) || (flags1 & DF_1_NOW))
3464 return;
3465 n = dyn[DT_RELSZ]/2 + dyn[DT_RELASZ]/3 + dyn[DT_PLTRELSZ]/2 + 1;
3466 if (NEED_MIPS_GOT_RELOCS) {
3467 size_t j=0; search_vec(p->dynv, &j, DT_MIPS_GOTSYM);
3468 size_t i=0; search_vec(p->dynv, &i, DT_MIPS_SYMTABNO);
3469 n += i-j;
3470 }
3471 p->lazy = calloc(n, 3*sizeof(size_t));
3472 if (!p->lazy) {
3473 error("Error preparing lazy relocation for %s: %m", p->name);
3474 longjmp(*rtld_fail, 1);
3475 }
3476 p->lazy_next = lazy_head;
3477 lazy_head = p;
3478 }
3479
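/* Post-processing for a successful dlopen: bump the nr_dlopen reference count
 * of the dso and (once its BFS dependency list has been built) of each
 * dependency, propagate RTLD_NODELETE to the dependencies, and, when handle
 * randomization is enabled, return an opaque randomized handle instead of the
 * dso pointer. */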
3480 static void *dlopen_post(struct dso* p, int mode) {
3481 if (p == NULL) {
3482 return p;
3483 }
3484 bool is_dlclose_debug = false;
3485 if (is_dlclose_debug_enable()) {
3486 is_dlclose_debug = true;
3487 }
3488 p->nr_dlopen++;
3489 if (is_dlclose_debug) {
3490 LD_LOGE("[dlclose]: %{public}s nr_dlopen++ when dlopen %{public}s, nr_dlopen:%{public}d ",
3491 p->name, p->name, p->nr_dlopen);
3492 }
3493 if (p->bfs_built) {
3494 for (int i = 0; p->deps[i]; i++) {
3495 p->deps[i]->nr_dlopen++;
3496 if (is_dlclose_debug) {
3497 LD_LOGE("[dlclose]: %{public}s nr_dlopen++ when dlopen %{public}s, nr_dlopen:%{public}d",
3498 p->deps[i]->name, p->name, p->deps[i]->nr_dlopen);
3499 }
3500 if (mode & RTLD_NODELETE) {
3501 p->deps[i]->flags |= DSO_FLAGS_NODELETE;
3502 }
3503 }
3504 }
3505
3506 #ifdef HANDLE_RANDOMIZATION
3507 void *handle = assign_valid_handle(p);
3508 if (handle == NULL) {
3509 LD_LOGE("dlopen_post: generate random handle failed");
3510 do_dlclose(p, 0);
3511 }
3512
3513 return handle;
3514 #endif
3515
3516 return p;
3517 }
3518
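/* Namespace creation and manipulation is restricted: only callers whose own
 * namespace is in the list below, or calls targeting PERMITIED_TARGET, are
 * allowed (see is_permitted()). */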
3519 static char *dlopen_permitted_list[] =
3520 {
3521 "default",
3522 "ndk",
3523 };
3524
3525 #define PERMITIED_TARGET "nweb_ns"
3526 static bool in_permitted_list(char *caller, char *target)
3527 {
3528 for (int i = 0; i < sizeof(dlopen_permitted_list)/sizeof(char*); i++) {
3529 if (strcmp(dlopen_permitted_list[i], caller) == 0) {
3530 return true;
3531 }
3532 }
3533
3534 if (strcmp(PERMITIED_TARGET, target) == 0) {
3535 return true;
3536 }
3537
3538 return false;
3539 }
3540
3541 static bool is_permitted(const void *caller_addr, char *target)
3542 {
3543 struct dso *caller;
3544 ns_t *ns;
3545 caller = (struct dso *)addr2dso((size_t)caller_addr);
3546 if ((caller == NULL) || (caller->namespace == NULL)) {
3547 LD_LOGE("caller ns get error");
3548 return false;
3549 }
3550
3551 ns = caller->namespace;
3552 if (in_permitted_list(ns->ns_name, target) == false) {
3553 LD_LOGE("caller ns: %{public}s have no permission, target is %{public}s", ns->ns_name, target);
3554 return false;
3555 }
3556
3557 return true;
3558 }
3559
3560 /* Add namespace function.
3561 * Some limitations come from sanitizer:
3562 * Sanitizer requires this interface to be exposed.
3563 * Pay attention to call __builtin_return_address in this interface because sanitizer can hook and call this interface.
3564 */
3565 void *dlopen_impl(
3566 const char *file, int mode, const char *namespace, const void *caller_addr, const dl_extinfo *extinfo)
3567 {
3568 struct dso *volatile p, *orig_tail, *orig_syms_tail, *orig_lazy_head, *next;
3569 struct tls_module *orig_tls_tail;
3570 size_t orig_tls_cnt, orig_tls_offset, orig_tls_align;
3571 size_t i;
3572 int cs;
3573 jmp_buf jb;
3574 struct dso **volatile ctor_queue = 0;
3575 ns_t *ns;
3576 struct dso *caller;
3577 bool reserved_address = false;
3578 bool reserved_address_recursive = false;
3579 struct reserved_address_params reserved_params = {0};
3580 struct dlopen_time_info dlopen_cost = {0};
3581 struct timespec time_start, time_end, total_start, total_end;
3582 struct dso *current_so = NULL;
3583 clock_gettime(CLOCK_MONOTONIC, &total_start);
3584 #ifdef LOAD_ORDER_RANDOMIZATION
3585 struct loadtasks *tasks = NULL;
3586 struct loadtask *task = NULL;
3587 bool is_task_appended = false;
3588 #endif
3589 #ifdef IS_ASAN
3590 char asan_file[PATH_MAX] = {0};
3591 #endif
3592
3593 if (!file) {
3594 LD_LOGD("dlopen_impl file is null, return head.");
3595 return dlopen_post(head, mode);
3596 }
3597
3598 #ifdef IS_ASAN
3599 if (g_is_asan) {
3600 char *place = strstr(file, LIB);
3601 if (place && asan_file) {
3602 int ret = snprintf(asan_file, sizeof asan_file, "%.*s/asan%s", (int)(place - file), file, place);
3603 if (ret > 0 && access(asan_file, F_OK) == 0) {
3604 LD_LOGI("dlopen_impl redirect to asan library.");
3605 file = asan_file;
3606 }
3607 }
3608 }
3609 #endif
3610
3611 if (extinfo) {
3612 reserved_address_recursive = extinfo->flag & DL_EXT_RESERVED_ADDRESS_RECURSIVE;
3613 if (extinfo->flag & DL_EXT_RESERVED_ADDRESS) {
3614 reserved_address = true;
3615 reserved_params.start_addr = extinfo->reserved_addr;
3616 reserved_params.reserved_size = extinfo->reserved_size;
3617 reserved_params.must_use_reserved = true;
3618 reserved_params.reserved_address_recursive = reserved_address_recursive;
3619 } else if (extinfo->flag & DL_EXT_RESERVED_ADDRESS_HINT) {
3620 reserved_address = true;
3621 reserved_params.start_addr = extinfo->reserved_addr;
3622 reserved_params.reserved_size = extinfo->reserved_size;
3623 reserved_params.must_use_reserved = false;
3624 reserved_params.reserved_address_recursive = reserved_address_recursive;
3625 }
3626 }
3627
3628 pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
3629 pthread_rwlock_wrlock(&lock);
3630 __inhibit_ptc();
3631 trace_marker_reset();
3632 trace_marker_begin(HITRACE_TAG_MUSL, "dlopen: ", file);
3633
3634 	/* When the namespace does not exist, use the caller's namespace,
3635 	 * and when the caller does not exist, use the default namespace. */
3636 caller = (struct dso *)addr2dso((size_t)caller_addr);
3637 ns = find_ns_by_name(namespace);
3638 if (!ns) ns = ((caller && caller->namespace) ? caller->namespace : get_default_ns());
3639
3640 p = 0;
3641 if (shutting_down) {
3642 error("Cannot dlopen while program is exiting.");
3643 goto end;
3644 }
3645 orig_tls_tail = tls_tail;
3646 orig_tls_cnt = tls_cnt;
3647 orig_tls_offset = tls_offset;
3648 orig_tls_align = tls_align;
3649 orig_lazy_head = lazy_head;
3650 orig_syms_tail = syms_tail;
3651 orig_tail = tail;
3652 noload = mode & RTLD_NOLOAD;
3653
3654 rtld_fail = &jb;
3655 if (setjmp(*rtld_fail)) {
3656 /* Clean up anything new that was (partially) loaded */
3657 revert_syms(orig_syms_tail);
3658 for (p = orig_tail->next; p; p = next) {
3659 next = p->next;
3660 while (p->td_index) {
3661 void *tmp = p->td_index->next;
3662 free(p->td_index);
3663 p->td_index = tmp;
3664 }
3665 free(p->funcdescs);
3666 free(p->rpath);
3667 if (p->deps) {
3668 for (int i = 0; i < p->ndeps_direct; i++) {
3669 remove_dso_parent(p->deps[i], p);
3670 }
3671 }
3672 free(p->deps);
3673 dlclose_ns(p);
3674 unmap_library(p);
3675 if (p->parents) {
3676 free(p->parents);
3677 }
3678 free_reloc_can_search_dso(p);
3679 }
3680 for (p=orig_tail->next; p; p=next) {
3681 next = p->next;
3682 free(p);
3683 }
3684 free(ctor_queue);
3685 ctor_queue = 0;
3686 if (!orig_tls_tail) libc.tls_head = 0;
3687 tls_tail = orig_tls_tail;
3688 if (tls_tail) tls_tail->next = 0;
3689 tls_cnt = orig_tls_cnt;
3690 tls_offset = orig_tls_offset;
3691 tls_align = orig_tls_align;
3692 lazy_head = orig_lazy_head;
3693 tail = orig_tail;
3694 tail->next = 0;
3695 p = 0;
3696 goto end;
3697 } else {
3698 #ifdef LOAD_ORDER_RANDOMIZATION
3699 tasks = create_loadtasks();
3700 if (!tasks) {
3701 LD_LOGE("dlopen_impl create loadtasks failed");
3702 goto end;
3703 }
3704 task = create_loadtask(file, head, ns, true);
3705 if (!task) {
3706 LD_LOGE("dlopen_impl create loadtask failed");
3707 goto end;
3708 }
3709 trace_marker_begin(HITRACE_TAG_MUSL, "loading: entry so", file);
3710 clock_gettime(CLOCK_MONOTONIC, &time_start);
3711 if (!load_library_header(task)) {
3712 error(noload ?
3713 "Library %s is not already loaded" :
3714 "Error loading shared library %s: %m",
3715 file);
3716 LD_LOGE("dlopen_impl load library header failed for %{public}s", task->name);
3717 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3718 goto end;
3719 }
3720 if (reserved_address) {
3721 reserved_params.target = task->p;
3722 }
3723 }
3724 if (!task->p) {
3725 LD_LOGE("dlopen_impl load library failed for %{public}s", task->name);
3726 error(noload ?
3727 "Library %s is not already loaded" :
3728 "Error loading shared library %s: %m",
3729 file);
3730 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3731 goto end;
3732 }
3733 clock_gettime(CLOCK_MONOTONIC, &time_end);
3734 dlopen_cost.entry_header_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3735 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3736 if (!task->isloaded) {
3737 is_task_appended = append_loadtasks(tasks, task);
3738 }
3739 clock_gettime(CLOCK_MONOTONIC, &time_start);
3740 preload_deps(task->p, tasks);
3741 clock_gettime(CLOCK_MONOTONIC, &time_end);
3742 dlopen_cost.deps_header_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3743 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3744 unmap_preloaded_sections(tasks);
3745 if (!reserved_address_recursive) {
3746 shuffle_loadtasks(tasks);
3747 }
3748 clock_gettime(CLOCK_MONOTONIC, &time_start);
3749 run_loadtasks(tasks, reserved_address ? &reserved_params : NULL);
3750 clock_gettime(CLOCK_MONOTONIC, &time_end);
3751 dlopen_cost.map_so_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3752 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3753 p = task->p;
3754 if (!task->isloaded) {
3755 assign_tls(p);
3756 }
3757 if (!is_task_appended) {
3758 free_task(task);
3759 task = NULL;
3760 }
3761 free_loadtasks(tasks);
3762 tasks = NULL;
3763 #else
3764 trace_marker_begin(HITRACE_TAG_MUSL, "loading: entry so", file);
3765 p = load_library(file, head, ns, true, reserved_address ? &reserved_params : NULL);
3766 }
3767
3768 if (!p) {
3769 error(noload ?
3770 "Library %s is not already loaded" :
3771 "Error loading shared library %s: %m",
3772 file);
3773 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3774 goto end;
3775 }
3776 /* First load handling */
3777 load_deps(p, reserved_address && reserved_address_recursive ? &reserved_params : NULL);
3778 #endif
3779 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3780 extend_bfs_deps(p, 0);
3781 pthread_mutex_lock(&init_fini_lock);
3782 int constructed = p->constructed;
3783 pthread_mutex_unlock(&init_fini_lock);
3784 if (!constructed) ctor_queue = queue_ctors(p);
3785 if (!p->relocated && (mode & RTLD_LAZY)) {
3786 prepare_lazy(p);
3787 for (i = 0; p->deps[i]; i++)
3788 if (!p->deps[i]->relocated)
3789 prepare_lazy(p->deps[i]);
3790 }
3791 if (!p->relocated || (mode & RTLD_GLOBAL)) {
3792 /* Make new symbols global, at least temporarily, so we can do
3793 * relocations. If not RTLD_GLOBAL, this is reverted below. */
3794 add_syms(p);
3795 		/* Set is_reloc_head_so_dep to true for all direct and indirect dependent sos of p, including p itself. */
3796 p->is_reloc_head_so_dep = true;
3797 for (i = 0; p->deps[i]; i++) {
3798 p->deps[i]->is_reloc_head_so_dep = true;
3799 add_syms(p->deps[i]);
3800 }
3801 }
3802 struct dso *reloc_head_so = p;
3803 trace_marker_begin(HITRACE_TAG_MUSL, "linking: entry so", p->name);
3804 clock_gettime(CLOCK_MONOTONIC, &time_start);
3805 if (!p->relocated) {
3806 reloc_all(p, extinfo);
3807 }
3808 clock_gettime(CLOCK_MONOTONIC, &time_end);
3809 dlopen_cost.reloc_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3810 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3811 trace_marker_end(HITRACE_TAG_MUSL);
3812 reloc_head_so->is_reloc_head_so_dep = false;
3813 for (size_t i = 0; reloc_head_so->deps[i]; i++) {
3814 reloc_head_so->deps[i]->is_reloc_head_so_dep = false;
3815 }
3816
3817 /* If RTLD_GLOBAL was not specified, undo any new additions
3818 * to the global symbol table. This is a nop if the library was
3819 * previously loaded and already global. */
3820 if (!(mode & RTLD_GLOBAL))
3821 revert_syms(orig_syms_tail);
3822
3823 /* Processing of deferred lazy relocations must not happen until
3824 * the new libraries are committed; otherwise we could end up with
3825 * relocations resolved to symbol definitions that get removed. */
3826 redo_lazy_relocs();
3827 clock_gettime(CLOCK_MONOTONIC, &time_start);
3828 if (map_dso_to_cfi_shadow(p) == CFI_FAILED) {
3829 error("[%s] map_dso_to_cfi_shadow failed: %m", __FUNCTION__);
3830 longjmp(*rtld_fail, 1);
3831 }
3832 clock_gettime(CLOCK_MONOTONIC, &time_end);
3833 dlopen_cost.map_cfi_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3834 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3835
3836 if (mode & RTLD_NODELETE) {
3837 p->flags |= DSO_FLAGS_NODELETE;
3838 }
3839
3840 update_tls_size();
3841 if (tls_cnt != orig_tls_cnt)
3842 install_new_tls();
3843
3844 if (orig_tail != tail) {
3845 notify_addition_to_debugger(orig_tail->next);
3846 }
3847
3848 orig_tail = tail;
3849 current_so = p;
3850 p = dlopen_post(p, mode);
3851 end:
3852 #ifdef LOAD_ORDER_RANDOMIZATION
3853 if (!is_task_appended) {
3854 free_task(task);
3855 }
3856 free_loadtasks(tasks);
3857 #endif
3858 __release_ptc();
3859 clock_gettime(CLOCK_MONOTONIC, &time_start);
3860 pthread_rwlock_unlock(&lock);
3861 if (ctor_queue) {
3862 do_init_fini(ctor_queue);
3863 free(ctor_queue);
3864 }
3865 clock_gettime(CLOCK_MONOTONIC, &time_end);
3866 dlopen_cost.init_time = (time_end.tv_sec - time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3867 + (time_end.tv_nsec - time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3868 pthread_setcancelstate(cs, 0);
3869 trace_marker_end(HITRACE_TAG_MUSL); // "dlopen: " trace end.
3870 clock_gettime(CLOCK_MONOTONIC, &total_end);
3871 #ifdef USE_ENCAPS
3872 dlopen_cost.encaps_time = encpas_cost_time;
3873 #else
3874 dlopen_cost.encaps_time = 0;
3875 #endif
3876 dlopen_cost.total_time = (total_end.tv_sec - total_start.tv_sec) * CLOCK_SECOND_TO_MILLI
3877 + (total_end.tv_nsec - total_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
3878 if ((dlopen_cost.total_time > DLOPEN_TIME_THRESHOLD || is_dlopen_debug_enable()) && current_so) {
3879 LD_LOGE("dlopen so: %{public}s time cost: "
3880 "total_time: %{public}d ms, "
3881 "entry_header_time: %{public}d ms, "
3882 "deps_header_time: %{public}d ms, "
3883 "map_so_time: %{public}d ms, "
3884 "reloc_time: %{public}d ms, "
3885 "map_cfi_time: %{public}d ms, "
3886 "init_time: %{public}d ms, "
3887 "encaps_time: %{public}d ms",
3888 current_so->name,
3889 dlopen_cost.total_time,
3890 dlopen_cost.entry_header_time,
3891 dlopen_cost.deps_header_time,
3892 dlopen_cost.map_so_time,
3893 dlopen_cost.reloc_time,
3894 dlopen_cost.map_cfi_time,
3895 dlopen_cost.init_time,
3896 dlopen_cost.encaps_time);
3897 }
3898 #ifdef USE_ENCAPS
3899 encpas_cost_time = 0;
3900 #endif
3901 return p;
3902 }
3903
3904 void *dlopen(const char *file, int mode)
3905 {
3906 const void *caller_addr = __builtin_return_address(0);
3907 musl_log_reset();
3908 ld_log_reset();
3909 LD_LOGI("dlopen file:%{public}s, mode:%{public}x ,caller_addr:%{public}p .", file, mode, caller_addr);
3910 return dlopen_impl(file, mode, NULL, caller_addr, NULL);
3911 }
3912
3913 void dlns_init(Dl_namespace *dlns, const char *name)
3914 {
3915 if (!dlns) {
3916 return;
3917 }
3918 if (!name) {
3919 dlns->name[0] = 0;
3920 return;
3921 }
3922
3923 const void *caller_addr = __builtin_return_address(0);
3924 if (is_permitted(caller_addr, name) == false) {
3925 return;
3926 }
3927
3928 	snprintf(dlns->name, sizeof dlns->name, "%s", name);
3929 LD_LOGI("dlns_init dlns->name:%{public}s .", dlns->name);
3930 }
3931
3932 int dlns_get(const char *name, Dl_namespace *dlns)
3933 {
3934 if (!dlns) {
3935 LD_LOGE("dlns_get dlns is null.");
3936 return EINVAL;
3937 }
3938 int ret = 0;
3939 ns_t *ns = NULL;
3940 pthread_rwlock_rdlock(&lock);
3941 if (!name) {
3942 struct dso *caller;
3943 const void *caller_addr = __builtin_return_address(0);
3944 caller = (struct dso *)addr2dso((size_t)caller_addr);
3945 ns = ((caller && caller->namespace) ? caller->namespace : get_default_ns());
3946 		(void)snprintf(dlns->name, sizeof dlns->name, "%s", ns->ns_name);
3947 LD_LOGI("dlns_get name is null, current dlns dlns->name:%{public}s.", dlns->name);
3948 } else {
3949 ns = find_ns_by_name(name);
3950 if (ns) {
3951 			(void)snprintf(dlns->name, sizeof dlns->name, "%s", ns->ns_name);
3952 LD_LOGI("dlns_get found ns, current dlns dlns->name:%{public}s.", dlns->name);
3953 } else {
3954 LD_LOGI("dlns_get not found ns! name:%{public}s.", name);
3955 ret = ENOKEY;
3956 }
3957 }
3958 pthread_rwlock_unlock(&lock);
3959 return ret;
3960 }
3961
3962 void *dlopen_ns(Dl_namespace *dlns, const char *file, int mode)
3963 {
3964 const void *caller_addr = __builtin_return_address(0);
3965 if (is_permitted(caller_addr, dlns->name) == false) {
3966 return NULL;
3967 }
3968
3969 musl_log_reset();
3970 ld_log_reset();
3971 LD_LOGI("dlopen_ns file:%{public}s, mode:%{public}x , caller_addr:%{public}p , dlns->name:%{public}s.",
3972 file,
3973 mode,
3974 caller_addr,
3975 dlns ? dlns->name : "NULL");
3976 return dlopen_impl(file, mode, dlns->name, caller_addr, NULL);
3977 }
3978
3979 void *dlopen_ns_ext(Dl_namespace *dlns, const char *file, int mode, const dl_extinfo *extinfo)
3980 {
3981 const void *caller_addr = __builtin_return_address(0);
3982 if (is_permitted(caller_addr, dlns->name) == false) {
3983 return NULL;
3984 }
3985
3986 musl_log_reset();
3987 ld_log_reset();
3988 LD_LOGI("dlopen_ns_ext file:%{public}s, mode:%{public}x , caller_addr:%{public}p , "
3989 "dlns->name:%{public}s. , extinfo->flag:%{public}x",
3990 file,
3991 mode,
3992 caller_addr,
3993 dlns->name,
3994 extinfo ? extinfo->flag : 0);
3995 return dlopen_impl(file, mode, dlns->name, caller_addr, extinfo);
3996 }
3997
3998 int dlns_create2(Dl_namespace *dlns, const char *lib_path, int flags)
3999 {
4000 if (!dlns) {
4001 LD_LOGE("dlns_create2 dlns is null.");
4002 return EINVAL;
4003 }
4004 ns_t *ns;
4005
4006 pthread_rwlock_wrlock(&lock);
4007 const void *caller_addr = __builtin_return_address(0);
4008 if (is_permitted(caller_addr, dlns->name) == false) {
4009 pthread_rwlock_unlock(&lock);
4010 return EPERM;
4011 }
4012
4013 ns = find_ns_by_name(dlns->name);
4014 if (ns) {
4015 LD_LOGE("dlns_create2 ns is exist.");
4016 pthread_rwlock_unlock(&lock);
4017 return EEXIST;
4018 }
4019 ns = ns_alloc();
4020 if (!ns) {
4021 LD_LOGE("dlns_create2 no memery.");
4022 pthread_rwlock_unlock(&lock);
4023 return ENOMEM;
4024 }
4025 ns_set_name(ns, dlns->name);
4026 ns_set_flag(ns, flags);
4027 ns_add_dso(ns, get_default_ns()->ns_dsos->dsos[0]); /* add main app to this namespace*/
4028 nslist_add_ns(ns); /* add ns to list*/
4029 ns_set_lib_paths(ns, lib_path);
4030
4031 if ((flags & CREATE_INHERIT_DEFAULT) != 0) {
4032 ns_add_inherit(ns, get_default_ns(), NULL);
4033 }
4034
4035 if ((flags & CREATE_INHERIT_CURRENT) != 0) {
4036 struct dso *caller;
4037 caller_addr = __builtin_return_address(0);
4038 caller = (struct dso *)addr2dso((size_t)caller_addr);
4039 if (caller && caller->namespace) {
4040 ns_add_inherit(ns, caller->namespace, NULL);
4041 }
4042 }
4043
4044 LD_LOGI("dlns_create2:"
4045 "ns_name: %{public}s ,"
4046 "separated:%{public}d ,"
4047 "lib_paths:%{public}s ",
4048 ns->ns_name, ns->separated, ns->lib_paths);
4049 pthread_rwlock_unlock(&lock);
4050
4051 return 0;
4052 }
4053
4054 int dlns_create(Dl_namespace *dlns, const char *lib_path)
4055 {
4056 LD_LOGI("dlns_create lib_paths:%{public}s", lib_path);
4057 return dlns_create2(dlns, lib_path, CREATE_INHERIT_DEFAULT);
4058 }
4059
4060 int dlns_inherit(Dl_namespace *dlns, Dl_namespace *inherited, const char *shared_libs)
4061 {
4062 if (!dlns || !inherited) {
4063 LD_LOGE("dlns_inherit dlns or inherited is null.");
4064 return EINVAL;
4065 }
4066
4067 pthread_rwlock_wrlock(&lock);
4068 const void *caller_addr = __builtin_return_address(0);
4069 if (is_permitted(caller_addr, dlns->name) == false) {
4070 pthread_rwlock_unlock(&lock);
4071 return EPERM;
4072 }
4073
4074 ns_t* ns = find_ns_by_name(dlns->name);
4075 ns_t* ns_inherited = find_ns_by_name(inherited->name);
4076 if (!ns || !ns_inherited) {
4077 LD_LOGE("dlns_inherit ns or ns_inherited is not found.");
4078 pthread_rwlock_unlock(&lock);
4079 return ENOKEY;
4080 }
4081 ns_add_inherit(ns, ns_inherited, shared_libs);
4082 pthread_rwlock_unlock(&lock);
4083
4084 return 0;
4085 }
4086
4087 static void dlclose_ns(struct dso *p)
4088 {
4089 if (!p) return;
4090 ns_t * ns = p->namespace;
4091 if (!ns || !ns->ns_dsos) return;
4092 for (size_t i = 0; i < ns->ns_dsos->num; i++) {
4093 if (p == ns->ns_dsos->dsos[i]) {
4094 for (size_t j = i + 1; j < ns->ns_dsos->num; j++) {
4095 ns->ns_dsos->dsos[j - 1] = ns->ns_dsos->dsos[j];
4096 }
4097 ns->ns_dsos->num--;
4098 return;
4099 }
4100 }
4101 }
4102
4103 hidden int __dl_invalid_handle(void *h)
4104 {
4105 struct dso *p;
4106 for (p=head; p; p=p->next) if (h==p) return 0;
4107 error("Invalid library handle %p", (void *)h);
4108 return 1;
4109 }
4110
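/* Map an address to the DSO whose PT_LOAD segments contain it.  Returns 0 if
 * the address lies inside a DSO's mapping but outside any loadable segment,
 * or if no DSO matches at all. */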
4111 void *addr2dso(size_t a)
4112 {
4113 struct dso *p;
4114 size_t i;
4115 for (p=head; p; p=p->next) {
4116 if (a < p->map || a - (size_t)p->map >= p->map_len) continue;
4117 Phdr *ph = p->phdr;
4118 size_t phcnt = p->phnum;
4119 size_t entsz = p->phentsize;
4120 size_t base = (size_t)p->base;
4121 for (; phcnt--; ph=(void *)((char *)ph+entsz)) {
4122 if (ph->p_type != PT_LOAD) continue;
4123 if (a-base-ph->p_vaddr < ph->p_memsz)
4124 return p;
4125 }
4126 if (a-(size_t)p->map < p->map_len)
4127 return 0;
4128 }
4129 return 0;
4130 }
4131
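/* Core of dlsym()/dlvsym(): choose the lookup scope from the handle
 * (RTLD_DEFAULT uses the global symbol list, RTLD_NEXT starts after the
 * caller's dso, any other handle uses that dso's dependency list and
 * namespace), then convert the resolved symbol to an address.  TLS symbols go
 * through __tls_get_addr and FDPIC functions through function descriptors. */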
4132 static void *do_dlsym(struct dso *p, const char *s, const char *v, void *ra)
4133 {
4134 int use_deps = 0;
4135 bool ra2dso = false;
4136 ns_t *ns = NULL;
4137 struct dso *caller = NULL;
4138 if (p == head || p == RTLD_DEFAULT) {
4139 p = head;
4140 ra2dso = true;
4141 } else if (p == RTLD_NEXT) {
4142 p = addr2dso((size_t)ra);
4143 if (!p) p=head;
4144 p = p->next;
4145 ra2dso = true;
4146 #ifndef HANDLE_RANDOMIZATION
4147 } else if (__dl_invalid_handle(p)) {
4148 return 0;
4149 #endif
4150 } else {
4151 use_deps = 1;
4152 ns = p->namespace;
4153 }
4154 if (ra2dso) {
4155 caller = (struct dso *)addr2dso((size_t)ra);
4156 if (caller && caller->namespace) {
4157 ns = caller->namespace;
4158 }
4159 }
4160 trace_marker_begin(HITRACE_TAG_MUSL, "dlsym: ", (s == NULL ? "(NULL)" : s));
4161 struct verinfo verinfo = { .s = s, .v = v, .use_vna_hash = false };
4162 struct symdef def = use_deps ? find_sym_by_deps(p, &verinfo, 0, ns) :
4163 find_sym2(p, &verinfo, 0, use_deps, ns);
4164 trace_marker_end(HITRACE_TAG_MUSL);
4165 if (!def.sym) {
4166 LD_LOGW("do_dlsym failed: symbol not found. so=%{public}s s=%{public}s v=%{public}s",
4167 (p == NULL ? "NULL" : p->name), s, v);
4168 error("do_dlsym failed: Symbol not found: %s, version: %s so=%s",
4169 s, strlen(v) > 0 ? v : "null", (p == NULL ? "NULL" : p->name));
4170 return 0;
4171 }
4172 if ((def.sym->st_info&0xf) == STT_TLS)
4173 return __tls_get_addr((tls_mod_off_t []){def.dso->tls_id, def.sym->st_value-DTP_OFFSET});
4174 if (DL_FDPIC && (def.sym->st_info&0xf) == STT_FUNC)
4175 return def.dso->funcdescs + (def.sym - def.dso->syms);
4176 return laddr(def.dso, def.sym->st_value);
4177 }
4178
4179 extern int invalidate_exit_funcs(struct dso *p);
4180
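/* Decide whether a dso may be unloaded.  UNLOAD_COMMON_CHECK rejects invalid
 * handles, libraries not loaded via dlopen and RTLD_NODELETE libraries;
 * UNLOAD_NR_DLOPEN_CHECK additionally requires the dlopen reference count to
 * have dropped to zero. */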
4181 static int so_can_unload(struct dso *p, int check_flag)
4182 {
4183 if ((check_flag & UNLOAD_COMMON_CHECK) != 0) {
4184 if (__dl_invalid_handle(p)) {
4185 LD_LOGE("[dlclose]: invalid handle %{public}p", p);
4186 error("[dlclose]: Handle is invalid.");
4187 return 0;
4188 }
4189
4190 if (!p->by_dlopen) {
4191 LD_LOGD("[dlclose]: skip unload %{public}s because it's not loaded by dlopen", p->name);
4192 return 0;
4193 }
4194
4195 /* dso is marked as RTLD_NODELETE library, do nothing here. */
4196 if ((p->flags & DSO_FLAGS_NODELETE) != 0) {
4197 LD_LOGD("[dlclose]: skip unload %{public}s because flags is RTLD_NODELETE", p->name);
4198 return 0;
4199 }
4200 }
4201
4202 if ((check_flag & UNLOAD_NR_DLOPEN_CHECK) != 0) {
4203 if (p->nr_dlopen > 0) {
4204 LD_LOGD("[dlclose]: skip unload %{public}s because nr_dlopen=%{public}d > 0", p->name, p->nr_dlopen);
4205 return 0;
4206 }
4207 }
4208
4209 return 1;
4210 }
4211
4212 static int dlclose_post(struct dso *p)
4213 {
4214 if (p == NULL) {
4215 return -1;
4216 }
4217 #ifdef ENABLE_HWASAN
4218 if (libc.unload_hook) {
4219 libc.unload_hook((unsigned long int)p->base, p->phdr, p->phnum);
4220 }
4221 #endif
4222 unmap_dso_from_cfi_shadow(p);
4223 unmap_library(p);
4224 if (p->parents) {
4225 free(p->parents);
4226 }
4227 free_reloc_can_search_dso(p);
4228 if (p->tls.size == 0) {
4229 free(p);
4230 }
4231
4232 ++subcnt;
4233 return 0;
4234 }
4235
4236 static int dlclose_impl(struct dso *p)
4237 {
4238 struct dso *d;
4239
4240 trace_marker_reset();
4241 trace_marker_begin(HITRACE_TAG_MUSL, "dlclose", p->name);
4242
4243 /* remove dso symbols from global list */
4244 if (p->syms_next) {
4245 for (d = head; d->syms_next != p; d = d->syms_next)
4246 ; /* NOP */
4247 d->syms_next = p->syms_next;
4248 } else if (p == syms_tail) {
4249 for (d = head; d->syms_next != p; d = d->syms_next)
4250 ; /* NOP */
4251 d->syms_next = NULL;
4252 syms_tail = d;
4253 }
4254
4255 /* remove dso from lazy list if needed */
4256 if (p == lazy_head) {
4257 lazy_head = p->lazy_next;
4258 } else if (p->lazy_next) {
4259 for (d = lazy_head; d->lazy_next != p; d = d->lazy_next)
4260 ; /* NOP */
4261 d->lazy_next = p->lazy_next;
4262 }
4263
4264 pthread_mutex_lock(&init_fini_lock);
4265 /* remove dso from fini list */
4266 if (p == fini_head) {
4267 fini_head = p->fini_next;
4268 } else if (p->fini_next) {
4269 for (d = fini_head; d->fini_next != p; d = d->fini_next)
4270 ; /* NOP */
4271 d->fini_next = p->fini_next;
4272 }
4273 pthread_mutex_unlock(&init_fini_lock);
4274
4275 /* empty tls image */
4276 if (p->tls.size != 0) {
4277 p->tls.image = NULL;
4278 }
4279
4280 /* remove dso from global dso list */
4281 if (p == tail) {
4282 tail = p->prev;
4283 tail->next = NULL;
4284 } else {
4285 p->next->prev = p->prev;
4286 p->prev->next = p->next;
4287 }
4288
4289 /* remove dso from namespace */
4290 dlclose_ns(p);
4291
4292 	/* remove the randomized handle associated with this dso, if any */
4293 void* handle = find_handle_by_dso(p);
4294 if (handle) {
4295 remove_handle_node(handle);
4296 }
4297
4298 /* after destruct, invalidate atexit funcs which belong to this dso */
4299 #if (defined(FEATURE_ATEXIT_CB_PROTECT))
4300 invalidate_exit_funcs(p);
4301 #endif
4302
4303 notify_remove_to_debugger(p);
4304
4305 if (p->lazy != NULL)
4306 free(p->lazy);
4307 if (p->deps != no_deps)
4308 free(p->deps);
4309
4310 if (p->deps_all_built) {
4311 free(p->deps_all);
4312 }
4313
4314 trace_marker_end(HITRACE_TAG_MUSL);
4315
4316 return 0;
4317 }
4318
4319 static int do_dlclose(struct dso *p, bool check_deps_all)
4320 {
4321 struct dso_entry *ef = NULL;
4322 struct dso_entry *ef_tmp = NULL;
4323 size_t n;
4324 int unload_check_result;
4325 TAILQ_HEAD(unload_queue, dso_entry) unload_queue;
4326 TAILQ_HEAD(need_unload_queue, dso_entry) need_unload_queue;
4327 unload_check_result = so_can_unload(p, UNLOAD_COMMON_CHECK);
4328 if (unload_check_result != 1) {
4329 return unload_check_result;
4330 }
4331 	// Decrement unconditionally to balance the unconditional increment in dlopen_post.
4332 if (p->nr_dlopen > 0) {
4333 --(p->nr_dlopen);
4334 } else {
4335 LD_LOGE("[dlclose]: number of dlopen and dlclose of %{public}s doesn't match when dlclose %{public}s",
4336 p->name, p->name);
4337 return 0;
4338 }
4339
4340 if (p->bfs_built) {
4341 for (int i = 0; p->deps[i]; i++) {
4342 if (p->deps[i]->nr_dlopen > 0) {
4343 p->deps[i]->nr_dlopen--;
4344 } else {
4345 LD_LOGE("[dlclose]: number of dlopen and dlclose of %{public}s doesn't match when dlclose %{public}s",
4346 p->deps[i]->name, p->name);
4347 return 0;
4348 }
4349 }
4350 } else {
4351 		/* This part is used for thread-local object destructors:
4352 		 * - nr_dlopen increases for all deps (including self) when a thread-local object destructor is added.
4353 		 * - nr_dlopen decreases for all deps (including self) when a thread-local object destructor is called.
4354 		 */
4355 if (check_deps_all && p->deps_all_built) {
4356 for (int i = 0; p->deps_all[i]; i++) {
4357 if (p->deps_all[i]->nr_dlopen > 0) {
4358 p->deps_all[i]->nr_dlopen--;
4359 } else {
4360 LD_LOGE("[dlclose]: number of dlopen and dlclose of %{public}s doesn't match when dlclose %{public}s",
4361 p->deps_all[i]->name, p->name);
4362 return 0;
4363 }
4364 }
4365 }
4366 }
4367
4368 unload_check_result = so_can_unload(p, UNLOAD_NR_DLOPEN_CHECK);
4369 if (unload_check_result != 1) {
4370 return unload_check_result;
4371 }
4372 TAILQ_INIT(&unload_queue);
4373 TAILQ_INIT(&need_unload_queue);
4374 struct dso_entry *start_entry = (struct dso_entry *)malloc(sizeof(struct dso_entry));
4375 start_entry->dso = p;
4376 TAILQ_INSERT_TAIL(&unload_queue, start_entry, entries);
4377
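	/* Breadth-first walk starting at p: each dso is moved into
	 * need_unload_queue at most once, and a direct dependency is enqueued for
	 * unloading only when removing this parent drops its parent count to zero
	 * and it passes all unload checks. */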
4378 while (!TAILQ_EMPTY(&unload_queue)) {
4379 struct dso_entry *ecur = TAILQ_FIRST(&unload_queue);
4380 struct dso *cur = ecur->dso;
4381 TAILQ_REMOVE(&unload_queue, ecur, entries);
4382 bool already_in_need_unload_queue = false;
4383 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4384 if (ef->dso == cur) {
4385 already_in_need_unload_queue = true;
4386 break;
4387 }
4388 }
4389 if (already_in_need_unload_queue) {
4390 continue;
4391 }
4392 TAILQ_INSERT_TAIL(&need_unload_queue, ecur, entries);
4393 for (int i = 0; i < cur->ndeps_direct; i++) {
4394 remove_dso_parent(cur->deps[i], cur);
4395 if ((cur->deps[i]->parents_count == 0) && (so_can_unload(cur->deps[i], UNLOAD_ALL_CHECK) == 1)) {
4396 bool already_in_unload_queue = false;
4397 TAILQ_FOREACH(ef, &unload_queue, entries) {
4398 if (ef->dso == cur->deps[i]) {
4399 already_in_unload_queue = true;
4400 break;
4401 }
4402 }
4403 if (already_in_unload_queue) {
4404 continue;
4405 }
4406
4407 struct dso_entry *edeps = (struct dso_entry *)malloc(sizeof(struct dso_entry));
4408 edeps->dso = cur->deps[i];
4409 TAILQ_INSERT_TAIL(&unload_queue, edeps, entries);
4410 }
4411 } /* for */
4412 } /* while */
4413
4414 if (is_dlclose_debug_enable()) {
4415 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4416 LD_LOGE("[dlclose]: unload %{public}s succeed when dlclose %{public}s", ef->dso->name, p->name);
4417 }
4418 for (size_t deps_num = 0; p->deps[deps_num]; deps_num++) {
4419 bool ready_to_unload = false;
4420 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4421 if (ef->dso == p->deps[deps_num]) {
4422 ready_to_unload = true;
4423 break;
4424 }
4425 }
4426 if (!ready_to_unload) {
4427 LD_LOGE("[dlclose]: unload %{public}s failed when dlclose %{public}s,"
4428 "nr_dlopen:%{public}d, by_dlopen:%{public}d, parents_count:%{public}d",
4429 p->deps[deps_num]->name, p->name, p->deps[deps_num]->nr_dlopen,
4430 p->deps[deps_num]->by_dlopen, p->deps[deps_num]->parents_count);
4431 }
4432 }
4433 }
4434
4435 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4436 dlclose_impl(ef->dso);
4437 }
4438
4439 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4440 /* call destructors if needed */
4441 pthread_mutex_lock(&init_fini_lock);
4442 int constructed = ef->dso->constructed;
4443 pthread_mutex_unlock(&init_fini_lock);
4444
4445 if (constructed) {
4446 size_t dyn[DYN_CNT];
4447 decode_vec(ef->dso->dynv, dyn, DYN_CNT);
4448 if (dyn[0] & (1<<DT_FINI_ARRAY)) {
4449 n = dyn[DT_FINI_ARRAYSZ] / sizeof(size_t);
4450 size_t *fn = (size_t *)laddr(ef->dso, dyn[DT_FINI_ARRAY]) + n;
4451 trace_marker_begin(HITRACE_TAG_MUSL, "calling destructors:", ef->dso->name);
4452
4453 pthread_rwlock_unlock(&lock);
4454 while (n--)
4455 ((void (*)(void))*--fn)();
4456 pthread_rwlock_wrlock(&lock);
4457
4458 trace_marker_end(HITRACE_TAG_MUSL);
4459 }
4460 pthread_mutex_lock(&init_fini_lock);
4461 ef->dso->constructed = 0;
4462 pthread_mutex_unlock(&init_fini_lock);
4463 }
4464 }
4465 	// Unmap all sos only at the end: via weak symbols, a destructor in a later-unloaded so may still call functions in an earlier one.
4466 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4467 dlclose_post(ef->dso);
4468 }
4469 // Free dso_entry.
4470 TAILQ_FOREACH_SAFE(ef, &need_unload_queue, entries, ef_tmp) {
4471 if (ef) {
4472 free(ef);
4473 }
4474 }
4475
4476 return 0;
4477 }
4478
4479 hidden int __dlclose(void *p)
4480 {
4481 pthread_mutex_lock(&dlclose_lock);
4482 int rc;
4483 pthread_rwlock_wrlock(&lock);
4484 if (shutting_down) {
4485 error("Cannot dlclose while program is exiting.");
4486 pthread_rwlock_unlock(&lock);
4487 pthread_mutex_unlock(&dlclose_lock);
4488 return -1;
4489 }
4490 #ifdef HANDLE_RANDOMIZATION
4491 struct dso *dso = find_dso_by_handle(p);
4492 if (dso == NULL) {
4493 errno = EINVAL;
4494 error("Handle is invalid.");
4495 LD_LOGE("Handle is not find.");
4496 pthread_rwlock_unlock(&lock);
4497 pthread_mutex_unlock(&dlclose_lock);
4498 return -1;
4499 }
4500 rc = do_dlclose(dso, 0);
4501 #else
4502 rc = do_dlclose(p, 0);
4503 #endif
4504 pthread_rwlock_unlock(&lock);
4505 pthread_mutex_unlock(&dlclose_lock);
4506 return rc;
4507 }
4508
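/* A symbol matches an address if it is defined (non-zero st_value), is not a
 * TLS symbol, and the module-relative address falls within
 * [st_value, st_value + st_size). */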
4509 static inline int sym_is_matched(const Sym* sym, size_t addr_offset_so) {
4510 return sym->st_value &&
4511 (1<<(sym->st_info&0xf) != STT_TLS) &&
4512 (addr_offset_so >= sym->st_value) &&
4513 (addr_offset_so < sym->st_value + sym->st_size);
4514 }
4515
4516 static inline Sym* find_addr_by_elf(size_t addr_offset_so, struct dso *p) {
4517 uint32_t nsym = p->hashtab[1];
4518 Sym *sym = p->syms;
4519 for (; nsym; nsym--, sym++) {
4520 if (sym_is_matched(sym, addr_offset_so)) {
4521 return sym;
4522 }
4523 }
4524
4525 return NULL;
4526 }
4527
4528 static inline Sym* find_addr_by_gnu(size_t addr_offset_so, struct dso *p) {
4529
4530 size_t i, nsym, first_hash_sym_index;
4531 uint32_t *hashval;
4532 Sym *sym_tab = p->syms;
4533 uint32_t *buckets = p->ghashtab + 4 + (p->ghashtab[2] * sizeof(size_t) / 4);
4534 	// Points to the first defined symbol; all symbols before it are undefined.
4535 first_hash_sym_index = buckets[0];
4536 Sym *sym = &sym_tab[first_hash_sym_index];
4537
4538 	// Find the highest symbol index referenced by any bucket (the start of the last hash chain).
4539 for (i = nsym = 0; i < p->ghashtab[0]; i++) {
4540 if (buckets[i] > nsym)
4541 nsym = buckets[i];
4542 }
4543
4544 for (i = first_hash_sym_index; i < nsym; i++) {
4545 if (sym_is_matched(sym, addr_offset_so)) {
4546 return sym;
4547 }
4548 sym++;
4549 }
4550
4551 // Start traversing the hash list from the position pointed to by the last bucket.
4552 if (nsym) {
4553 hashval = buckets + p->ghashtab[0] + (nsym - p->ghashtab[1]);
4554 do {
4555 nsym++;
4556 if (sym_is_matched(sym, addr_offset_so)) {
4557 return sym;
4558 }
4559 sym++;
4560 }
4561 while (!(*hashval++ & 1));
4562 }
4563
4564 return NULL;
4565 }
4566
4567
4568 int dladdr(const void *addr_arg, Dl_info *info)
4569 {
4570 size_t addr = (size_t)addr_arg;
4571 struct dso *p;
4572 Sym *match_sym = NULL;
4573 char *strings;
4574
4575 pthread_rwlock_rdlock(&lock);
4576 p = addr2dso(addr);
4577 pthread_rwlock_unlock(&lock);
4578
4579 if (!p) return 0;
4580
4581 strings = p->strings;
4582 size_t addr_offset_so = addr - (size_t)p->base;
4583
4584 info->dli_fname = p->name;
4585 info->dli_fbase = p->map;
4586
4587 if (p->ghashtab) {
4588 match_sym = find_addr_by_gnu(addr_offset_so, p);
4589
4590 } else {
4591 match_sym = find_addr_by_elf(addr_offset_so, p);
4592 }
4593
4594 if (!match_sym) {
4595 info->dli_sname = 0;
4596 info->dli_saddr = 0;
4597 return 1;
4598 }
4599 info->dli_sname = strings + match_sym->st_name;
4600 info->dli_saddr = (void *)laddr(p, match_sym->st_value);
4601 return 1;
4602 }
4603
4604 hidden void *__dlsym(void *restrict p, const char *restrict s, void *restrict ra)
4605 {
4606 void *res;
4607 pthread_rwlock_rdlock(&lock);
4608 #ifdef HANDLE_RANDOMIZATION
4609 if ((p != RTLD_DEFAULT) && (p != RTLD_NEXT)) {
4610 struct dso *dso = find_dso_by_handle(p);
4611 if (dso == NULL) {
4612 pthread_rwlock_unlock(&lock);
4613 return 0;
4614 }
4615 res = do_dlsym(dso, s, "", ra);
4616 } else {
4617 res = do_dlsym(p, s, "", ra);
4618 }
4619 #else
4620 res = do_dlsym(p, s, "", ra);
4621 #endif
4622 pthread_rwlock_unlock(&lock);
4623 return res;
4624 }
4625
4626 hidden void *__dlvsym(void *restrict p, const char *restrict s, const char *restrict v, void *restrict ra)
4627 {
4628 void *res;
4629 pthread_rwlock_rdlock(&lock);
4630 #ifdef HANDLE_RANDOMIZATION
4631 if ((p != RTLD_DEFAULT) && (p != RTLD_NEXT)) {
4632 struct dso *dso = find_dso_by_handle(p);
4633 if (dso == NULL) {
4634 pthread_rwlock_unlock(&lock);
4635 return 0;
4636 }
4637 res = do_dlsym(dso, s, v, ra);
4638 } else {
4639 res = do_dlsym(p, s, v, ra);
4640 }
4641 #else
4642 res = do_dlsym(p, s, v, ra);
4643 #endif
4644 pthread_rwlock_unlock(&lock);
4645 return res;
4646 }
4647
4648 hidden void *__dlsym_redir_time64(void *restrict p, const char *restrict s, void *restrict ra)
4649 {
4650 #if _REDIR_TIME64
4651 const char *suffix, *suffix2 = "";
4652 char redir[36];
4653
4654 /* Map the symbol name to a time64 version of itself according to the
4655 * pattern used for naming the redirected time64 symbols. */
4656 size_t l = strnlen(s, sizeof redir);
4657 if (l<4 || l==sizeof redir) goto no_redir;
4658 if (s[l-2]=='_' && s[l-1]=='r') {
4659 l -= 2;
4660 suffix2 = s+l;
4661 }
4662 if (l<4) goto no_redir;
4663 if (!strcmp(s+l-4, "time")) suffix = "64";
4664 else suffix = "_time64";
4665
4666 /* Use the presence of the remapped symbol name in libc to determine
4667 * whether it's one that requires time64 redirection; replace if so. */
4668 snprintf(redir, sizeof redir, "__%.*s%s%s", (int)l, s, suffix, suffix2);
4669 if (find_sym(&ldso, redir, 1).sym) s = redir;
4670 no_redir:
4671 #endif
4672 return __dlsym(p, s, ra);
4673 }
4674
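/* Walk the global DSO list and hand each entry's program headers to the
 * callback.  dlclose_lock is held for the whole walk so entries cannot be
 * unloaded underneath the callback; the rwlock only protects the step to the
 * next list entry. */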
4675 int dl_iterate_phdr(int(*callback)(struct dl_phdr_info *info, size_t size, void *data), void *data)
4676 {
4677 pthread_mutex_lock(&dlclose_lock);
4678 struct dso *current;
4679 struct dl_phdr_info info;
4680 int ret = 0;
4681 for(current = head; current;) {
4682 info.dlpi_addr = (uintptr_t)current->base;
4683 info.dlpi_name = current->name;
4684 info.dlpi_phdr = current->phdr;
4685 info.dlpi_phnum = current->phnum;
4686 info.dlpi_adds = gencnt;
4687 info.dlpi_subs = subcnt;
4688 info.dlpi_tls_modid = current->tls_id;
4689 info.dlpi_tls_data = !current->tls_id ? 0 :
4690 __tls_get_addr((tls_mod_off_t[]){current->tls_id,0});
4691
4692 // FIXME: add dl_phdr_lock for unwind callback
4693 pthread_mutex_lock(&dl_phdr_lock);
4694 ret = (callback)(&info, sizeof (info), data);
4695 pthread_mutex_unlock(&dl_phdr_lock);
4696
4697 if (ret != 0) break;
4698 pthread_rwlock_rdlock(&lock);
4699 current = current->next;
4700 pthread_rwlock_unlock(&lock);
4701 }
4702 pthread_mutex_unlock(&dlclose_lock);
4703 return ret;
4704 }
4705
4706 static void error_impl(const char *fmt, ...)
4707 {
4708 va_list ap;
4709 va_start(ap, fmt);
4710 if (!runtime) {
4711 vdprintf(2, fmt, ap);
4712 dprintf(2, "\n");
4713 ldso_fail = 1;
4714 va_end(ap);
4715 return;
4716 }
4717 __dl_vseterr(fmt, ap);
4718 va_end(ap);
4719 }
4720
4721 static void error_noop(const char *fmt, ...)
4722 {
4723 }
4724
4725 int dlns_set_namespace_lib_path(const char * name, const char * lib_path)
4726 {
4727 if (!name || !lib_path) {
4728 LD_LOGE("dlns_set_namespace_lib_path name or lib_path is null.");
4729 return EINVAL;
4730 }
4731
4732 pthread_rwlock_wrlock(&lock);
4733 const void *caller_addr = __builtin_return_address(0);
4734 if (is_permitted(caller_addr, name) == false) {
4735 pthread_rwlock_unlock(&lock);
4736 return EPERM;
4737 }
4738
4739 ns_t* ns = find_ns_by_name(name);
4740 if (!ns) {
4741 pthread_rwlock_unlock(&lock);
4742 LD_LOGE("dlns_set_namespace_lib_path fail, input ns name : [%{public}s] is not found.", name);
4743 return ENOKEY;
4744 }
4745
4746 ns_set_lib_paths(ns, lib_path);
4747 pthread_rwlock_unlock(&lock);
4748 return 0;
4749 }
4750
4751 int dlns_set_namespace_separated(const char * name, const bool separated)
4752 {
4753 if (!name) {
4754 LD_LOGE("dlns_set_namespace_separated name is null.");
4755 return EINVAL;
4756 }
4757
4758 pthread_rwlock_wrlock(&lock);
4759 const void *caller_addr = __builtin_return_address(0);
4760 if (is_permitted(caller_addr, name) == false) {
4761 pthread_rwlock_unlock(&lock);
4762 return EPERM;
4763 }
4764
4765 ns_t* ns = find_ns_by_name(name);
4766 if (!ns) {
4767 pthread_rwlock_unlock(&lock);
4768 LD_LOGE("dlns_set_namespace_separated fail, input ns name : [%{public}s] is not found.", name);
4769 return ENOKEY;
4770 }
4771
4772 ns_set_separated(ns, separated);
4773 pthread_rwlock_unlock(&lock);
4774 return 0;
4775 }
4776
4777 int dlns_set_namespace_permitted_paths(const char * name, const char * permitted_paths)
4778 {
4779 if (!name || !permitted_paths) {
4780 LD_LOGE("dlns_set_namespace_permitted_paths name or permitted_paths is null.");
4781 return EINVAL;
4782 }
4783
4784 pthread_rwlock_wrlock(&lock);
4785 const void *caller_addr = __builtin_return_address(0);
4786 if (is_permitted(caller_addr, name) == false) {
4787 pthread_rwlock_unlock(&lock);
4788 return EPERM;
4789 }
4790
4791 ns_t* ns = find_ns_by_name(name);
4792 if (!ns) {
4793 pthread_rwlock_unlock(&lock);
4794 LD_LOGE("dlns_set_namespace_permitted_paths fail, input ns name : [%{public}s] is not found.", name);
4795 return ENOKEY;
4796 }
4797
4798 ns_set_permitted_paths(ns, permitted_paths);
4799 pthread_rwlock_unlock(&lock);
4800 return 0;
4801 }
4802
4803 int dlns_set_namespace_allowed_libs(const char * name, const char * allowed_libs)
4804 {
4805 if (!name || !allowed_libs) {
4806 LD_LOGE("dlns_set_namespace_allowed_libs name or allowed_libs is null.");
4807 return EINVAL;
4808 }
4809
4810 pthread_rwlock_wrlock(&lock);
4811 const void *caller_addr = __builtin_return_address(0);
4812 if (is_permitted(caller_addr, name) == false) {
4813 pthread_rwlock_unlock(&lock);
4814 return EPERM;
4815 }
4816
4817 ns_t* ns = find_ns_by_name(name);
4818 if (!ns) {
4819 pthread_rwlock_unlock(&lock);
4820 LD_LOGE("dlns_set_namespace_allowed_libs fail, input ns name : [%{public}s] is not found.", name);
4821 return ENOKEY;
4822 }
4823
4824 ns_set_allowed_libs(ns, allowed_libs);
4825 pthread_rwlock_unlock(&lock);
4826 return 0;
4827 }
4828
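/* If the regular search failed (fd == -1), retry the path search using the
 * namespace's ASan library paths; when both asan_lib_paths and lib_paths are
 * set they are concatenated with the ASan paths first, so instrumented
 * variants are preferred. */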
4829 int handle_asan_path_open(int fd, const char *name, ns_t *namespace, char *buf, size_t buf_size)
4830 {
4831 LD_LOGD("handle_asan_path_open fd:%{public}d, name:%{public}s , namespace:%{public}s .",
4832 fd,
4833 name,
4834 namespace ? namespace->ns_name : "NULL");
4835 int fd_tmp = fd;
4836 if (fd == -1 && (namespace->asan_lib_paths || namespace->lib_paths)) {
4837 if (namespace->lib_paths && namespace->asan_lib_paths) {
4838 size_t newlen = strlen(namespace->asan_lib_paths) + strlen(namespace->lib_paths) + 2;
4839 char *new_lib_paths = malloc(newlen);
4840 memset(new_lib_paths, 0, newlen);
4841 strcpy(new_lib_paths, namespace->asan_lib_paths);
4842 strcat(new_lib_paths, ":");
4843 strcat(new_lib_paths, namespace->lib_paths);
4844 fd_tmp = path_open(name, new_lib_paths, buf, buf_size);
4845 LD_LOGD("handle_asan_path_open path_open new_lib_paths:%{public}s ,fd: %{public}d.", new_lib_paths, fd_tmp);
4846 free(new_lib_paths);
4847 } else if (namespace->asan_lib_paths) {
4848 fd_tmp = path_open(name, namespace->asan_lib_paths, buf, buf_size);
4849 LD_LOGD("handle_asan_path_open path_open asan_lib_paths:%{public}s ,fd: %{public}d.",
4850 namespace->asan_lib_paths,
4851 fd_tmp);
4852 } else {
4853 fd_tmp = path_open(name, namespace->lib_paths, buf, buf_size);
4854 LD_LOGD(
4855 "handle_asan_path_open path_open lib_paths:%{public}s ,fd: %{public}d.", namespace->lib_paths, fd_tmp);
4856 }
4857 }
4858 return fd_tmp;
4859 }
4860
4861 void* dlopen_ext(const char *file, int mode, const dl_extinfo *extinfo)
4862 {
4863 const void *caller_addr = __builtin_return_address(0);
4864 musl_log_reset();
4865 ld_log_reset();
4866 if (extinfo != NULL) {
4867 if ((extinfo->flag & ~(DL_EXT_VALID_FLAG_BITS)) != 0) {
4868 LD_LOGE("Error dlopen_ext %{public}s: invalid flag %{public}x", file, extinfo->flag);
4869 return NULL;
4870 }
4871 }
4872 LD_LOGI("dlopen_ext file:%{public}s, mode:%{public}x , caller_addr:%{public}p , extinfo->flag:%{public}x",
4873 file,
4874 mode,
4875 caller_addr,
4876 extinfo ? extinfo->flag : 0);
4877 return dlopen_impl(file, mode, NULL, caller_addr, extinfo);
4878 }
4879
4880 #ifdef LOAD_ORDER_RANDOMIZATION
4881 static void open_library_by_path(const char *name, const char *s, struct loadtask *task, struct zip_info *z_info)
4882 {
4883 char *buf = task->buf;
4884 size_t buf_size = sizeof task->buf;
4885 size_t l;
4886 for (;;) {
4887 s += strspn(s, ":\n");
4888 l = strcspn(s, ":\n");
4889 if (l-1 >= INT_MAX) return;
4890 if (snprintf(buf, buf_size, "%.*s/%s", (int)l, s, name) < buf_size) {
4891 char *separator = strstr(buf, ZIP_FILE_PATH_SEPARATOR);
4892 if (separator != NULL) {
4893 int res = open_uncompressed_library_in_zipfile(buf, z_info, separator);
4894 if (res == 0) {
4895 task->fd = z_info->fd;
4896 task->file_offset = z_info->file_offset;
4897 break;
4898 } else {
4899 memset(z_info->path_buf, 0, sizeof(z_info->path_buf));
4900 }
4901 } else {
4902 if ((task->fd = open(buf, O_RDONLY|O_CLOEXEC))>=0) break;
4903 }
4904 }
4905 s += l;
4906 }
4907 return;
4908 }
4909
4910 static void handle_asan_path_open_by_task(int fd, const char *name, ns_t *namespace, struct loadtask *task,
4911 struct zip_info *z_info)
4912 {
4913 LD_LOGD("handle_asan_path_open_by_task fd:%{public}d, name:%{public}s , namespace:%{public}s .",
4914 fd,
4915 name,
4916 namespace ? namespace->ns_name : "NULL");
4917 if (fd == -1 && (namespace->asan_lib_paths || namespace->lib_paths)) {
4918 if (namespace->lib_paths && namespace->asan_lib_paths) {
4919 size_t newlen = strlen(namespace->asan_lib_paths) + strlen(namespace->lib_paths) + 2;
4920 char *new_lib_paths = malloc(newlen);
4921 memset(new_lib_paths, 0, newlen);
4922 strcpy(new_lib_paths, namespace->asan_lib_paths);
4923 strcat(new_lib_paths, ":");
4924 strcat(new_lib_paths, namespace->lib_paths);
4925 open_library_by_path(name, new_lib_paths, task, z_info);
4926 LD_LOGD("handle_asan_path_open_by_task open_library_by_path new_lib_paths:%{public}s ,fd: %{public}d.",
4927 new_lib_paths,
4928 task->fd);
4929 free(new_lib_paths);
4930 } else if (namespace->asan_lib_paths) {
4931 open_library_by_path(name, namespace->asan_lib_paths, task, z_info);
4932 LD_LOGD("handle_asan_path_open_by_task open_library_by_path asan_lib_paths:%{public}s ,fd: %{public}d.",
4933 namespace->asan_lib_paths,
4934 task->fd);
4935 } else {
4936 open_library_by_path(name, namespace->lib_paths, task, z_info);
4937 LD_LOGD("handle_asan_path_open_by_task open_library_by_path lib_paths:%{public}s ,fd: %{public}d.",
4938 namespace->lib_paths,
4939 task->fd);
4940 }
4941 }
4942 return;
4943 }
4944
4945 /* Used to get an uncompressed library's offset within a zip file, so we can mmap the library directly at that offset. */
4946 int open_uncompressed_library_in_zipfile(const char *path, struct zip_info *z_info, char *separator)
4947 {
4948 struct local_file_header zip_file_header;
4949 struct central_dir_entry c_dir_entry;
4950 struct zip_end_locator end_locator;
4951
4952 	/* Use '!/' to split the path into the zip file path and the library path inside the zip file.
4953 * For example:
4954 * - path: x/xx/xxx.zip!/x/xx/xxx.so
4955 * - zipfile path: x/xx/xxx.zip
4956 * - library path in zipfile: x/xx/xxx.so */
4957 if (strlcpy(z_info->path_buf, path, PATH_BUF_SIZE) >= PATH_BUF_SIZE) {
4958 LD_LOGE("Open uncompressed library: input path %{public}s is too long.", path);
4959 return -1;
4960 }
4961 z_info->path_buf[separator - path] = '\0';
4962 z_info->file_path_index = separator - path + 2;
4963 char *zip_file_path = z_info->path_buf;
4964 char *lib_path = &z_info->path_buf[z_info->file_path_index];
4965 if (zip_file_path == NULL || lib_path == NULL) {
4966 LD_LOGE("Open uncompressed library: get zip and lib path failed.");
4967 return -1;
4968 }
4969 LD_LOGD("Open uncompressed library: input path: %{public}s, zip file path: %{public}s, library path: %{public}s.",
4970 path, zip_file_path, lib_path);
4971
4972 // Get zip file length
4973 FILE *zip_file = fopen(zip_file_path, "re");
4974 if (zip_file == NULL) {
4975 LD_LOGE("Open uncompressed library: fopen %{public}s failed.", zip_file_path);
4976 return -1;
4977 }
4978 if (fseek(zip_file, 0, SEEK_END) != 0) {
4979 LD_LOGE("Open uncompressed library: fseek SEEK_END failed.");
4980 fclose(zip_file);
4981 return -1;
4982 }
4983 int64_t zip_file_len = ftell(zip_file);
4984 if (zip_file_len == -1) {
4985 LD_LOGE("Open uncompressed library: get zip file length failed.");
4986 fclose(zip_file);
4987 return -1;
4988 }
4989
4990 // Read end of central directory record.
4991 size_t end_locator_len = sizeof(end_locator);
4992 size_t end_locator_pos = zip_file_len - end_locator_len;
4993 if (fseek(zip_file, end_locator_pos, SEEK_SET) != 0) {
4994 LD_LOGE("Open uncompressed library: fseek end locator position failed.");
4995 fclose(zip_file);
4996 return -1;
4997 }
4998 if (fread(&end_locator, sizeof(end_locator), 1, zip_file) != 1 || end_locator.signature != EOCD_SIGNATURE) {
4999 LD_LOGE("Open uncompressed library: fread end locator failed.");
5000 fclose(zip_file);
5001 return -1;
5002 }
5003
5004 char file_name[PATH_BUF_SIZE];
5005 uint64_t current_dir_pos = end_locator.offset;
5006 for (uint16_t i = 0; i < end_locator.total_entries; i++) {
5007 // Read central dir entry.
5008 if (fseek(zip_file, current_dir_pos, SEEK_SET) != 0) {
5009 LD_LOGE("Open uncompressed library: fseek current centra dir entry position failed.");
5010 fclose(zip_file);
5011 return -1;
5012 }
5013 if (fread(&c_dir_entry, sizeof(c_dir_entry), 1, zip_file) != 1 || c_dir_entry.signature != CENTRAL_SIGNATURE) {
5014 LD_LOGE("Open uncompressed library: fread centra dir entry failed.");
5015 fclose(zip_file);
5016 return -1;
5017 }
5018
5019 if (fread(file_name, c_dir_entry.name_size, 1, zip_file) != 1) {
5020 LD_LOGE("Open uncompressed library: fread file name failed.");
5021 fclose(zip_file);
5022 return -1;
5023 }
5024 if (strcmp(file_name, lib_path) == 0) {
5025 // Read local file header.
5026 if (fseek(zip_file, c_dir_entry.local_header_offset, SEEK_SET) != 0) {
5027 LD_LOGE("Open uncompressed library: fseek local file header failed.");
5028 fclose(zip_file);
5029 return -1;
5030 }
5031 if (fread(&zip_file_header, sizeof(zip_file_header), 1, zip_file) != 1) {
5032 LD_LOGE("Open uncompressed library: fread local file header failed.");
5033 fclose(zip_file);
5034 return -1;
5035 }
5036 if (zip_file_header.signature != LOCAL_FILE_HEADER_SIGNATURE) {
5037 LD_LOGE("Open uncompressed library: read local file header signature error.");
5038 fclose(zip_file);
5039 return -1;
5040 }
5041
5042 z_info->file_offset = c_dir_entry.local_header_offset + sizeof(zip_file_header) +
5043 zip_file_header.name_size + zip_file_header.extra_size;
5044 if (zip_file_header.compression_method != COMPRESS_STORED || z_info->file_offset % PAGE_SIZE != 0) {
5045 LD_LOGE("Open uncompressed library: open %{public}s in %{public}s failed because of misalignment or saved with compression."
5046 "compress method %{public}d, file offset %{public}lu",
5047 lib_path, zip_file_path, zip_file_header.compression_method, z_info->file_offset);
5048 fclose(zip_file);
5049 return -2;
5050 }
5051 z_info->found = true;
5052 break;
5053 }
5054
5055 memset(file_name, 0, sizeof(file_name));
5056 current_dir_pos += sizeof(c_dir_entry);
5057 current_dir_pos += c_dir_entry.name_size + c_dir_entry.extra_size + c_dir_entry.comment_size;
5058 }
5059 if (!z_info->found) {
5060 LD_LOGE("Open uncompressed library: %{public}s was not found in %{public}s.", lib_path, zip_file_path);
5061 fclose(zip_file);
5062 return -3;
5063 }
5064 z_info->fd = fileno(zip_file);
5065
5066 return 0;
5067 }
5068
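/* Probe-map the ELF header with MAP_XPM so the kernel can apply its
 * executable-page (XPM) verification to the file before the real mapping is
 * created; if this probe mapping fails the library is rejected. */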
5069 static bool task_check_xpm(struct loadtask *task)
5070 {
5071 size_t mapLen = sizeof(Ehdr);
5072 void *map = mmap(0, mapLen, PROT_READ, MAP_PRIVATE | MAP_XPM, task->fd, task->file_offset);
5073 if (map == MAP_FAILED) {
5074 LD_LOGE("Xpm check failed for %{public}s, errno for mmap is: %{public}d", task->name, errno);
5075 return false;
5076 }
5077 munmap(map, mapLen);
5078 return true;
5079 }
5080
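/* First stage of loading: read the ELF and program headers from task->fd,
 * record any TLS segment parameters, map the PT_DYNAMIC segment, and use the
 * section headers to locate and map the string table, so dependency names can
 * be resolved before the whole library is mapped. */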
5081 static bool map_library_header(struct loadtask *task)
5082 {
5083 off_t off_start;
5084 Phdr *ph;
5085 size_t i;
5086 size_t str_size;
5087 off_t str_table;
5088 if (!task_check_xpm(task)) {
5089 return false;
5090 }
5091
5092 ssize_t l = pread(task->fd, task->ehdr_buf, sizeof task->ehdr_buf, task->file_offset);
5093 task->eh = task->ehdr_buf;
5094 if (l < 0) {
5095 LD_LOGE("Error mapping header %{public}s: failed to read fd errno: %{public}d", task->name, errno);
5096 return false;
5097 }
5098 if (l < sizeof(Ehdr) || (task->eh->e_type != ET_DYN && task->eh->e_type != ET_EXEC)) {
5099 LD_LOGE("Error mapping header %{public}s: invaliled Ehdr l=%{public}d e_type=%{public}hu",
5100 task->name, l, task->eh->e_type);
5101 goto noexec;
5102 }
5103 task->phsize = task->eh->e_phentsize * task->eh->e_phnum;
5104 if (task->phsize > sizeof task->ehdr_buf - sizeof(Ehdr)) {
5105 task->allocated_buf = malloc(task->phsize);
5106 if (!task->allocated_buf) {
5107 LD_LOGE("Error mapping header %{public}s: failed to alloc memory errno: %{public}d", task->name, errno);
5108 return false;
5109 }
5110 l = pread(task->fd, task->allocated_buf, task->phsize, task->eh->e_phoff + task->file_offset);
5111 if (l < 0) {
5112 LD_LOGE("Error mapping header %{public}s: failed to pread errno: %{public}d", task->name, errno);
5113 goto error;
5114 }
5115 if (l != task->phsize) {
5116 LD_LOGE("Error mapping header %{public}s: unmatched phsize errno: %{public}d", task->name, errno);
5117 goto noexec;
5118 }
5119 ph = task->ph0 = task->allocated_buf;
5120 } else if (task->eh->e_phoff + task->phsize > l) {
5121 l = pread(task->fd, task->ehdr_buf + 1, task->phsize, task->eh->e_phoff + task->file_offset);
5122 if (l < 0) {
5123 LD_LOGE("Error mapping header %{public}s: failed to pread errno: %{public}d", task->name, errno);
5124 goto error;
5125 }
5126 if (l != task->phsize) {
5127 LD_LOGE("Error mapping header %{public}s: unmatched phsize", task->name);
5128 goto noexec;
5129 }
5130 ph = task->ph0 = (void *)(task->ehdr_buf + 1);
5131 } else {
5132 ph = task->ph0 = (void *)((char *)task->ehdr_buf + task->eh->e_phoff);
5133 }
5134
5135 for (i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
5136 if (ph->p_type == PT_DYNAMIC) {
5137 task->dyn = ph->p_vaddr;
5138 } else if (ph->p_type == PT_TLS) {
5139 task->tls_image = ph->p_vaddr;
5140 task->tls.align = ph->p_align;
5141 task->tls.len = ph->p_filesz;
5142 task->tls.size = ph->p_memsz;
5143 }
5144
5145 if (ph->p_type != PT_DYNAMIC) {
5146 continue;
5147 }
5148 // map the dynamic segment and the string table of the library
5149 off_start = ph->p_offset;
5150 off_start &= -PAGE_SIZE;
5151 task->dyn_map_len = ph->p_memsz + (ph->p_offset - off_start);
5152 /* The default value of file_offset is 0.
5153 * The value of file_offset may be greater than 0 when opening a library from a zip file.
5154 * In that case file_offset is guaranteed to be PAGE_SIZE-aligned. */
5155 task->dyn_map = mmap(0, task->dyn_map_len, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
5156 if (task->dyn_map == MAP_FAILED) {
5157 LD_LOGE("Error mapping header %{public}s: failed to map dynamic section errno: %{public}d", task->name, errno);
5158 goto error;
5159 }
5160 task->dyn_addr = (size_t *)((unsigned char *)task->dyn_map + (ph->p_offset - off_start));
5161 size_t dyn_tmp;
5162 if (search_vec(task->dyn_addr, &dyn_tmp, DT_STRTAB)) {
5163 str_table = dyn_tmp;
5164 } else {
5165 LD_LOGE("Error mapping header %{public}s: DT_STRTAB not found", task->name);
5166 goto error;
5167 }
5168 if (search_vec(task->dyn_addr, &dyn_tmp, DT_STRSZ)) {
5169 str_size = dyn_tmp;
5170 } else {
5171 LD_LOGE("Error mapping header %{public}s: DT_STRSZ not found", task->name);
5172 goto error;
5173 }
5174 }
5175
5176 task->shsize = task->eh->e_shentsize * task->eh->e_shnum;
5177 off_start = task->eh->e_shoff;
5178 off_start &= -PAGE_SIZE;
5179 task->shsize += task->eh->e_shoff - off_start;
5180 task->shdr_allocated_buf = mmap(0, task->shsize, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
5181 if (task->shdr_allocated_buf == MAP_FAILED) {
5182 LD_LOGE("Error mapping section header %{public}s: failed to map shdr_allocated_buf errno: %{public}d",
5183 task->name, errno);
5184 goto error;
5185 }
5186 Shdr *sh = (Shdr *)((char *)task->shdr_allocated_buf + task->eh->e_shoff - off_start);
5187 for (i = task->eh->e_shnum; i; i--, sh = (void *)((char *)sh + task->eh->e_shentsize)) {
5188 if (sh->sh_type != SHT_STRTAB || sh->sh_addr != str_table || sh->sh_size != str_size) {
5189 continue;
5190 }
5191 off_start = sh->sh_offset;
5192 off_start &= -PAGE_SIZE;
5193 task->str_map_len = sh->sh_size + (sh->sh_offset - off_start);
5194 task->str_map = mmap(0, task->str_map_len, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
5195 if (task->str_map == MAP_FAILED) {
5196 LD_LOGE("Error mapping section header %{public}s: failed to map string section errno: %{public}d",
5197 task->name, errno);
5198 goto error;
5199 }
5200 task->str_addr = (char *)task->str_map + sh->sh_offset - off_start;
5201 break;
5202 }
5203 if (!task->dyn) {
5204 LD_LOGE("Error mapping header %{public}s: dynamic section not found", task->name);
5205 goto noexec;
5206 }
5207 return true;
5208 noexec:
5209 errno = ENOEXEC;
5210 error:
5211 free(task->allocated_buf);
5212 task->allocated_buf = NULL;
5213 if (task->shdr_allocated_buf != MAP_FAILED) {
5214 munmap(task->shdr_allocated_buf, task->shsize);
5215 task->shdr_allocated_buf = MAP_FAILED;
5216 }
5217 return false;
5218 }
5219
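/* Map every PT_LOAD segment of the library. The full address range is reserved first
 * (optionally inside a caller-provided reserved region), then each segment is mapped
 * over it with its own protections; the zero-initialized tail of writable segments is
 * backed by anonymous pages and named as .bss. PT_GNU_RELRO and PT_GNU_STACK are also
 * recorded along the way. */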
5220 static bool task_map_library(struct loadtask *task, struct reserved_address_params *reserved_params)
5221 {
5222 size_t addr_min = SIZE_MAX, addr_max = 0, map_len;
5223 size_t this_min, this_max;
5224 size_t nsegs = 0;
5225 off_t off_start;
5226 Phdr *ph = task->ph0;
5227 unsigned prot;
5228 unsigned char *map = MAP_FAILED, *base;
5229 size_t i;
5230 int map_flags = MAP_PRIVATE;
5231 size_t start_addr;
5232 size_t start_alignment = PAGE_SIZE;
5233 bool hugepage_enabled = false;
5234
5235 for (i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
5236 if (ph->p_type == PT_GNU_RELRO) {
5237 task->p->relro_start = ph->p_vaddr & -PAGE_SIZE;
5238 task->p->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
5239 } else if (ph->p_type == PT_GNU_STACK) {
5240 if (!runtime && ph->p_memsz > __default_stacksize) {
5241 __default_stacksize =
5242 ph->p_memsz < DEFAULT_STACK_MAX ?
5243 ph->p_memsz : DEFAULT_STACK_MAX;
5244 }
5245 }
5246 if (ph->p_type != PT_LOAD) {
5247 continue;
5248 }
5249 nsegs++;
5250 if (ph->p_vaddr < addr_min) {
5251 addr_min = ph->p_vaddr;
5252 off_start = ph->p_offset;
5253 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
5254 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
5255 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
5256 }
5257 if (ph->p_vaddr + ph->p_memsz > addr_max) {
5258 addr_max = ph->p_vaddr + ph->p_memsz;
5259 }
5260 }
5261 if (!task->dyn) {
5262 LD_LOGE("Error mapping library: !task->dyn dynamic section not found task->name=%{public}s", task->name);
5263 goto noexec;
5264 }
5265 if (DL_FDPIC && !(task->eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
5266 task->p->loadmap = calloc(1, sizeof(struct fdpic_loadmap) + nsegs * sizeof(struct fdpic_loadseg));
5267 if (!task->p->loadmap) {
5268 LD_LOGE("Error mapping library: calloc failed errno=%{public}d nsegs=%{public}d", errno, nsegs);
5269 goto error;
5270 }
5271 task->p->loadmap->nsegs = nsegs;
5272 for (ph = task->ph0, i = 0; i < nsegs; ph = (void *)((char *)ph + task->eh->e_phentsize)) {
5273 if (ph->p_type != PT_LOAD) {
5274 continue;
5275 }
5276 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
5277 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
5278 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
5279 map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE - 1),
5280 prot, MAP_PRIVATE,
5281 task->fd, (ph->p_offset & -PAGE_SIZE) + task->file_offset);
5282 if (map == MAP_FAILED) {
5283 unmap_library(task->p);
5284 LD_LOGE("Error mapping library: PT_LOAD mmap failed task->name=%{public}s errno=%{public}d map_len=%{public}d",
5285 task->name, errno, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE - 1));
5286 goto error;
5287 }
5288 task->p->loadmap->segs[i].addr = (size_t)map +
5289 (ph->p_vaddr & PAGE_SIZE - 1);
5290 task->p->loadmap->segs[i].p_vaddr = ph->p_vaddr;
5291 task->p->loadmap->segs[i].p_memsz = ph->p_memsz;
5292 i++;
5293 if (prot & PROT_WRITE) {
5294 size_t brk = (ph->p_vaddr & PAGE_SIZE - 1) + ph->p_filesz;
5295 size_t pgbrk = (brk + PAGE_SIZE - 1) & -PAGE_SIZE;
5296 size_t pgend = (brk + ph->p_memsz - ph->p_filesz + PAGE_SIZE - 1) & -PAGE_SIZE;
5297 if (pgend > pgbrk && mmap_fixed(map + pgbrk,
5298 pgend - pgbrk, prot,
5299 MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
5300 -1, off_start) == MAP_FAILED) {
5301 LD_LOGE("Error mapping library: PROT_WRITE mmap_fixed failed errno=%{public}d", errno);
5302 goto error;
}
5303 memset(map + brk, 0, pgbrk - brk);
5304 }
5305 }
5306 map = (void *)task->p->loadmap->segs[0].addr;
5307 map_len = 0;
5308 goto done_mapping;
5309 }
5310 addr_max += PAGE_SIZE - 1;
5311 addr_max &= -PAGE_SIZE;
5312 addr_min &= -PAGE_SIZE;
5313 off_start &= -PAGE_SIZE;
5314 map_len = addr_max - addr_min + off_start;
5315 start_addr = addr_min;
5316
5317 hugepage_enabled = get_transparent_hugepages_supported();
5318 if (hugepage_enabled) {
5319 size_t maxinum_alignment = phdr_table_get_maxinum_alignment(task->ph0, task->eh->e_phnum);
5320
5321 start_alignment = maxinum_alignment == KPMD_SIZE ? KPMD_SIZE : PAGE_SIZE;
5322 }
5323
5324 if (reserved_params) {
5325 if (map_len > reserved_params->reserved_size) {
5326 if (reserved_params->must_use_reserved) {
5327 LD_LOGE("Error mapping library: map len is larger than reserved address task->name=%{public}s", task->name);
5328 goto error;
5329 }
5330 } else {
5331 start_addr = ((size_t)reserved_params->start_addr - 1 + PAGE_SIZE) & -PAGE_SIZE;
5332 map_flags |= MAP_FIXED;
5333 }
5334 }
5335
5336 /* we will find a mapping_align aligned address as the start of dso
5337 * so we need a tmp_map_len as map_len + mapping_align to make sure
5338 * we have enough space to shift the dso to the correct location. */
5339 size_t mapping_align = start_alignment > LIBRARY_ALIGNMENT ? start_alignment : LIBRARY_ALIGNMENT;
5340 size_t tmp_map_len = ALIGN(map_len, mapping_align) + mapping_align - PAGE_SIZE;
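/* Worked example (assuming a 4 KiB PAGE_SIZE and a 64 KiB mapping_align):
 * map_len = 0x25000 gives ALIGN(0x25000, 0x10000) = 0x30000, so
 * tmp_map_len = 0x30000 + 0x10000 - 0x1000 = 0x3f000, which is always large enough to
 * contain a mapping_align-aligned window of map_len bytes wherever mmap places it. */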
5341
5342 /* map the whole load segments with PROT_READ first for security consideration. */
5343 prot = PROT_READ;
5344
5345 /* if reserved_params exists, we should use start_addr as the preferred address for the mmap operation */
5346 if (reserved_params) {
5347 map = DL_NOMMU_SUPPORT
5348 ? mmap((void *)start_addr, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
5349 : mmap((void *)start_addr, map_len, prot, map_flags, task->fd, off_start + task->file_offset);
5350 if (map == MAP_FAILED) {
5351 LD_LOGE("Error mapping library: reserved_params mmap failed errno=%{public}d DL_NOMMU_SUPPORT=%{public}d"
5352 " task->fd=%{public}d task->name=%{public}s map_len=%{public}d",
5353 errno, DL_NOMMU_SUPPORT, task->fd, task->name, map_len);
5354 goto error;
5355 }
5356 if (reserved_params && map_len < reserved_params->reserved_size) {
5357 reserved_params->reserved_size -= (map_len + (start_addr - (size_t)reserved_params->start_addr));
5358 reserved_params->start_addr = (void *)((uint8_t *)map + map_len);
5359 }
5360 /* if reserved_params does not exist, we should use real_map as the preferred address for the mmap operation */
5361 } else {
5362 /* use tmp_map_len to mmap enough space for the dso with anonymous mapping */
5363 unsigned char *temp_map = mmap((void *)NULL, tmp_map_len, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
5364 if (temp_map == MAP_FAILED) {
5365 LD_LOGE("Error mapping library: !reserved_params mmap failed errno=%{public}d tmp_map_len=%{public}d",
5366 errno, tmp_map_len);
5367 goto error;
5368 }
5369
5370 /* find the mapping_align aligned address */
5371 unsigned char *real_map = (unsigned char*)ALIGN((uintptr_t)temp_map, mapping_align);
5372
5373 map = DL_NOMMU_SUPPORT
5374 ? mmap(real_map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
5375 /* use map_len to mmap correct space for the dso with file mapping */
5376 : mmap(real_map, map_len, prot, map_flags | MAP_FIXED, task->fd, off_start + task->file_offset);
5377 if (map == MAP_FAILED || map != real_map) {
5378 LD_LOGE("Error mapping library: !reserved_params mmap failed errno=%{public}d DL_NOMMU_SUPPORT=%{public}d"
5379 "task->fd=%{public}d task->name=%{public}s map_len=%{public}d",
5380 errno, DL_NOMMU_SUPPORT, task->fd, task->name, map_len);
5381 goto error;
5382 }
5383
5384 /* Free unused memory.
5385 * |--------------------------tmp_map_len--------------------------|
5386 * ^ ^ ^ ^
5387 * |---unused_part_1---|---------map_len-------|---unused_part_2---|
5388 * temp_map real_map(aligned) temp_map_end
5389 */
5390 unsigned char *temp_map_end = temp_map + tmp_map_len;
5391 size_t unused_part_1 = real_map - temp_map;
5392 size_t unused_part_2 = temp_map_end - (real_map + map_len);
5393 if (unused_part_1 > 0) {
5394 int res1 = munmap(temp_map, unused_part_1);
5395 if (res1 == -1) {
5396 LD_LOGE("munmap unused part 1 failed, errno:%{public}d", errno);
5397 }
5398 }
5399
5400 if (unused_part_2 > 0) {
5401 int res2 = munmap(real_map + map_len, unused_part_2);
5402 if (res2 == -1) {
5403 LD_LOGE("munmap unused part 2 failed, errno:%{public}d", errno);
5404 }
5405 }
5406 }
5407 task->p->map = map;
5408 task->p->map_len = map_len;
5409 /* If the loaded file is not relocatable and the requested address is
5410 * not available, then the load operation must fail. */
5411 if (task->eh->e_type != ET_DYN && addr_min && map != (void *)addr_min) {
5412 LD_LOGE("Error mapping library: ET_DYN task->name=%{public}s", task->name);
5413 errno = EBUSY;
5414 goto error;
5415 }
5416 base = map - addr_min;
5417 task->p->phdr = 0;
5418 task->p->phnum = 0;
5419 for (ph = task->ph0, i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
5420 if (ph->p_type == PT_OHOS_RANDOMDATA) {
5421 fill_random_data((void *)(ph->p_vaddr + base), ph->p_memsz);
5422 continue;
5423 }
5424 if (ph->p_type != PT_LOAD) {
5425 continue;
5426 }
5427 /* Check if the program headers are in this load segment, and
5428 * if so, record the address for use by dl_iterate_phdr. */
5429 if (!task->p->phdr && task->eh->e_phoff >= ph->p_offset
5430 && task->eh->e_phoff + task->phsize <= ph->p_offset + ph->p_filesz) {
5431 task->p->phdr = (void *)(base + ph->p_vaddr + (task->eh->e_phoff - ph->p_offset));
5432 task->p->phnum = task->eh->e_phnum;
5433 task->p->phentsize = task->eh->e_phentsize;
5434 }
5435 this_min = ph->p_vaddr & -PAGE_SIZE;
5436 this_max = ph->p_vaddr + ph->p_memsz + PAGE_SIZE - 1 & -PAGE_SIZE;
5437 off_start = ph->p_offset & -PAGE_SIZE;
5438 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
5439 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
5440 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
5441 /* Reuse the existing mapping for the lowest-address LOAD */
5442 if (mmap_fixed(
5443 base + this_min,
5444 this_max - this_min,
5445 prot, MAP_PRIVATE | MAP_FIXED,
5446 task->fd,
5447 off_start + task->file_offset) == MAP_FAILED) {
5448 LD_LOGE("Error mapping library: mmap fix failed task->name=%{public}s errno=%{public}d", task->name, errno);
5449 goto error;
5450 }
5451 if ((ph->p_flags & PF_X) && (ph->p_align == KPMD_SIZE) && hugepage_enabled)
5452 madvise(base + this_min, this_max - this_min, MADV_HUGEPAGE);
5453 if (ph->p_memsz > ph->p_filesz && (ph->p_flags & PF_W)) {
5454 size_t brk = (size_t)base + ph->p_vaddr + ph->p_filesz;
5455 size_t pgbrk = brk + PAGE_SIZE - 1 & -PAGE_SIZE;
5456 size_t zeromap_size = (size_t)base + this_max - pgbrk;
5457 memset((void *)brk, 0, pgbrk - brk & PAGE_SIZE - 1);
5458 if (pgbrk - (size_t)base < this_max && mmap_fixed(
5459 (void *)pgbrk,
5460 zeromap_size,
5461 prot,
5462 MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
5463 -1,
5464 0) == MAP_FAILED) {
5465 LD_LOGE("Error mapping library: PF_W mmap fix failed errno=%{public}d task->name=%{public}s zeromap_size=%{public}d",
5466 errno, task->name, zeromap_size);
5467 goto error;
5468 }
5469 set_bss_vma_name(task->p->name, (void *)pgbrk, zeromap_size);
5470 }
5471 }
5472 for (i = 0; ((size_t *)(base + task->dyn))[i]; i += NEXT_DYNAMIC_INDEX) {
5473 if (((size_t *)(base + task->dyn))[i] == DT_TEXTREL) {
5474 if (mprotect(map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC) && errno != ENOSYS) {
5475 LD_LOGE("Error mapping library: mprotect failed task->name=%{public}s errno=%{public}d", task->name, errno);
5476 goto error;
5477 }
5478 break;
5479 }
5480 }
5481 done_mapping:
5482 #ifdef USE_ENCAPS
5483 clock_gettime(CLOCK_MONOTONIC, &encaps_time_start);
5484 (void)is_section_exist(task->eh, sizeof(Ehdr), task->fd, ".kernelpermission");
5485 clock_gettime(CLOCK_MONOTONIC, &encaps_time_end);
5486 encpas_cost_time = (encaps_time_end.tv_sec - encaps_time_start.tv_sec) * CLOCK_SECOND_TO_MILLI
5487 + (encaps_time_end.tv_nsec - encaps_time_start.tv_nsec) / CLOCK_NANO_TO_MILLI;
5488 #endif
5489 task->p->base = base;
5490 task->p->dynv = laddr(task->p, task->dyn);
5491 if (task->p->tls.size) {
5492 task->p->tls.image = laddr(task->p, task->tls_image);
5493 }
5494 free(task->allocated_buf);
5495 task->allocated_buf = NULL;
5496 if (task->shdr_allocated_buf != MAP_FAILED) {
5497 munmap(task->shdr_allocated_buf, task->shsize);
5498 task->shdr_allocated_buf = MAP_FAILED;
5499 }
5500 return true;
5501 noexec:
5502 errno = ENOEXEC;
5503 error:
5504 if (map != MAP_FAILED) {
5505 unmap_library(task->p);
5506 }
5507 free(task->allocated_buf);
5508 task->allocated_buf = NULL;
5509 if (task->shdr_allocated_buf != MAP_FAILED) {
5510 munmap(task->shdr_allocated_buf, task->shsize);
5511 task->shdr_allocated_buf = MAP_FAILED;
5512 }
5513 return false;
5514 }
5515
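/* Resolve an already-open fd back to its canonical path via /proc/self/fd so that the
 * namespace accessibility check can be applied to the real location of a library that
 * was found through DT_RPATH. The resolved path is copied into task->buf. */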
5516 static bool resolve_fd_to_realpath(struct loadtask *task)
5517 {
5518 char proc_self_fd[32];
5519 static char resolved_path[PATH_MAX];
5520
5521 int ret = snprintf(proc_self_fd, sizeof(proc_self_fd), "/proc/self/fd/%d", task->fd);
5522 if (ret < 0 || ret >= sizeof(proc_self_fd)) {
5523 return false;
5524 }
5525 ssize_t len = readlink(proc_self_fd, resolved_path, sizeof(resolved_path) - 1);
5526 if (len < 0) {
5527 return false;
5528 }
5529 resolved_path[len] = '\0';
5530 strncpy(task->buf, resolved_path, PATH_MAX);
5531
5532 return true;
5533 }
5534
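/* Locate a library by name or path, honouring linker namespaces, uncompressed
 * libraries inside zip files, DT_RPATH/DT_RUNPATH and inherited namespaces; then map
 * its headers and allocate the struct dso, including the static TLS reservation that
 * lets already-running threads obtain copies of the new module's TLS. The load
 * segments themselves are mapped later by task_load_library(). */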
5535 static bool load_library_header(struct loadtask *task)
5536 {
5537 const char *name = task->name;
5538 struct dso *needed_by = task->needed_by;
5539 ns_t *namespace = task->namespace;
5540 bool check_inherited = task->check_inherited;
5541 struct zip_info z_info;
5542
5543 bool map = false;
5544 struct stat st;
5545 size_t alloc_size;
5546 int n_th = 0;
5547 int is_self = 0;
5548
5549 if (!*name) {
5550 errno = EINVAL;
5551 return false;
5552 }
5553
5554 /* Catch and block attempts to reload the implementation itself */
5555 if (name[NAME_INDEX_ZERO] == 'l' && name[NAME_INDEX_ONE] == 'i' && name[NAME_INDEX_TWO] == 'b') {
5556 static const char reserved[] =
5557 "c.pthread.rt.m.dl.util.xnet.";
5558 const char *rp, *next;
5559 for (rp = reserved; *rp; rp = next) {
5560 next = strchr(rp, '.') + 1;
5561 if (strncmp(name + NAME_INDEX_THREE, rp, next - rp) == 0) {
5562 break;
5563 }
5564 }
5565 if (*rp) {
5566 if (ldd_mode) {
5567 /* Track which names have been resolved
5568 * and only report each one once. */
5569 static unsigned reported;
5570 unsigned mask = 1U << (rp - reserved);
5571 if (!(reported & mask)) {
5572 reported |= mask;
5573 dprintf(1, "\t%s => %s (%p)\n",
5574 name, ldso.name,
5575 ldso.base);
5576 }
5577 }
5578 is_self = 1;
5579 }
5580 }
5581 if (!strcmp(name, ldso.name)) {
5582 is_self = 1;
5583 }
5584 if (is_self) {
5585 if (!ldso.prev) {
5586 tail->next = &ldso;
5587 ldso.prev = tail;
5588 tail = &ldso;
5589 ldso.namespace = namespace;
5590 ns_add_dso(namespace, &ldso);
5591 }
5592 task->isloaded = true;
5593 task->p = &ldso;
5594 return true;
5595 }
5596 if (strchr(name, '/')) {
5597 char *separator = strstr(name, ZIP_FILE_PATH_SEPARATOR);
5598 if (separator != NULL) {
5599 int res = open_uncompressed_library_in_zipfile(name, &z_info, separator);
5600 if (!res) {
5601 task->pathname = name;
5602 if (!is_accessible(namespace, task->pathname, g_is_asan, check_inherited)) {
5603 LD_LOGE("Open uncompressed library: check ns accessible failed, pathname %{public}s namespace %{public}s.",
5604 task->pathname, namespace ? namespace->ns_name : "NULL");
5605 task->fd = -1;
5606 } else {
5607 task->fd = z_info.fd;
5608 task->file_offset = z_info.file_offset;
5609 }
5610 } else {
5611 LD_LOGE("Open uncompressed library in zip file failed, name:%{public}s res:%{public}d", name, res);
5612 return false;
5613 }
5614 } else {
5615 task->pathname = name;
5616 if (!is_accessible(namespace, task->pathname, g_is_asan, check_inherited)) {
5617 LD_LOGE("Open absolute_path library: check ns accessible failed, pathname %{public}s namespace %{public}s.",
5618 task->pathname, namespace ? namespace->ns_name : "NULL");
5619 task->fd = -1;
5620 } else {
5621 task->fd = open(name, O_RDONLY | O_CLOEXEC);
5622 }
5623 }
5624 } else {
5625 /* Search for the name to see if it's already loaded */
5626 /* Search in namespace */
5627 task->p = find_library_by_name(name, namespace, check_inherited);
5628 if (task->p) {
5629 task->isloaded = true;
5630 LD_LOGD("find_library_by_name(name=%{public}s ns=%{public}s) already loaded by %{public}s in %{public}s namespace ",
5631 name, namespace->ns_name, task->p->name, task->p->namespace->ns_name);
5632 return true;
5633 }
5634 if (strlen(name) > NAME_MAX) {
5635 LD_LOGE("load_library name length is larger than NAME_MAX:%{public}s.", name);
5636 return false;
5637 }
5638 task->fd = -1;
5639 if (namespace->env_paths) {
5640 open_library_by_path(name, namespace->env_paths, task, &z_info);
5641 }
5642 for (task->p = needed_by; task->fd == -1 && task->p; task->p = task->p->needed_by) {
5643 if (fixup_rpath(task->p, task->buf, sizeof task->buf) < 0) {
5644 task->fd = INVALID_FD_INHIBIT_FURTHER_SEARCH; /* Inhibit further search. */
5645 }
5646 if (task->p->rpath) {
5647 open_library_by_path(name, task->p->rpath, task, &z_info);
5648 if (task->fd != -1 && resolve_fd_to_realpath(task)) {
5649 if (!is_accessible(namespace, task->buf, g_is_asan, check_inherited)) {
5650 LD_LOGE("Open library: check ns accessible failed, name %{public}s namespace %{public}s.",
5651 name, namespace ? namespace->ns_name : "NULL");
5652 close(task->fd);
5653 task->fd = -1;
5654 }
5655 }
5656 }
5657 }
5658 if (g_is_asan) {
5659 handle_asan_path_open_by_task(task->fd, name, namespace, task, &z_info);
5660 LD_LOGD("load_library handle_asan_path_open_by_task fd:%{public}d.", task->fd);
5661 } else {
5662 if (task->fd == -1 && namespace->lib_paths) {
5663 open_library_by_path(name, namespace->lib_paths, task, &z_info);
5664 LD_LOGD("load_library no asan lib_paths path_open fd:%{public}d.", task->fd);
5665 }
5666 }
5667 task->pathname = task->buf;
5668 }
5669 if (task->fd < 0) {
5670 if (!check_inherited || !namespace->ns_inherits) {
5671 LD_LOGE("Error loading header %{public}s, namespace %{public}s has no inherits, errno=%{public}d",
5672 task->name, namespace->ns_name, errno);
5673 return false;
5674 }
5675 /* Load lib in inherited namespace. Do not check inherited again.*/
5676 for (size_t i = 0; i < namespace->ns_inherits->num; i++) {
5677 ns_inherit *inherit = namespace->ns_inherits->inherits[i];
5678 if (strchr(name, '/') == 0 && !is_sharable(inherit, name)) {
5679 continue;
5680 }
5681 task->namespace = inherit->inherited_ns;
5682 task->check_inherited = false;
5683 if (load_library_header(task)) {
5684 return true;
5685 }
5686 }
5687 LD_LOGE("Error loading header: can't find library %{public}s in namespace: %{public}s",
5688 task->name, namespace->ns_name);
5689 return false;
5690 }
5691
5692 if (fstat(task->fd, &st) < 0) {
5693 LD_LOGE("Error loading header %{public}s: failed to get file state errno=%{public}d", task->name, errno);
5694 close(task->fd);
5695 task->fd = -1;
5696 return false;
5697 }
5698 /* Search in namespace */
5699 task->p = find_library_by_fstat(&st, namespace, check_inherited, task->file_offset);
5700 if (task->p) {
5701 /* If this library was previously loaded with a
5702 * pathname but a search found the same inode,
5703 * setup its shortname so it can be found by name. */
5704 if (!task->p->shortname && task->pathname != name) {
5705 task->p->shortname = strrchr(task->p->name, '/') + 1;
5706 }
5707 close(task->fd);
5708 task->fd = -1;
5709 task->isloaded = true;
5710 LD_LOGD("find_library_by_fstat(name=%{public}s ns=%{public}s) already loaded by %{public}s in %{public}s namespace ",
5711 name, namespace->ns_name, task->p->name, task->p->namespace->ns_name);
5712 return true;
5713 }
5714
5715 map = noload ? 0 : map_library_header(task);
5716 if (!map) {
5717 LD_LOGE("Error loading header %{public}s: failed to map header", task->name);
5718 close(task->fd);
5719 task->fd = -1;
5720 return false;
5721 }
5722
5723 /* Allocate storage for the new DSO. When there is TLS, this
5724 * storage must include a reservation for all pre-existing
5725 * threads to obtain copies of both the new TLS, and an
5726 * extended DTV capable of storing an additional slot for
5727 * the newly-loaded DSO. */
5728 alloc_size = sizeof(struct dso) + strlen(task->pathname) + 1;
5729 if (runtime && task->tls.size) {
5730 size_t per_th = task->tls.size + task->tls.align + sizeof(void *) * (tls_cnt + TLS_CNT_INCREASE);
5731 n_th = libc.threads_minus_1 + 1;
5732 if (n_th > SSIZE_MAX / per_th) {
5733 alloc_size = SIZE_MAX;
5734 } else {
5735 alloc_size += n_th * per_th;
5736 }
5737 }
5738 task->p = calloc(1, alloc_size);
5739 if (!task->p) {
5740 LD_LOGE("Error loading header %{public}s: failed to allocate dso", task->name);
5741 close(task->fd);
5742 task->fd = -1;
5743 return false;
5744 }
5745 task->p->dev = st.st_dev;
5746 task->p->ino = st.st_ino;
5747 task->p->file_offset = task->file_offset;
5748 task->p->needed_by = needed_by;
5749 task->p->name = task->p->buf;
5750 strcpy(task->p->name, task->pathname);
5751 task->p->tls = task->tls;
5752 task->p->dynv = task->dyn_addr;
5753 task->p->strings = task->str_addr;
5754 size_t rpath_offset;
5755 size_t runpath_offset;
5756 if (search_vec(task->p->dynv, &rpath_offset, DT_RPATH))
5757 task->p->rpath_orig = task->p->strings + rpath_offset;
5758 if (search_vec(task->p->dynv, &runpath_offset, DT_RUNPATH))
5759 task->p->rpath_orig = task->p->strings + runpath_offset;
5760
5761 /* Add a shortname only if name arg was not an explicit pathname. */
5762 if (task->pathname != name) {
5763 task->p->shortname = strrchr(task->p->name, '/') + 1;
5764 }
5765
5766 if (task->p->tls.size) {
5767 task->p->tls_id = ++tls_cnt;
5768 task->p->new_dtv = (void *)(-sizeof(size_t) &
5769 (uintptr_t)(task->p->name + strlen(task->p->name) + sizeof(size_t)));
5770 task->p->new_tls = (void *)(task->p->new_dtv + n_th * (tls_cnt + 1));
5771 }
5772
5773 tail->next = task->p;
5774 task->p->prev = tail;
5775 tail = task->p;
5776
5777 /* Add dso to namespace */
5778 task->p->namespace = namespace;
5779 ns_add_dso(namespace, task->p);
5780 return true;
5781 }
5782
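/* Second stage of loading: map the load segments of a library whose headers were
 * already processed by load_library_header(). If the mapped object turns out to be
 * another copy of libc (detected via __libc_start_main and stdin), it is dropped and
 * the request is redirected to the already-loaded libc.so. */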
5783 static void task_load_library(struct loadtask *task, struct reserved_address_params *reserved_params)
5784 {
5785 LD_LOGD("load_library loading ns=%{public}s name=%{public}s by_dlopen=%{public}d", task->namespace->ns_name, task->p->name, runtime);
5786 bool map = noload ? 0 : task_map_library(task, reserved_params);
5787 __close(task->fd);
5788 task->fd = -1;
5789 if (!map) {
5790 LD_LOGE("Error loading library %{public}s: failed to map library noload=%{public}d errno=%{public}d",
5791 task->name, noload, errno);
5792 error("Error loading library %s: failed to map library noload=%d errno=%d", task->name, noload, errno);
5793 if (runtime) {
5794 longjmp(*rtld_fail, 1);
5795 }
5796 return;
5797 }
5798
5799 /* Avoid the danger of getting two versions of libc mapped into the
5800 * same process when an absolute pathname was used. The symbols
5801 * checked are chosen to catch both musl and glibc, and to avoid
5802 * false positives from interposition-hack libraries. */
5803 decode_dyn(task->p);
5804 if (find_sym(task->p, "__libc_start_main", 1).sym &&
5805 find_sym(task->p, "stdin", 1).sym) {
5806 do_dlclose(task->p, 0);
5807 task->p = NULL;
5808 free((void*)task->name);
5809 task->name = ld_strdup("libc.so");
5810 task->check_inherited = true;
5811 if (!load_library_header(task)) {
5812 LD_LOGE("Error loading library %{public}s: failed to load libc.so", task->name);
5813 error("Error loading library %s: failed to load libc.so", task->name);
5814 if (runtime) {
5815 longjmp(*rtld_fail, 1);
5816 }
5817 }
5818 return;
5819 }
5820 /* Past this point, if we haven't reached runtime yet, ldso has
5821 * committed either to use the mapped library or to abort execution.
5822 * Unmapping is not possible, so we can safely reclaim gaps. */
5823 if (!runtime) {
5824 reclaim_gaps(task->p);
5825 }
5826 task->p->runtime_loaded = runtime;
5827 if (runtime)
5828 task->p->by_dlopen = 1;
5829
5830 ++gencnt;
5831
5832 if (DL_FDPIC) {
5833 makefuncdescs(task->p);
5834 }
5835
5836 if (ldd_mode) {
5837 dprintf(1, "\t%s => %s (%p)\n", task->name, task->pathname, task->p->base);
5838 }
5839
5840 #ifdef ENABLE_HWASAN
5841 if (libc.load_hook) {
5842 libc.load_hook((long unsigned int)task->p->base, task->p->phdr, task->p->phnum);
5843 }
5844 #endif
5845 }
5846
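/* Create a load task for every DT_NEEDED entry of p and record the resulting dsos in
 * p->deps. For the main executable (head), preloaded libraries are counted and added
 * as direct pseudo-dependencies as well. */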
5847 static void preload_direct_deps(struct dso *p, ns_t *namespace, struct loadtasks *tasks)
5848 {
5849 size_t i, cnt = 0;
5850 if (p->deps) {
5851 return;
5852 }
5853 /* For head, all preloads are direct pseudo-dependencies.
5854 * Count and include them now to avoid realloc later. */
5855 if (p == head) {
5856 for (struct dso *q = p->next; q; q = q->next) {
5857 cnt++;
5858 }
5859 }
5860 for (i = 0; p->dynv[i]; i += NEXT_DYNAMIC_INDEX) {
5861 if (p->dynv[i] == DT_NEEDED) {
5862 cnt++;
5863 }
5864 }
5865 /* Use builtin buffer for apps with no external deps, to
5866 * preserve property of no runtime failure paths. */
5867 p->deps = (p == head && cnt < MIN_DEPS_COUNT) ? builtin_deps :
5868 calloc(cnt + 1, sizeof *p->deps);
5869 if (!p->deps) {
5870 LD_LOGE("Error loading dependencies for %{public}s", p->name);
5871 error("Error loading dependencies for %s", p->name);
5872 if (runtime) {
5873 longjmp(*rtld_fail, 1);
5874 }
5875 }
5876 cnt = 0;
5877 if (p == head) {
5878 for (struct dso *q = p->next; q; q = q->next) {
5879 p->deps[cnt++] = q;
5880 }
5881 }
5882 for (i = 0; p->dynv[i]; i += NEXT_DYNAMIC_INDEX) {
5883 if (p->dynv[i] != DT_NEEDED) {
5884 continue;
5885 }
5886 const char* dtneed_name = p->strings + p->dynv[i + 1];
5887 LD_LOGD("load_library %{public}s adding DT_NEEDED task %{public}s namespace(%{public}s)", p->name, dtneed_name, namespace->ns_name);
5888 struct loadtask *task = create_loadtask(dtneed_name, p, namespace, true);
5889 if (!task) {
5890 LD_LOGE("Error loading dependencies %{public}s : create load task failed", p->name);
5891 error("Error loading dependencies for %s : create load task failed", p->name);
5892 if (runtime) {
5893 longjmp(*rtld_fail, 1);
5894 }
5895 continue;
5896 }
5897 LD_LOGD("loading shared library %{public}s: (needed by %{public}s)", p->strings + p->dynv[i+1], p->name);
5898 if (!load_library_header(task)) {
5899 free_task(task);
5900 task = NULL;
5901 LD_LOGE("Error loading shared library %{public}s: (needed by %{public}s)",
5902 p->strings + p->dynv[i + 1],
5903 p->name);
5904 error("Error loading shared library %s: %m (needed by %s)",
5905 p->strings + p->dynv[i + 1], p->name);
5906 if (runtime) {
5907 longjmp(*rtld_fail, 1);
5908 }
5909 continue;
5910 }
5911 p->deps[cnt++] = task->p;
5912 if (task->isloaded) {
5913 free_task(task);
5914 task = NULL;
5915 } else {
5916 append_loadtasks(tasks, task);
5917 }
5918 }
5919 p->deps[cnt] = 0;
5920 p->ndeps_direct = cnt;
5921 for (i = 0; i < p->ndeps_direct; i++) {
5922 add_dso_parent(p->deps[i], p);
5923 }
5924 }
5925
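/* Release the temporary PT_DYNAMIC and string-table mappings that map_library_header()
 * created for each pending load task, clearing the dso pointers that referred to them. */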
5926 static void unmap_preloaded_sections(struct loadtasks *tasks)
5927 {
5928 struct loadtask *task = NULL;
5929 for (size_t i = 0; i < tasks->length; i++) {
5930 task = get_loadtask(tasks, i);
5931 if (!task) {
5932 continue;
5933 }
5934 if (task->dyn_map_len) {
5935 munmap(task->dyn_map, task->dyn_map_len);
5936 task->dyn_map = NULL;
5937 task->dyn_map_len = 0;
5938 if (task->p) {
5939 task->p->dynv = NULL;
5940 }
5941 }
5942 if (task->str_map_len) {
5943 munmap(task->str_map, task->str_map_len);
5944 task->str_map = NULL;
5945 task->str_map_len = 0;
5946 if (task->p) {
5947 task->p->strings = NULL;
5948 }
5949 }
5950 }
5951 }
5952
5953 static void preload_deps(struct dso *p, struct loadtasks *tasks)
5954 {
5955 if (p->deps) {
5956 return;
5957 }
5958 for (; p; p = p->next) {
5959 preload_direct_deps(p, p->namespace, tasks);
5960 }
5961 }
5962
5963 static void run_loadtasks(struct loadtasks *tasks, struct reserved_address_params *reserved_params)
5964 {
5965 struct loadtask *task = NULL;
5966 bool reserved_address = false;
5967 for (size_t i = 0; i < tasks->length; i++) {
5968 task = get_loadtask(tasks, i);
5969 if (task) {
5970 if (reserved_params) {
5971 reserved_address = reserved_params->reserved_address_recursive || (reserved_params->target == task->p);
5972 }
5973 task_load_library(task, reserved_address ? reserved_params : NULL);
5974 }
5975 }
5976 }
5977
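/* Assign each module's static TLS offset, growing the global tls_align/tls_offset
 * bookkeeping and linking the module's TLS record into the libc.tls_head list. The
 * layout direction differs between TLS_ABOVE_TP and below-TP targets. */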
5978 UT_STATIC void assign_tls(struct dso *p)
5979 {
5980 while (p) {
5981 if (p->tls.image) {
5982 tls_align = MAXP2(tls_align, p->tls.align);
5983 #ifdef TLS_ABOVE_TP
5984 p->tls.offset = tls_offset + ((p->tls.align - 1) &
5985 (-tls_offset + (uintptr_t)p->tls.image));
5986 tls_offset = p->tls.offset + p->tls.size;
5987 #else
5988 tls_offset += p->tls.size + p->tls.align - 1;
5989 tls_offset -= (tls_offset + (uintptr_t)p->tls.image)
5990 & (p->tls.align - 1);
5991 p->tls.offset = tls_offset;
5992 #endif
5993 if (tls_tail) {
5994 tls_tail->next = &p->tls;
5995 } else {
5996 libc.tls_head = &p->tls;
5997 }
5998 tls_tail = &p->tls;
5999 }
6000
6001 p = p->next;
6002 }
6003 }
6004
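/* Parse a colon- or whitespace-separated list of library names (LD_PRELOAD style) and
 * queue a load task for each entry in the given namespace. */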
6005 UT_STATIC void load_preload(char *s, ns_t *ns, struct loadtasks *tasks)
6006 {
6007 int tmp;
6008 char *z;
6009
6010 struct loadtask *task = NULL;
6011 for (z = s; *z; s = z) {
6012 for (; *s && (isspace(*s) || *s == ':'); s++) {
6013 ;
6014 }
6015 for (z = s; *z && !isspace(*z) && *z != ':'; z++) {
6016 ;
6017 }
6018 tmp = *z;
6019 *z = 0;
6020 task = create_loadtask(s, NULL, ns, true);
6021 if (!task) {
6022 continue;
6023 }
6024 if (load_library_header(task)) {
6025 if (!task->isloaded) {
6026 append_loadtasks(tasks, task);
6027 task = NULL;
6028 }
6029 }
6030 if (task) {
6031 free_task(task);
6032 }
6033 *z = tmp;
6034 }
6035 }
6036 #endif
6037
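/* Write the relocated GNU_RELRO segment of dso to fd at *file_offset, then map that
 * region back read-only from the file, presumably so other processes can reuse the
 * identical pages through DL_EXT_USE_RELRO / map_gnu_relro(). */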
6038 static int serialize_gnu_relro(int fd, struct dso *dso, ssize_t *file_offset)
6039 {
6040 ssize_t count = dso->relro_end - dso->relro_start;
6041 ssize_t offset = 0;
6042 while (count > 0) {
6043 ssize_t write_size = TEMP_FAILURE_RETRY(write(fd, laddr(dso, dso->relro_start + offset), count));
6044 if (-1 == write_size) {
6045 LD_LOGE("Error serializing relro %{public}s: failed to write GNU_RELRO", dso->name);
6046 return -1;
6047 }
6048 offset += write_size;
6049 count -= write_size;
6050 }
6051
6052 ssize_t size = dso->relro_end - dso->relro_start;
6053 void *map = mmap(
6054 laddr(dso, dso->relro_start),
6055 size,
6056 PROT_READ,
6057 MAP_PRIVATE | MAP_FIXED,
6058 fd,
6059 *file_offset);
6060 if (map == MAP_FAILED) {
6061 LD_LOGE("Error serializing relro %{public}s: failed to map GNU_RELRO", dso->name);
6062 return -1;
6063 }
6064 *file_offset += size;
6065 return 0;
6066 }
6067
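/* Compare the in-memory GNU_RELRO segment of dso with the copy serialized in fd and
 * remap every run of identical pages read-only from the file, so unmodified RELRO
 * pages can be backed by the shared file instead of staying as dirty private pages. */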
6068 static int map_gnu_relro(int fd, struct dso *dso, ssize_t *file_offset)
6069 {
6070 ssize_t ext_fd_file_size = 0;
6071 struct stat ext_fd_file_stat;
6072 if (TEMP_FAILURE_RETRY(fstat(fd, &ext_fd_file_stat)) != 0) {
6073 LD_LOGE("Error mapping relro %{public}s: failed to get file state", dso->name);
6074 return -1;
6075 }
6076 ext_fd_file_size = ext_fd_file_stat.st_size;
6077
6078 void *ext_temp_map = MAP_FAILED;
6079 ext_temp_map = mmap(NULL, ext_fd_file_size, PROT_READ, MAP_PRIVATE, fd, 0);
6080 if (ext_temp_map == MAP_FAILED) {
6081 LD_LOGE("Error mapping relro %{public}s: failed to map fd", dso->name);
6082 return -1;
6083 }
6084
6085 char *file_base = (char *)(ext_temp_map) + *file_offset;
6086 char *mem_base = (char *)(laddr(dso, dso->relro_start));
6087 ssize_t start_offset = 0;
6088 ssize_t size = dso->relro_end - dso->relro_start;
6089
6090 if (size > ext_fd_file_size - *file_offset) {
6091 LD_LOGE("Error mapping relro %{public}s: invalid file size", dso->name);
munmap(ext_temp_map, ext_fd_file_size);
6092 return -1;
6093 }
6094 while (start_offset < size) {
6095 // Find start location.
6096 while (start_offset < size) {
6097 if (memcmp(mem_base + start_offset, file_base + start_offset, PAGE_SIZE) == 0) {
6098 break;
6099 }
6100 start_offset += PAGE_SIZE;
6101 }
6102
6103 // Find end location.
6104 ssize_t end_offset = start_offset;
6105 while (end_offset < size) {
6106 if (memcmp(mem_base + end_offset, file_base + end_offset, PAGE_SIZE) != 0) {
6107 break;
6108 }
6109 end_offset += PAGE_SIZE;
6110 }
6111
6112 // Map pages.
6113 ssize_t map_length = end_offset - start_offset;
6114 ssize_t map_offset = *file_offset + start_offset;
6115 if (map_length > 0) {
6116 void *map = mmap(
6117 mem_base + start_offset, map_length, PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, map_offset);
6118 if (map == MAP_FAILED) {
6119 LD_LOGE("Error mapping relro %{public}s: failed to map GNU_RELRO", dso->name);
6120 munmap(ext_temp_map, ext_fd_file_size);
6121 return -1;
6122 }
6123 }
6124
6125 start_offset = end_offset;
6126 }
6127 *file_offset += size;
6128 munmap(ext_temp_map, ext_fd_file_size);
6129 return 0;
6130 }
6131
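/* Dispatch on the dl_extinfo flags: DL_EXT_WRITE_RELRO serializes this dso's RELRO
 * segment into extinfo->relro_fd, DL_EXT_USE_RELRO maps it back from that fd;
 * relro_fd_offset tracks the running offset across dsos. */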
6132 static void handle_relro_sharing(struct dso *p, const dl_extinfo *extinfo, ssize_t *relro_fd_offset)
6133 {
6134 if (extinfo == NULL) {
6135 return;
6136 }
6137 if (extinfo->flag & DL_EXT_WRITE_RELRO) {
6138 LD_LOGD("Serializing GNU_RELRO %{public}s", p->name);
6139 if (serialize_gnu_relro(extinfo->relro_fd, p, relro_fd_offset) < 0) {
6140 LD_LOGE("Error serializing GNU_RELRO %{public}s", p->name);
6141 error("Error serializing GNU_RELRO");
6142 if (runtime) longjmp(*rtld_fail, 1);
6143 }
6144 } else if (extinfo->flag & DL_EXT_USE_RELRO) {
6145 LD_LOGD("Mapping GNU_RELRO %{public}s", p->name);
6146 if (map_gnu_relro(extinfo->relro_fd, p, relro_fd_offset) < 0) {
6147 LD_LOGE("Error mapping GNU_RELRO %{public}s", p->name);
6148 error("Error mapping GNU_RELRO");
6149 if (runtime) longjmp(*rtld_fail, 1);
6150 }
6151 }
6152 }
6153
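/* Label the anonymous pages backing a library's zero-initialized data as
 * "<libname>.bss" via PR_SET_VMA_ANON_NAME so they are identifiable in
 * /proc/<pid>/maps. */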
6154 static void set_bss_vma_name(char *path_name, void *addr, size_t zeromap_size)
6155 {
6156 char so_bss_name[ANON_NAME_MAX_LEN];
6157 if (path_name == NULL) {
6158 snprintf(so_bss_name, ANON_NAME_MAX_LEN, ".bss");
6159 } else {
6160 char *t = strrchr(path_name, '/');
6161 if (t) {
6162 snprintf(so_bss_name, ANON_NAME_MAX_LEN, "%s.bss", ++t);
6163 } else {
6164 snprintf(so_bss_name, ANON_NAME_MAX_LEN, "%s.bss", path_name);
6165 }
6166 }
6167
6168 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, zeromap_size, so_bss_name);
6169 }
6170
6171 static void find_and_set_bss_name(struct dso *p)
6172 {
6173 size_t cnt;
6174 Phdr *ph = p->phdr;
6175 for (cnt = p->phnum; cnt--; ph = (void *)((char *)ph + p->phentsize)) {
6176 if (ph->p_type != PT_LOAD) continue;
6177 size_t seg_start = p->base + ph->p_vaddr;
6178 size_t seg_file_end = seg_start + ph->p_filesz + PAGE_SIZE - 1 & -PAGE_SIZE;
6179 size_t seg_max_addr = seg_start + ph->p_memsz + PAGE_SIZE - 1 & -PAGE_SIZE;
6180 size_t zeromap_size = seg_max_addr - seg_file_end;
6181 if (zeromap_size > 0 && (ph->p_flags & PF_W)) {
6182 set_bss_vma_name(p->name, (void *)seg_file_end, zeromap_size);
6183 }
6184 }
6185 }
6186
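/* Publish the initial link map through the r_debug-style debug interface: record every
 * loaded dso and signal RT_CONSISTENT via the _dl_debug_state() breakpoint so an
 * attached debugger can walk the list. */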
6187 static void sync_with_debugger(void)
6188 {
6189 debug.ver = 1;
6190 debug.bp = dl_debug_state;
6191 debug.head = NULL;
6192 debug.base = ldso.base;
6193
6194 add_dso_info_to_debug_map(head);
6195
6196 debug.state = RT_CONSISTENT;
6197 _dl_debug_state();
6198 }
6199
6200 static void notify_addition_to_debugger(struct dso *p)
6201 {
6202 debug.state = RT_ADD;
6203 _dl_debug_state();
6204
6205 add_dso_info_to_debug_map(p);
6206
6207 debug.state = RT_CONSISTENT;
6208 _dl_debug_state();
6209 }
6210
6211 static void notify_remove_to_debugger(struct dso *p)
6212 {
6213 debug.state = RT_DELETE;
6214 _dl_debug_state();
6215
6216 remove_dso_info_from_debug_map(p);
6217
6218 debug.state = RT_CONSISTENT;
6219 _dl_debug_state();
6220 }
6221
6222 static void add_dso_info_to_debug_map(struct dso *p)
6223 {
6224 for (struct dso *so = p; so != NULL; so = so->next) {
6225 struct dso_debug_info *debug_info = malloc(sizeof(struct dso_debug_info));
6226 if (debug_info == NULL) {
6227 LD_LOGE("malloc error! dso name: %{public}s.", so->name);
6228 continue;
6229 }
6230 #if DL_FDPIC
6231 debug_info->loadmap = so->loadmap;
6232 #else
6233 debug_info->base = so->base;
6234 #endif
6235 debug_info->name = so->name;
6236 debug_info->dynv = so->dynv;
6237 if (debug.head == NULL) {
6238 debug_info->prev = NULL;
6239 debug_info->next = NULL;
6240 debug.head = debug_tail = debug_info;
6241 } else {
6242 debug_info->prev = debug_tail;
6243 debug_info->next = NULL;
6244 debug_tail->next = debug_info;
6245 debug_tail = debug_info;
6246 }
6247 so->debug_info = debug_info;
6248 }
6249 }
6250
6251 static void remove_dso_info_from_debug_map(struct dso *p)
6252 {
6253 struct dso_debug_info *debug_info = p->debug_info;
6254 if (debug_info == debug_tail) {
6255 debug_tail = debug_tail->prev;
6256 debug_tail->next = NULL;
6257 } else {
6258 debug_info->next->prev = debug_info->prev;
6259 debug_info->prev->next = debug_info->next;
6260 }
6261 free(debug_info);
6262 }
6263
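/* Reference counting used by the __cxa_thread_atexit machinery: each dso_handle passed
 * in is counted, and the owning dso (plus its dependency closure when it was loaded by
 * dlopen) is pinned via nr_dlopen until the count drops back to zero, at which point
 * do_dlclose() is called. */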
6264 typedef struct dso_handle_node {
6265 void *dso_handle; // Used to locate the dso.
6266 uint32_t count;
6267 struct dso* dso;
6268 struct dso_handle_node* next;
6269 } dso_handle_node;
6270
6271 static dso_handle_node* dso_handle_list = NULL;
6272
6273 dso_handle_node* find_dso_handle_node(void *dso_handle)
6274 {
6275 dso_handle_node *cur = dso_handle_list;
6276 while (cur) {
6277 if (cur->dso_handle == dso_handle) {
6278 return cur;
6279 }
6280 cur = cur->next;
6281 }
6282 return NULL;
6283 }
6284
6285 void add_dso_handle_node(void *dso_handle)
6286 {
6287 pthread_rwlock_wrlock(&lock);
6288 if (!dso_handle) {
6289 LD_LOGW("[cxa_thread] add_dso_handle_node return because dso_handle is null.\n");
6290 pthread_rwlock_unlock(&lock);
6291 return;
6292 }
6293
6294 dso_handle_node *node = find_dso_handle_node(dso_handle);
6295 if (node) {
6296 node->count++;
6297 LD_LOGD("[cxa_thread] increase dso node count of %{public}s, count:%{public}d ", node->dso->name, node->count);
6298 pthread_rwlock_unlock(&lock);
6299 return;
6300 }
6301 dso_handle_node *cur = __libc_malloc(sizeof(*cur));
6302 if (!cur) {
6303 pthread_rwlock_unlock(&lock);
6304 LD_LOGE("[cxa_thread] alloc dso_handle_node failed.");
6305 error("[cxa_thread]: alloc dso_handle_node failed.");
6306 return;
6307 }
6308
6309 struct dso* p = addr2dso(dso_handle);
6310 if (!p) {
6311 pthread_rwlock_unlock(&lock);
6312 LD_LOGE("[cxa_thread] can't find dso by dso_handle(%{public}p)", dso_handle);
6313 error("[cxa_thread] can't find dso by dso_handle(%p)", dso_handle);
6314 return;
6315 }
6316
6317 // We don't need to care about the so which by_dlopen is false because it will never be unload.
6318 if (p->by_dlopen) {
6319 p->nr_dlopen++;
6320 LD_LOGD("[cxa_thread] %{public}s nr_dlopen++ when add dso_handle for %{public}s, nr_dlopen:%{public}d",
6321 p->name, p->name, p->nr_dlopen);
6322 if (p->bfs_built) {
6323 for (size_t i = 0; p->deps[i]; i++) {
6324 p->deps[i]->nr_dlopen++;
6325 LD_LOGD("[cxa_thread] %{public}s nr_dlopen++ when add dso_handle for %{public}s, nr_dlopen:%{public}d",
6326 p->deps[i]->name, p->name, p->deps[i]->nr_dlopen);
6327 }
6328 } else {
6329 // Get all the direct and indirect deps.
6330 extend_bfs_deps(p, 1);
6331 for (size_t i = 0; p->deps_all[i]; i++) {
6332 p->deps_all[i]->nr_dlopen++;
6333 LD_LOGD("[cxa_thread] %{public}s nr_dlopen++ when add dso_handle for %{public}s, nr_dlopen:%{public}d",
6334 p->deps_all[i]->name, p->name, p->deps_all[i]->nr_dlopen);
6335 }
6336 }
6337 }
6338 cur->dso = p;
6339 cur->dso_handle = dso_handle;
6340 cur->count = 1;
6341 cur->next = dso_handle_list;
6342 dso_handle_list = cur;
6343 pthread_rwlock_unlock(&lock);
6344
6345 return;
6346 }
6347
6348 void remove_dso_handle_node(void *dso_handle)
6349 {
6350 pthread_rwlock_wrlock(&lock);
6351 if (dso_handle == NULL) {
6352 LD_LOGW("[cxa_thread] remove_dso_handle_node return because dso_handle is null.\n");
6353 pthread_rwlock_unlock(&lock);
6354 return;
6355 }
6356
6357 dso_handle_node *node = find_dso_handle_node(dso_handle);
6358 if (node && node->count) {
6359 LD_LOGD("[cxa_thread] decrease dso node count of %{public}s, count:%{public}d ", node->dso->name, node->count - 1);
6360 if ((--node->count) == 0) {
6361 LD_LOGD("[cxa_thread] call do_dlclose(%{public}s) when count is 0", node->dso->name);
6362 do_dlclose(node->dso, 1);
6363 // Invalidate current node.
6364 node->dso_handle = NULL;
6365 }
6366 pthread_rwlock_unlock(&lock);
6367 return;
6368 } else {
6369 LD_LOGE("[cxa_thread] can't find matched dso handle node by %{public}p, count:%{public}d", dso_handle, node->count);
6370 error("[cxa_thread] can't find matched dso handle node by %p, count:%d", dso_handle, node->count);
6371 }
6372 pthread_rwlock_unlock(&lock);
6373
6374 return;
6375 }