1 #define _GNU_SOURCE
2 #define SYSCALL_NO_TLS 1
3
4 #include "dynlink.h"
5
6 #include <stdbool.h>
7 #include <stdlib.h>
8 #include <stdarg.h>
9 #include <stddef.h>
10 #include <string.h>
11 #include <unistd.h>
12 #include <stdint.h>
13 #include <elf.h>
14 #include <sys/mman.h>
15 #include <limits.h>
16 #include <fcntl.h>
17 #include <sys/stat.h>
18 #include <errno.h>
19 #include <link.h>
20 #include <setjmp.h>
21 #include <pthread.h>
22 #include <ctype.h>
23 #include <dlfcn.h>
24 #include <semaphore.h>
25 #include <sys/membarrier.h>
26 #include <sys/time.h>
27 #include <time.h>
28 #include <sys/prctl.h>
29 #include <sys/queue.h>
30
31 #include "cfi.h"
32 #include "dlfcn_ext.h"
33 #include "dynlink_rand.h"
34 #include "ld_log.h"
35 #include "libc.h"
36 #include "namespace.h"
37 #include "ns_config.h"
38 #include "pthread_impl.h"
39 #include "fork_impl.h"
40 #include "strops.h"
41 #include "trace/trace_marker.h"
42
43 #ifdef OHOS_ENABLE_PARAMETER
44 #include "sys_param.h"
45 #endif
46 #ifdef LOAD_ORDER_RANDOMIZATION
47 #include "zip_archive.h"
48 #endif
49
50 #define malloc __libc_malloc
51 #define calloc __libc_calloc
52 #define realloc __libc_realloc
53 #define free __libc_free
54
55 static void error(const char *, ...);
56
57 #define MAXP2(a,b) (-(-(a)&-(b)))
58 #define ALIGN(x,y) ((x)+(y)-1 & -(y))
59 #define GNU_HASH_FILTER(ght, ghm, gho) \
60 const size_t *bloomwords = (const void *)(ght+4); \
61 size_t f = bloomwords[gho & (ght[2]-1)]; \
62 if (!(f & ghm)) continue; \
63 f >>= (gh >> ght[3]) % (8 * sizeof f); \
64 if (!(f & 1)) continue;
65
66 #define container_of(p,t,m) ((t*)((char *)(p)-offsetof(t,m)))
67 #define countof(a) ((sizeof (a))/(sizeof (a)[0]))
68 #define DSO_FLAGS_NODELETE 0x1
69
70 #ifdef HANDLE_RANDOMIZATION
71 #define NEXT_DYNAMIC_INDEX 2
72 #define MIN_DEPS_COUNT 2
73 #define NAME_INDEX_ZERO 0
74 #define NAME_INDEX_ONE 1
75 #define NAME_INDEX_TWO 2
76 #define NAME_INDEX_THREE 3
77 #define TLS_CNT_INCREASE 3
78 #define INVALID_FD_INHIBIT_FURTHER_SEARCH (-2)
79 #endif
80
81 #define PARENTS_BASE_CAPACITY 8
82 #define RELOC_CAN_SEARCH_DSO_BASE_CAPACITY 32
83 #define ANON_NAME_MAX_LEN 70
84
85 #define KPMD_SIZE (1UL << 21)
86 #define HUGEPAGES_SUPPORTED_STR_SIZE (32)
87
88 #ifdef UNIT_TEST_STATIC
89 #define UT_STATIC
90 #else
91 #define UT_STATIC static
92 #endif
93
94 /* Used for dlclose */
95 #define UNLOAD_NR_DLOPEN_CHECK 1
96 #define UNLOAD_COMMON_CHECK 2
97 #define UNLOAD_ALL_CHECK 3
98 struct dso_entry {
99 struct dso *dso;
100 TAILQ_ENTRY(dso_entry) entries;
101 };
102
103 struct debug {
104 int ver;
105 void *head;
106 void (*bp)(void);
107 int state;
108 void *base;
109 };
110
111 struct reserved_address_params {
112 void* start_addr;
113 size_t reserved_size;
114 bool must_use_reserved;
115 bool reserved_address_recursive;
116 #ifdef LOAD_ORDER_RANDOMIZATION
117 struct dso *target;
118 #endif
119 };
120
121 typedef void (*stage3_func)(size_t *, size_t *, size_t *);
122
123 static struct builtin_tls {
124 char c[8];
125 struct pthread pt;
126 void *space[16];
127 } builtin_tls[1];
128 #define MIN_TLS_ALIGN offsetof(struct builtin_tls, pt)
129
130 #define ADDEND_LIMIT 4096
131 static size_t *saved_addends, *apply_addends_to;
132 static bool g_is_asan;
133 static struct dso ldso;
134 static struct dso *head, *tail, *fini_head, *syms_tail, *lazy_head;
135 static struct dso_debug_info *debug_tail = NULL;
136 static char *env_path, *sys_path;
137 static unsigned long long gencnt;
138 static int runtime;
139 static int ldd_mode;
140 static int ldso_fail;
141 static int noload;
142 static int shutting_down;
143 static jmp_buf *rtld_fail;
144 static pthread_rwlock_t lock;
145 static struct debug debug;
146 static struct tls_module *tls_tail;
147 static size_t tls_cnt, tls_offset, tls_align = MIN_TLS_ALIGN;
148 static size_t static_tls_cnt;
149 static pthread_mutex_t init_fini_lock;
150 static pthread_cond_t ctor_cond;
151 static struct dso *builtin_deps[2];
152 static struct dso *const no_deps[1];
153 static struct dso *builtin_ctor_queue[4];
154 static struct dso **main_ctor_queue;
155 static struct fdpic_loadmap *app_loadmap;
156 static struct fdpic_dummy_loadmap app_dummy_loadmap;
157
158 struct debug *_dl_debug_addr = &debug;
159
160 extern hidden int __malloc_replaced;
161
162 hidden void (*const __init_array_start)(void)=0, (*const __fini_array_start)(void)=0;
163
164 extern hidden void (*const __init_array_end)(void), (*const __fini_array_end)(void);
165
166 #ifdef USE_GWP_ASAN
167 extern bool init_gwp_asan_by_libc(bool force_init);
168 #endif
169
170 weak_alias(__init_array_start, __init_array_end);
171 weak_alias(__fini_array_start, __fini_array_end);
172 #ifdef DFX_SIGNAL_LIBC
173 UT_STATIC void __InstallSignalHandler()
174 {
175 }
176 weak_alias(__InstallSignalHandler, DFX_InstallSignalHandler);
177 #endif
178
179 #ifdef HANDLE_RANDOMIZATION
180 static int do_dlclose(struct dso *p);
181 #endif
182
183 #ifdef LOAD_ORDER_RANDOMIZATION
184 static bool map_library_header(struct loadtask *task);
185 static bool task_map_library(struct loadtask *task, struct reserved_address_params *reserved_params);
186 static bool resolve_fd_to_realpath(struct loadtask *task);
187 static bool load_library_header(struct loadtask *task);
188 static void task_load_library(struct loadtask *task, struct reserved_address_params *reserved_params);
189 static void preload_direct_deps(struct dso *p, ns_t *namespace, struct loadtasks *tasks);
190 static void unmap_preloaded_sections(struct loadtasks *tasks);
191 static void preload_deps(struct dso *p, struct loadtasks *tasks);
192 static void run_loadtasks(struct loadtasks *tasks, struct reserved_address_params *reserved_params);
193 UT_STATIC void assign_tls(struct dso *p);
194 UT_STATIC void load_preload(char *s, ns_t *ns, struct loadtasks *tasks);
195 static void open_library_by_path(const char *name, const char *s, struct loadtask *task, struct zip_info *z_info);
196 static void handle_asan_path_open_by_task(int fd, const char *name, ns_t *namespace, struct loadtask *task, struct zip_info *z_info);
197 #endif
198
199 /* Sharing relro */
200 static void handle_relro_sharing(struct dso *p, const dl_extinfo *extinfo, ssize_t *relro_fd_offset);
201
202 /* asan path open */
203 int handle_asan_path_open(int fd, const char *name, ns_t *namespace, char *buf, size_t buf_size);
204
205 static void set_bss_vma_name(char *path_name, void *addr, size_t zeromap_size);
206
207 static void find_and_set_bss_name(struct dso *p);
208
209 /* lldb debug function */
210 static void sync_with_debugger();
211 static void notify_addition_to_debugger(struct dso *p);
212 static void notify_remove_to_debugger(struct dso *p);
213 static void add_dso_info_to_debug_map(struct dso *p);
214 static void remove_dso_info_from_debug_map(struct dso *p);
215
216 /* add namespace function */
217 static void get_sys_path(ns_configor *conf);
218 static void dlclose_ns(struct dso *p);
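/* Read /proc/self/exe to get the absolute path of the running executable. */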
219 static bool get_app_path(char *path, size_t size)
220 {
221 int l = 0;
222 l = readlink("/proc/self/exe", path, size);
223 if (l < 0 || l >= size) {
224 LD_LOGD("get_app_path readlink failed!");
225 return false;
226 }
227 path[l] = 0;
228 LD_LOGD("get_app_path path:%{public}s.", path);
229 return true;
230 }
231
232 static void init_default_namespace(struct dso *app)
233 {
234 ns_t *default_ns = get_default_ns();
235 memset(default_ns, 0, sizeof *default_ns);
236 ns_set_name(default_ns, NS_DEFAULT_NAME);
237 if (env_path) ns_set_env_paths(default_ns, env_path);
238 ns_set_lib_paths(default_ns, sys_path);
239 ns_set_separated(default_ns, false);
240 app->namespace = default_ns;
241 ns_add_dso(default_ns, app);
242 LD_LOGD("init_default_namespace default_namespace:"
243 "nsname: default ,"
244 "lib_paths:%{public}s ,"
245 "env_path:%{public}s ,"
246 "separated: false.",
247 sys_path, env_path);
248 return;
249 }
250
251 UT_STATIC void set_ns_attrs(ns_t *ns, ns_configor *conf)
252 {
253 if (!ns || !conf) {
254 return;
255 }
256
257 char *lib_paths, *asan_lib_paths, *permitted_paths, *asan_permitted_paths, *allowed_libs;
258
259 ns_set_separated(ns, conf->get_separated(ns->ns_name));
260
261 lib_paths = conf->get_lib_paths(ns->ns_name);
262 if (lib_paths) ns_set_lib_paths(ns, lib_paths);
263
264 asan_lib_paths = conf->get_asan_lib_paths(ns->ns_name);
265 if (asan_lib_paths) ns_set_asan_lib_paths(ns, asan_lib_paths);
266
267 permitted_paths = conf->get_permitted_paths(ns->ns_name);
268 if (permitted_paths) ns_set_permitted_paths(ns, permitted_paths);
269
270 asan_permitted_paths = conf->get_asan_permitted_paths(ns->ns_name);
271 if (asan_permitted_paths) ns_set_asan_permitted_paths(ns, asan_permitted_paths);
272
273 allowed_libs = conf->get_allowed_libs(ns->ns_name);
274 if (allowed_libs) ns_set_allowed_libs(ns, allowed_libs);
275
276 LD_LOGD("set_ns_attrs :"
277 "ns_name: %{public}s ,"
278 "separated:%{public}d ,"
279 "lib_paths:%{public}s ,"
280 "asan_lib_paths:%{public}s ,"
281 "permitted_paths:%{public}s ,"
282 "asan_permitted_paths:%{public}s ,"
283 "allowed_libs: %{public}s .",
284 ns->ns_name, ns->separated, ns->lib_paths, ns->asan_lib_paths, permitted_paths,
285 asan_permitted_paths, allowed_libs);
286 }
287
288 UT_STATIC void set_ns_inherits(ns_t *ns, ns_configor *conf)
289 {
290 if (!ns || !conf) {
291 return;
292 }
293
294 strlist *inherits = conf->get_inherits(ns->ns_name);
295 if (inherits) {
296 for (size_t i=0; i<inherits->num; i++) {
297 ns_t *inherited_ns = find_ns_by_name(inherits->strs[i]);
298 if (inherited_ns) {
299 char *shared_libs = conf->get_inherit_shared_libs(ns->ns_name, inherited_ns->ns_name);
300 ns_add_inherit(ns, inherited_ns, shared_libs);
301 LD_LOGD("set_ns_inherits :"
302 "ns_name: %{public}s ,"
303 "separated:%{public}d ,"
304 "lib_paths:%{public}s ,"
305 "asan_lib_paths:%{public}s ,",
306 inherited_ns->ns_name, inherited_ns->separated, inherited_ns->lib_paths,
307 inherited_ns->asan_lib_paths);
308 }
309 }
310 strlist_free(inherits);
311 } else {
312 LD_LOGD("set_ns_inherits inherits is NULL!");
313 }
314 }
315
316 static void init_namespace(struct dso *app)
317 {
318 char app_path[PATH_MAX+1];
319 if (!get_app_path(app_path, sizeof app_path)) {
320 strcpy(app_path, app->name);
321 }
322 char *t = strrchr(app_path, '/');
323 if (t) {
324 *t = 0;
325 } else {
326 app_path[0] = '.';
327 app_path[1] = 0;
328 }
329
330 nslist *nsl = nslist_init();
331 ns_configor *conf = configor_init();
332 char file_path[sizeof "/etc/ld-musl-namespace-" + sizeof (LDSO_ARCH) + sizeof ".ini" + 1] = {0};
333 (void)snprintf(file_path, sizeof file_path, "/etc/ld-musl-namespace-%s.ini", LDSO_ARCH);
334 LD_LOGI("init_namespace file_path:%{public}s", file_path);
335 trace_marker_reset();
336 trace_marker_begin(HITRACE_TAG_MUSL, "parse linker config", file_path);
337 int ret = conf->parse(file_path, app_path);
338 if (ret < 0) {
339 LD_LOGE("init_namespace ini file parse failed!");
340 /* Init_default_namespace is required even if the ini file parsing fails */
341 if (!sys_path) get_sys_path(conf);
342 init_default_namespace(app);
343 configor_free();
344 trace_marker_end(HITRACE_TAG_MUSL);
345 return;
346 }
347
348 /* sys_path needs to be parsed through ini file */
349 if (!sys_path) get_sys_path(conf);
350 init_default_namespace(app);
351
352 /* Init default namespace */
353 ns_t *d_ns = get_default_ns();
354 set_ns_attrs(d_ns, conf);
355
356 /* Init other namespace */
357 if (!nsl) {
358 LD_LOGE("init nslist fail!");
359 configor_free();
360 trace_marker_end(HITRACE_TAG_MUSL);
361 return;
362 }
363 strlist *s_ns = conf->get_namespaces();
364 if (s_ns) {
365 for (size_t i=0; i<s_ns->num; i++) {
366 ns_t *ns = ns_alloc();
367 ns_set_name(ns, s_ns->strs[i]);
368 set_ns_attrs(ns, conf);
369 ns_add_dso(ns, app);
370 nslist_add_ns(ns);
371 }
372 strlist_free(s_ns);
373 }
374 /* Set inherited namespace */
375 set_ns_inherits(d_ns, conf);
376 for (size_t i = 0; i < nsl->num; i++) {
377 set_ns_inherits(nsl->nss[i], conf);
378 }
379 configor_free();
380 trace_marker_end(HITRACE_TAG_MUSL);
381 return;
382 }
383
384 /* Compute load address for a virtual address in a given dso. */
385 #if DL_FDPIC
386 void *laddr(const struct dso *p, size_t v)
387 {
388 size_t j=0;
389 if (!p->loadmap) return p->base + v;
390 for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);
391 return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
392 }
393 static void *laddr_pg(const struct dso *p, size_t v)
394 {
395 size_t j=0;
396 size_t pgsz = PAGE_SIZE;
397 if (!p->loadmap) return p->base + v;
398 for (j=0; ; j++) {
399 size_t a = p->loadmap->segs[j].p_vaddr;
400 size_t b = a + p->loadmap->segs[j].p_memsz;
401 a &= -pgsz;
402 b += pgsz-1;
403 b &= -pgsz;
404 if (v-a<b-a) break;
405 }
406 return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
407 }
408 static void (*fdbarrier(void *p))()
409 {
410 void (*fd)();
411 __asm__("" : "=r"(fd) : "0"(p));
412 return fd;
413 }
414 #define fpaddr(p, v) fdbarrier((&(struct funcdesc){ \
415 laddr(p, v), (p)->got }))
416 #else
417 #define laddr(p, v) (void *)((p)->base + (v))
418 #define laddr_pg(p, v) laddr(p, v)
419 #define fpaddr(p, v) ((void (*)())laddr(p, v))
420 #endif
421
422 static void decode_vec(size_t *v, size_t *a, size_t cnt)
423 {
424 size_t i;
425 for (i=0; i<cnt; i++) a[i] = 0;
426 for (; v[0]; v+=2) if (v[0]-1<cnt-1) {
427 if (v[0] < 8*sizeof(long)) {
428 a[0] |= 1UL<<v[0];
429 }
430 a[v[0]] = v[1];
431 }
432 }
433
434 static int search_vec(size_t *v, size_t *r, size_t key)
435 {
436 for (; v[0]!=key; v+=2)
437 if (!v[0]) return 0;
438 *r = v[1];
439 return 1;
440 }
441
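/* Walk the Verdef list for the entry whose index matches vsym and check that
 * its vd_hash equals the vna_hash recorded in the consumer's Verneed entry. */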
442 UT_STATIC int check_vna_hash(Verdef *def, int16_t vsym, uint32_t vna_hash)
443 {
444 int matched = 0;
445
446 vsym &= 0x7fff;
447 Verdef *verdef = def;
448 for(;;) {
449 if ((verdef->vd_ndx & 0x7fff) == vsym) {
450 if (vna_hash == verdef->vd_hash) {
451 matched = 1;
452 }
453 break;
454 }
455 if (matched) {
456 break;
457 }
458 if (verdef->vd_next == 0) {
459 break;
460 }
461 verdef = (Verdef *)((char *)verdef + verdef->vd_next);
462 }
463 #if (LD_LOG_LEVEL & LD_LOG_DEBUG)
464 if (!matched) {
465 LD_LOGD("check_vna_hash no matched found. vsym=%{public}d vna_hash=%{public}x", vsym, vna_hash);
466 }
467 #endif
468 return matched;
469 }
470
471 UT_STATIC int check_verinfo(Verdef *def, int16_t *versym, uint32_t index, struct verinfo *verinfo, char *strings)
472 {
473 /* If versym or def is null, the lookup succeeds only when no specific version was requested. */
474 if (!versym || !def) {
475 if (strlen(verinfo->v) == 0) {
476 return 1;
477 } else {
478 LD_LOGD("check_verinfo versym or def is null and verinfo->v exist, s:%{public}s v:%{public}s.",
479 verinfo->s, verinfo->v);
480 return 0;
481 }
482 }
483
484 int16_t vsym = versym[index];
485
486 /* find the verneed symbol. */
487 if (verinfo->use_vna_hash) {
488 if (vsym != VER_NDX_LOCAL && vsym != VER_NDX_GLOBAL) {
489 return check_vna_hash(def, vsym, verinfo->vna_hash);
490 }
491 }
492
493 /* If no version string was requested and vsym is not negative, the library's default version of the symbol matches. */
494 if (strlen(verinfo->v) == 0) {
495 if (vsym >= 0) {
496 return 1;
497 } else {
498 LD_LOGD("check_verinfo not default version. vsym:%{public}d s:%{public}s", vsym, verinfo->s);
499 return 0;
500 }
501 }
502
503 /* find the version of symbol. */
504 vsym &= 0x7fff;
505 for (;;) {
506 if (!(def->vd_flags & VER_FLG_BASE) && (def->vd_ndx & 0x7fff) == vsym) {
507 break;
508 }
509 if (def->vd_next == 0) {
510 return 0;
511 }
512 def = (Verdef *)((char *)def + def->vd_next);
513 }
514
515 Verdaux *aux = (Verdaux *)((char *)def + def->vd_aux);
516
517 int ret = !strcmp(verinfo->v, strings + aux->vda_name);
518 #if (LD_LOG_LEVEL & LD_LOG_DEBUG)
519 if (!ret) {
520 LD_LOGD("check_verinfo version not match. s=%{public}s v=%{public}s vsym=%{public}d vda_name=%{public}s",
521 verinfo->s, verinfo->v, vsym, strings + aux->vda_name);
522 }
523 #endif
524 return ret;
525 }
526
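/* Classic SysV ELF hash; the returned pair also carries the symbol name length in sym_l. */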
527 static struct sym_info_pair sysv_hash(const char *s0)
528 {
529 struct sym_info_pair s_info_p;
530 const unsigned char *s = (void *)s0;
531 uint_fast32_t h = 0;
532 while (*s) {
533 h = 16*h + *s++;
534 h ^= h>>24 & 0xf0;
535 }
536 s_info_p.sym_h = h & 0xfffffff;
537 s_info_p.sym_l = (char *)s - s0;
538 return s_info_p;
539 }
540
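/* GNU hash (djb2-style, h = h*33 + c); the returned pair also carries the symbol name length in sym_l. */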
541 struct sym_info_pair gnu_hash(const char *s0)
542 {
543 struct sym_info_pair s_info_p;
544 const unsigned char *s = (void *)s0;
545 uint_fast32_t h = 5381;
546 for (; *s; s++)
547 h += h*32 + *s;
548 s_info_p.sym_h = h;
549 s_info_p.sym_l = (char *)s - s0;
550 return s_info_p;
551 }
552
553 static Sym *sysv_lookup(struct verinfo *verinfo, struct sym_info_pair s_info_p, struct dso *dso)
554 {
555 size_t i;
556 uint32_t h = s_info_p.sym_h;
557 Sym *syms = dso->syms;
558 Elf_Symndx *hashtab = dso->hashtab;
559 char *strings = dso->strings;
560 for (i=hashtab[2+h%hashtab[0]]; i; i=hashtab[2+hashtab[0]+i]) {
561 if ((!dso->versym || (dso->versym[i] & 0x7fff) >= 0)
562 && (!memcmp(verinfo->s, strings+syms[i].st_name, s_info_p.sym_l))) {
563 if (!check_verinfo(dso->verdef, dso->versym, i, verinfo, dso->strings)) {
564 continue;
565 }
566
567 return syms+i;
568 }
569
570 }
571 LD_LOGD("sysv_lookup not find the symbol, "
572 "so:%{public}s s:%{public}s v:%{public}s use_vna_hash:%{public}d vna_hash:%{public}x",
573 dso->name, verinfo->s, verinfo->v, verinfo->use_vna_hash, verinfo->vna_hash);
574 return 0;
575 }
576
577 static Sym *gnu_lookup(struct sym_info_pair s_info_p, uint32_t *hashtab, struct dso *dso, struct verinfo *verinfo)
578 {
579 uint32_t h1 = s_info_p.sym_h;
580 uint32_t nbuckets = hashtab[0];
581 uint32_t *buckets = hashtab + 4 + hashtab[2]*(sizeof(size_t)/4);
582 uint32_t i = buckets[h1 % nbuckets];
583
584 if (!i) {
585 LD_LOGD("gnu_lookup symbol not found (bloom filter), so:%{public}s s:%{public}s", dso->name, verinfo->s);
586 return 0;
587 }
588
589 uint32_t *hashval = buckets + nbuckets + (i - hashtab[1]);
590
591 for (h1 |= 1; ; i++) {
592 uint32_t h2 = *hashval++;
593 if ((h1 == (h2|1)) && (!dso->versym || (dso->versym[i] & 0x7fff) >= 0)
594 && !memcmp(verinfo->s, dso->strings + dso->syms[i].st_name, s_info_p.sym_l)) {
595 if (!check_verinfo(dso->verdef, dso->versym, i, verinfo, dso->strings)) {
596 continue;
597 }
598
599 return dso->syms+i;
600 }
601
602 if (h2 & 1) break;
603 }
604
605 LD_LOGD("gnu_lookup symbol not found, "
606 "so:%{public}s s:%{public}s v:%{public}s use_vna_hash:%{public}d vna_hash:%{public}x",
607 dso->name, verinfo->s, verinfo->v, verinfo->use_vna_hash, verinfo->vna_hash);
608 return 0;
609 }
610
611 static bool check_sym_accessible(struct dso *dso, ns_t *ns)
612 {
613 if (!dso || !dso->namespace || !ns) {
614 LD_LOGD("check_sym_accessible invalid parameter!");
615 return false;
616 }
617 if (dso->namespace == ns) {
618 return true;
619 }
620 for (int i = 0; i < dso->parents_count; i++) {
621 if (dso->parents[i]->namespace == ns) {
622 return true;
623 }
624 }
625 LD_LOGD(
626 "check_sym_accessible dso name [%{public}s] ns_name [%{public}s] not accessible!", dso->name, ns->ns_name);
627 return false;
628 }
629
630 static inline bool is_dso_accessible(struct dso *dso, ns_t *ns)
631 {
632 if (dso->namespace == ns) {
633 return true;
634 }
635 for (int i = 0; i < dso->parents_count; i++) {
636 if (dso->parents[i]->namespace == ns) {
637 return true;
638 }
639 }
640 LD_LOGD(
641 "check_sym_accessible dso name [%{public}s] ns_name [%{public}s] not accessible!", dso->name, ns->ns_name);
642 return false;
643 }
644
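/* Return the index of 'target' in p's parents array, or -1 if it is not recorded. */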
645 static int find_dso_parent(struct dso *p, struct dso *target)
646 {
647 int index = -1;
648 for (int i = 0; i < p->parents_count; i++) {
649 if (p->parents[i] == target) {
650 index = i;
651 break;
652 }
653 }
654 return index;
655 }
656
657 static void add_dso_parent(struct dso *p, struct dso *parent)
658 {
659 int index = find_dso_parent(p, parent);
660 if (index != -1) {
661 return;
662 }
663 if (p->parents_count + 1 > p->parents_capacity) {
664 if (p->parents_capacity == 0) {
665 p->parents = (struct dso **)malloc(sizeof(struct dso *) * PARENTS_BASE_CAPACITY);
666 if (!p->parents) {
667 return;
668 }
669 p->parents_capacity = PARENTS_BASE_CAPACITY;
670 } else {
671 struct dso ** realloced = (struct dso **)realloc(
672 p->parents, sizeof(struct dso *) * (p->parents_capacity + PARENTS_BASE_CAPACITY));
673 if (!realloced) {
674 return;
675 }
676 p->parents = realloced;
677 p->parents_capacity += PARENTS_BASE_CAPACITY;
678 }
679 }
680 p->parents[p->parents_count] = parent;
681 p->parents_count++;
682 }
683
684 static void remove_dso_parent(struct dso *p, struct dso *parent)
685 {
686 int index = find_dso_parent(p, parent);
687 if (index == -1) {
688 return;
689 }
690 int i;
691 for (i = 0; i < index; i++) {
692 p->parents[i] = p->parents[i];
693 }
694 for (i = index; i < p->parents_count - 1; i++) {
695 p->parents[i] = p->parents[i + 1];
696 }
697 p->parents_count--;
698 }
699
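/* Append can_search_so to p's relocation search list, growing the array in steps of RELOC_CAN_SEARCH_DSO_BASE_CAPACITY. */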
700 static void add_reloc_can_search_dso(struct dso *p, struct dso *can_search_so)
701 {
702 if (p->reloc_can_search_dso_count + 1 > p->reloc_can_search_dso_capacity) {
703 if (p->reloc_can_search_dso_capacity == 0) {
704 p->reloc_can_search_dso_list =
705 (struct dso **)malloc(sizeof(struct dso *) * RELOC_CAN_SEARCH_DSO_BASE_CAPACITY);
706 if (!p->reloc_can_search_dso_list) {
707 return;
708 }
709 p->reloc_can_search_dso_capacity = RELOC_CAN_SEARCH_DSO_BASE_CAPACITY;
710 } else {
711 struct dso ** realloced = (struct dso **)realloc(
712 p->reloc_can_search_dso_list,
713 sizeof(struct dso *) * (p->reloc_can_search_dso_capacity + RELOC_CAN_SEARCH_DSO_BASE_CAPACITY));
714 if (!realloced) {
715 return;
716 }
717 p->reloc_can_search_dso_list = realloced;
718 p->reloc_can_search_dso_capacity += RELOC_CAN_SEARCH_DSO_BASE_CAPACITY;
719 }
720 }
721 p->reloc_can_search_dso_list[p->reloc_can_search_dso_count] = can_search_so;
722 p->reloc_can_search_dso_count++;
723 }
724
725 static void free_reloc_can_search_dso(struct dso *p)
726 {
727 if (p->reloc_can_search_dso_list) {
728 free(p->reloc_can_search_dso_list);
729 p->reloc_can_search_dso_list = NULL;
730 p->reloc_can_search_dso_count = 0;
731 p->reloc_can_search_dso_capacity = 0;
732 }
733 }
734
735 /* The list of DSOs that can be searched during relocation includes:
736 * - DSOs whose is_global flag is true, which are accessible by default.
737 *   Global DSOs include the exe, LD_PRELOAD-ed DSOs and ldso.
738 * - Other DSOs are considered only when is_reloc_head_so_dep is true, and then
739 *   only if they are accessible from the relocating DSO's namespace.
740 *
741 * How is_reloc_head_so_dep is set:
742 * when dlopen-ing A, it is set to true for all of A's direct and indirect dependencies, including A itself. */
743 static void add_can_search_so_list_in_dso(struct dso *dso_relocating, struct dso *start_check_dso) {
744 struct dso *p = start_check_dso;
745 for (; p; p = p->syms_next) {
746 if (p->is_global) {
747 add_reloc_can_search_dso(dso_relocating, p);
748 continue;
749 }
750
751 if (p->is_reloc_head_so_dep) {
752 if (dso_relocating->namespace && check_sym_accessible(p, dso_relocating->namespace)) {
753 add_reloc_can_search_dso(dso_relocating, p);
754 }
755 }
756 }
757
758 return;
759 }
760
761 #define OK_TYPES (1<<STT_NOTYPE | 1<<STT_OBJECT | 1<<STT_FUNC | 1<<STT_COMMON | 1<<STT_TLS)
762 #define OK_BINDS (1<<STB_GLOBAL | 1<<STB_WEAK | 1<<STB_GNU_UNIQUE)
763
764 #ifndef ARCH_SYM_REJECT_UND
765 #define ARCH_SYM_REJECT_UND(s) 0
766 #endif
767
768 #if defined(__GNUC__)
769 __attribute__((always_inline))
770 #endif
771
772 struct symdef find_sym_impl(
773 struct dso *dso, struct verinfo *verinfo, struct sym_info_pair s_info_g, int need_def, ns_t *ns)
774 {
775 Sym *sym;
776 struct sym_info_pair s_info_s = {0, 0};
777 uint32_t *ght;
778 uint32_t gh = s_info_g.sym_h;
779 uint32_t gho = gh / (8 * sizeof(size_t));
780 size_t ghm = 1ul << gh % (8 * sizeof(size_t));
781 struct symdef def = {0};
782 if (ns && !check_sym_accessible(dso, ns))
783 return def;
784
785 if ((ght = dso->ghashtab)) {
786 const size_t *bloomwords = (const void *)(ght + 4);
787 size_t f = bloomwords[gho & (ght[2] - 1)];
788 if (!(f & ghm))
789 return def;
790
791 f >>= (gh >> ght[3]) % (8 * sizeof f);
792 if (!(f & 1))
793 return def;
794
795 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
796 } else {
797 if (!s_info_s.sym_h)
798 s_info_s = sysv_hash(verinfo->s);
799
800 sym = sysv_lookup(verinfo, s_info_s, dso);
801 }
802
803 if (!sym)
804 return def;
805
806 if (!sym->st_shndx)
807 if (need_def || (sym->st_info & 0xf) == STT_TLS || ARCH_SYM_REJECT_UND(sym))
808 return def;
809
810 if (!sym->st_value)
811 if ((sym->st_info & 0xf) != STT_TLS)
812 return def;
813
814 if (!(1 << (sym->st_info & 0xf) & OK_TYPES))
815 return def;
816
817 if (!(1 << (sym->st_info >> 4) & OK_BINDS))
818 return def;
819
820 def.sym = sym;
821 def.dso = dso;
822 return def;
823 }
824
825 static inline struct symdef find_sym2(struct dso *dso, struct verinfo *verinfo, int need_def, int use_deps, ns_t *ns)
826 {
827 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
828 struct sym_info_pair s_info_s = {0, 0};
829 uint32_t gh = s_info_g.sym_h, gho = gh / (8*sizeof(size_t)), *ght;
830 size_t ghm = 1ul << gh % (8*sizeof(size_t));
831 struct symdef def = {0};
832 struct dso **deps = use_deps ? dso->deps : 0;
833 for (; dso; dso=use_deps ? *deps++ : dso->syms_next) {
834 Sym *sym;
835 if (ns && !check_sym_accessible(dso, ns)) {
836 continue;
837 }
838 if ((ght = dso->ghashtab)) {
839 GNU_HASH_FILTER(ght, ghm, gho)
840 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
841 } else {
842 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
843 sym = sysv_lookup(verinfo, s_info_s, dso);
844 }
845
846 if (!sym) continue;
847 if (!sym->st_shndx)
848 if (need_def || (sym->st_info&0xf) == STT_TLS
849 || ARCH_SYM_REJECT_UND(sym))
850 continue;
851 if (!sym->st_value)
852 if ((sym->st_info&0xf) != STT_TLS)
853 continue;
854 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
855 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
856 def.sym = sym;
857 def.dso = dso;
858 break;
859 }
860 return def;
861 }
862
863 static inline struct symdef find_sym_by_deps(struct dso *dso, struct verinfo *verinfo, int need_def, ns_t *ns)
864 {
865 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
866 struct sym_info_pair s_info_s = {0, 0};
867 uint32_t h = 0, gh = s_info_g.sym_h, gho = gh / (8*sizeof(size_t)), *ght;
868 size_t ghm = 1ul << gh % (8*sizeof(size_t));
869 struct symdef def = {0};
870 struct dso **deps = dso->deps;
871 for (; dso; dso=*deps++) {
872 Sym *sym;
873 if (!is_dso_accessible(dso, ns)) {
874 continue;
875 }
876 if ((ght = dso->ghashtab)) {
877 GNU_HASH_FILTER(ght, ghm, gho)
878 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
879 } else {
880 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
881 sym = sysv_lookup(verinfo, s_info_s, dso);
882 }
883
884 if (!sym) continue;
885 if (!sym->st_shndx)
886 if (need_def || (sym->st_info&0xf) == STT_TLS
887 || ARCH_SYM_REJECT_UND(sym))
888 continue;
889 if (!sym->st_value)
890 if ((sym->st_info&0xf) != STT_TLS)
891 continue;
892 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
893 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
894 def.sym = sym;
895 def.dso = dso;
896 break;
897 }
898 return def;
899 }
900
901 static inline struct symdef find_sym_by_saved_so_list(
902 int sym_type, struct dso *dso, struct verinfo *verinfo, int need_def, struct dso *dso_relocating)
903 {
904 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
905 struct sym_info_pair s_info_s = {0, 0};
906 uint32_t gh = s_info_g.sym_h, gho = gh / (8 * sizeof(size_t)), *ght;
907 size_t ghm = 1ul << gh % (8 * sizeof(size_t));
908 struct symdef def = {0};
909 // skip head dso.
910 int start_search_index = sym_type==REL_COPY ? 1 : 0;
911 struct dso *dso_searching = 0;
912 for (int i = start_search_index; i < dso_relocating->reloc_can_search_dso_count; i++) {
913 dso_searching = dso_relocating->reloc_can_search_dso_list[i];
914 Sym *sym;
915 if ((ght = dso_searching->ghashtab)) {
916 GNU_HASH_FILTER(ght, ghm, gho)
917 sym = gnu_lookup(s_info_g, ght, dso_searching, verinfo);
918 } else {
919 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
920 sym = sysv_lookup(verinfo, s_info_s, dso_searching);
921 }
922 if (!sym) continue;
923 if (!sym->st_shndx)
924 if (need_def || (sym->st_info&0xf) == STT_TLS
925 || ARCH_SYM_REJECT_UND(sym))
926 continue;
927 if (!sym->st_value)
928 if ((sym->st_info&0xf) != STT_TLS)
929 continue;
930 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
931 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
932 def.sym = sym;
933 def.dso = dso_searching;
934 break;
935 }
936 return def;
937 }
938
939 static struct symdef find_sym(struct dso *dso, const char *s, int need_def)
940 {
941 struct verinfo verinfo = { .s = s, .v = "", .use_vna_hash = false };
942 return find_sym2(dso, &verinfo, need_def, 0, NULL);
943 }
944
945 static bool get_vna_hash(struct dso *dso, int sym_index, uint32_t *vna_hash)
946 {
947 if (!dso->versym || !dso->verneed) {
948 return false;
949 }
950
951 uint16_t vsym = dso->versym[sym_index];
952 if (vsym == VER_NDX_LOCAL || vsym == VER_NDX_GLOBAL) {
953 return false;
954 }
955
956 bool result = false;
957 Verneed *verneed = dso->verneed;
958 Vernaux *vernaux;
959 vsym &= 0x7fff;
960
961 for(;;) {
962 vernaux = (Vernaux *)((char *)verneed + verneed->vn_aux);
963
964 for (size_t cnt = 0; cnt < verneed->vn_cnt; cnt++) {
965 if ((vernaux->vna_other & 0x7fff) == vsym) {
966 result = true;
967 *vna_hash = vernaux->vna_hash;
968 break;
969 }
970
971 vernaux = (Vernaux *)((char *)vernaux + vernaux->vna_next);
972 }
973
974 if (result) {
975 break;
976 }
977
978 if (verneed->vn_next == 0) {
979 break;
980 }
981
982 verneed = (Verneed *)((char *)verneed + verneed->vn_next);
983 }
984 return result;
985 }
986
987 static void get_verinfo(struct dso *dso, int sym_index, struct verinfo *vinfo)
988 {
989 char *strings = dso->strings;
990 // try to get version number from .gnu.version
991 int16_t vsym = dso->versym[sym_index];
992 Verdef *verdef = dso->verdef;
993 vsym &= 0x7fff;
994 if (!verdef) {
995 return;
996 }
997 int version_found = 0;
998 for (;;) {
999 if (!verdef) {
1000 break;
1001 }
1002 if (!(verdef->vd_flags & VER_FLG_BASE) && (verdef->vd_ndx & 0x7fff) == vsym) {
1003 version_found = 1;
1004 break;
1005 }
1006 if (verdef->vd_next == 0) {
1007 break;
1008 }
1009 verdef = (Verdef *)((char *)verdef + verdef->vd_next);
1010 }
1011 if (version_found) {
1012 Verdaux *aux = (Verdaux *)((char *)verdef + verdef->vd_aux);
1013 if (aux && aux->vda_name && strings && (dso->strings + aux->vda_name)) {
1014 vinfo->v = dso->strings + aux->vda_name;
1015 }
1016 }
1017 }
1018
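/* Process one relocation table (REL or RELA, selected by stride) of dso:
 * resolve each referenced symbol, honoring symbol versions and namespace
 * visibility, then patch the target address according to the relocation type. */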
1019 static void do_relocs(struct dso *dso, size_t *rel, size_t rel_size, size_t stride)
1020 {
1021 unsigned char *base = dso->base;
1022 Sym *syms = dso->syms;
1023 char *strings = dso->strings;
1024 Sym *sym;
1025 const char *name;
1026 void *ctx;
1027 int type;
1028 int sym_index;
1029 struct symdef def;
1030 size_t *reloc_addr;
1031 size_t sym_val;
1032 size_t tls_val;
1033 size_t addend;
1034 int skip_relative = 0, reuse_addends = 0, save_slot = 0;
1035
1036 if (dso == &ldso) {
1037 /* Only ldso's REL table needs addend saving/reuse. */
1038 if (rel == apply_addends_to)
1039 reuse_addends = 1;
1040 skip_relative = 1;
1041 }
1042
1043 for (; rel_size; rel+=stride, rel_size-=stride*sizeof(size_t)) {
1044 if (skip_relative && IS_RELATIVE(rel[1], dso->syms)) continue;
1045 type = R_TYPE(rel[1]);
1046 if (type == REL_NONE) continue;
1047 reloc_addr = laddr(dso, rel[0]);
1048
1049 if (stride > 2) {
1050 addend = rel[2];
1051 } else if (type==REL_GOT || type==REL_PLT|| type==REL_COPY) {
1052 addend = 0;
1053 } else if (reuse_addends) {
1054 /* Save original addend in stage 2 where the dso
1055 * chain consists of just ldso; otherwise read back
1056 * saved addend since the inline one was clobbered. */
1057 if (head==&ldso)
1058 saved_addends[save_slot] = *reloc_addr;
1059 addend = saved_addends[save_slot++];
1060 } else {
1061 addend = *reloc_addr;
1062 }
1063
1064 sym_index = R_SYM(rel[1]);
1065 if (sym_index) {
1066 sym = syms + sym_index;
1067 name = strings + sym->st_name;
1068 ctx = type==REL_COPY ? head->syms_next : head;
1069 struct verinfo vinfo = { .s = name, .v = ""};
1070
1071 vinfo.use_vna_hash = get_vna_hash(dso, sym_index, &vinfo.vna_hash);
1072 if (!vinfo.use_vna_hash && dso->versym && (dso->versym[sym_index] & 0x7fff) >= 0) {
1073 get_verinfo(dso, sym_index, &vinfo);
1074 }
1075 if (dso->cache_sym_index == sym_index) {
1076 def = (struct symdef){ .dso = dso->cache_dso, .sym = dso->cache_sym };
1077 } else {
1078 def = (sym->st_info>>4) == STB_LOCAL
1079 ? (struct symdef){ .dso = dso, .sym = sym }
1080 : dso != &ldso ? find_sym_by_saved_so_list(type, ctx, &vinfo, type==REL_PLT, dso)
1081 : find_sym2(ctx, &vinfo, type==REL_PLT, 0, dso->namespace);
1082 dso->cache_sym_index = sym_index;
1083 dso->cache_dso = def.dso;
1084 dso->cache_sym = def.sym;
1085 }
1086
1087 if (!def.sym && (sym->st_shndx != SHN_UNDEF
1088 || sym->st_info>>4 != STB_WEAK)) {
1089 if (dso->lazy && (type==REL_PLT || type==REL_GOT)) {
1090 dso->lazy[3*dso->lazy_cnt+0] = rel[0];
1091 dso->lazy[3*dso->lazy_cnt+1] = rel[1];
1092 dso->lazy[3*dso->lazy_cnt+2] = addend;
1093 dso->lazy_cnt++;
1094 continue;
1095 }
1096 LD_LOGE("relocating failed: symbol not found. "
1097 "dso=%{public}s s=%{public}s use_vna_hash=%{public}d van_hash=%{public}x",
1098 dso->name, name, vinfo.use_vna_hash, vinfo.vna_hash);
1099 error("Error relocating %s: %s: symbol not found",
1100 dso->name, name);
1101 if (runtime) longjmp(*rtld_fail, 1);
1102 continue;
1103 }
1104 } else {
1105 sym = 0;
1106 def.sym = 0;
1107 def.dso = dso;
1108 }
1109
1110 sym_val = def.sym ? (size_t)laddr(def.dso, def.sym->st_value) : 0;
1111 tls_val = def.sym ? def.sym->st_value : 0;
1112
1113 if ((type == REL_TPOFF || type == REL_TPOFF_NEG)
1114 && def.dso->tls_id > static_tls_cnt) {
1115 error("Error relocating %s: %s: initial-exec TLS "
1116 "resolves to dynamic definition in %s",
1117 dso->name, name, def.dso->name);
1118 longjmp(*rtld_fail, 1);
1119 }
1120
1121 switch(type) {
1122 case REL_OFFSET:
1123 addend -= (size_t)reloc_addr;
1124 case REL_SYMBOLIC:
1125 case REL_GOT:
1126 case REL_PLT:
1127 *reloc_addr = sym_val + addend;
1128 break;
1129 case REL_USYMBOLIC:
1130 memcpy(reloc_addr, &(size_t){sym_val + addend}, sizeof(size_t));
1131 break;
1132 case REL_RELATIVE:
1133 *reloc_addr = (size_t)base + addend;
1134 break;
1135 case REL_SYM_OR_REL:
1136 if (sym) *reloc_addr = sym_val + addend;
1137 else *reloc_addr = (size_t)base + addend;
1138 break;
1139 case REL_COPY:
1140 memcpy(reloc_addr, (void *)sym_val, sym->st_size);
1141 break;
1142 case REL_OFFSET32:
1143 *(uint32_t *)reloc_addr = sym_val + addend
1144 - (size_t)reloc_addr;
1145 break;
1146 case REL_FUNCDESC:
1147 *reloc_addr = def.sym ? (size_t)(def.dso->funcdescs
1148 + (def.sym - def.dso->syms)) : 0;
1149 break;
1150 case REL_FUNCDESC_VAL:
1151 if ((sym->st_info&0xf) == STT_SECTION) *reloc_addr += sym_val;
1152 else *reloc_addr = sym_val;
1153 reloc_addr[1] = def.sym ? (size_t)def.dso->got : 0;
1154 break;
1155 case REL_DTPMOD:
1156 *reloc_addr = def.dso->tls_id;
1157 break;
1158 case REL_DTPOFF:
1159 *reloc_addr = tls_val + addend - DTP_OFFSET;
1160 break;
1161 #ifdef TLS_ABOVE_TP
1162 case REL_TPOFF:
1163 *reloc_addr = tls_val + def.dso->tls.offset + TPOFF_K + addend;
1164 break;
1165 #else
1166 case REL_TPOFF:
1167 *reloc_addr = tls_val - def.dso->tls.offset + addend;
1168 break;
1169 case REL_TPOFF_NEG:
1170 *reloc_addr = def.dso->tls.offset - tls_val + addend;
1171 break;
1172 #endif
1173 case REL_TLSDESC:
1174 if (stride<3) addend = reloc_addr[1];
1175 if (def.dso->tls_id > static_tls_cnt) {
1176 struct td_index *new = malloc(sizeof *new);
1177 if (!new) {
1178 error(
1179 "Error relocating %s: cannot allocate TLSDESC for %s",
1180 dso->name, sym ? name : "(local)" );
1181 longjmp(*rtld_fail, 1);
1182 }
1183 new->next = dso->td_index;
1184 dso->td_index = new;
1185 new->args[0] = def.dso->tls_id;
1186 new->args[1] = tls_val + addend - DTP_OFFSET;
1187 reloc_addr[0] = (size_t)__tlsdesc_dynamic;
1188 reloc_addr[1] = (size_t)new;
1189 } else {
1190 reloc_addr[0] = (size_t)__tlsdesc_static;
1191 #ifdef TLS_ABOVE_TP
1192 reloc_addr[1] = tls_val + def.dso->tls.offset
1193 + TPOFF_K + addend;
1194 #else
1195 reloc_addr[1] = tls_val - def.dso->tls.offset
1196 + addend;
1197 #endif
1198 }
1199 #ifdef TLSDESC_BACKWARDS
1200 /* Some archs (32-bit ARM at least) invert the order of
1201 * the descriptor members. Fix them up here. */
1202 size_t tmp = reloc_addr[0];
1203 reloc_addr[0] = reloc_addr[1];
1204 reloc_addr[1] = tmp;
1205 #endif
1206 break;
1207 default:
1208 error("Error relocating %s: unsupported relocation type %d",
1209 dso->name, type);
1210 if (runtime) longjmp(*rtld_fail, 1);
1211 continue;
1212 }
1213 }
1214 }
1215
1216 static void redo_lazy_relocs()
1217 {
1218 struct dso *p = lazy_head, *next;
1219 lazy_head = 0;
1220 for (; p; p=next) {
1221 next = p->lazy_next;
1222 size_t size = p->lazy_cnt*3*sizeof(size_t);
1223 p->lazy_cnt = 0;
1224 do_relocs(p, p->lazy, size, 3);
1225 if (p->lazy_cnt) {
1226 p->lazy_next = lazy_head;
1227 lazy_head = p;
1228 } else {
1229 free(p->lazy);
1230 p->lazy = 0;
1231 p->lazy_next = 0;
1232 }
1233 }
1234 }
1235
1236 /* A huge hack: to make up for the wastefulness of shared libraries
1237 * needing at least a page of dirty memory even if they have no global
1238 * data, we reclaim the gaps at the beginning and end of writable maps
1239 * and "donate" them to the heap. */
1240
1241 static void reclaim(struct dso *dso, size_t start, size_t end)
1242 {
1243 if (start >= dso->relro_start && start < dso->relro_end) start = dso->relro_end;
1244 if (end >= dso->relro_start && end < dso->relro_end) end = dso->relro_start;
1245 if (start >= end) return;
1246 char *base = laddr_pg(dso, start);
1247 __malloc_donate(base, base+(end-start));
1248 }
1249
1250 static void reclaim_gaps(struct dso *dso)
1251 {
1252 Phdr *ph = dso->phdr;
1253 size_t phcnt = dso->phnum;
1254
1255 for (; phcnt--; ph=(void *)((char *)ph+dso->phentsize)) {
1256 if (ph->p_type!=PT_LOAD) continue;
1257 if ((ph->p_flags&(PF_R|PF_W))!=(PF_R|PF_W)) continue;
1258 reclaim(dso, ph->p_vaddr & -PAGE_SIZE, ph->p_vaddr);
1259 reclaim(dso, ph->p_vaddr+ph->p_memsz,
1260 ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE);
1261 }
1262 }
1263
1264 static ssize_t read_loop(int fd, void *p, size_t n)
1265 {
1266 for (size_t i=0; i<n; ) {
1267 ssize_t l = read(fd, (char *)p+i, n-i);
1268 if (l<0) {
1269 if (errno==EINTR) continue;
1270 else return -1;
1271 }
1272 if (l==0) return i;
1273 i += l;
1274 }
1275 return n;
1276 }
1277
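/* mmap with MAP_FIXED, falling back to read()-based loading on NOMMU kernels where MAP_FIXED is not supported. */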
1278 UT_STATIC void *mmap_fixed(void *p, size_t n, int prot, int flags, int fd, off_t off)
1279 {
1280 static int no_map_fixed;
1281 char *q;
1282 if (!n) return p;
1283 if (!no_map_fixed) {
1284 q = mmap(p, n, prot, flags|MAP_FIXED, fd, off);
1285 if (!DL_NOMMU_SUPPORT || q != MAP_FAILED || errno != EINVAL)
1286 return q;
1287 no_map_fixed = 1;
1288 }
1289 /* Fallbacks for MAP_FIXED failure on NOMMU kernels. */
1290 if (flags & MAP_ANONYMOUS) {
1291 memset(p, 0, n);
1292 return p;
1293 }
1294 ssize_t r;
1295 if (lseek(fd, off, SEEK_SET) < 0) return MAP_FAILED;
1296 for (q=p; n; q+=r, off+=r, n-=r) {
1297 r = read(fd, q, n);
1298 if (r < 0 && errno != EINTR) return MAP_FAILED;
1299 if (!r) {
1300 memset(q, 0, n);
1301 break;
1302 }
1303 }
1304 return p;
1305 }
1306
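/* Unmap all segments of a dso; when dlclose debugging is enabled, keep the mappings but drop all access with PROT_NONE instead. */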
1307 UT_STATIC void unmap_library(struct dso *dso)
1308 {
1309 if (dso->loadmap) {
1310 size_t i;
1311 for (i=0; i<dso->loadmap->nsegs; i++) {
1312 if (!dso->loadmap->segs[i].p_memsz)
1313 continue;
1314 if (!is_dlclose_debug_enable()) {
1315 munmap((void *)dso->loadmap->segs[i].addr,
1316 dso->loadmap->segs[i].p_memsz);
1317 } else {
1318 (void)mprotect((void *)dso->loadmap->segs[i].addr,
1319 dso->loadmap->segs[i].p_memsz, PROT_NONE);
1320 }
1321 }
1322 free(dso->loadmap);
1323 } else if (dso->map && dso->map_len) {
1324 if (!is_dlclose_debug_enable()) {
1325 munmap(dso->map, dso->map_len);
1326 } else {
1327 mprotect(dso->map, dso->map_len, PROT_NONE);
1328 }
1329 }
1330 }
1331
1332 UT_STATIC bool get_random(void *buf, size_t buflen)
1333 {
1334 int ret;
1335 int fd = open("/dev/urandom", O_RDONLY);
1336 if (fd < 0) {
1337 return false;
1338 }
1339
1340 ret = read(fd, buf, buflen);
1341 if (ret < 0) {
1342 close(fd);
1343 return false;
1344 }
1345
1346 close(fd);
1347 return true;
1348 }
1349
1350 UT_STATIC void fill_random_data(void *buf, size_t buflen)
1351 {
1352 uint64_t x;
1353 int i;
1354 int pos = 0;
1355 struct timespec ts;
1356 /* Try to use urandom to get the random number first */
1357 if (!get_random(buf, buflen)) {
1358 /* Can't get random number from /dev/urandom, generate from addr based on ASLR and time */
1359 for (i = 1; i <= (buflen / sizeof(x)); i++) {
1360 (void)clock_gettime(CLOCK_REALTIME, &ts);
1361 x = (((uint64_t)get_random) << 32) ^ (uint64_t)fill_random_data ^ ts.tv_nsec;
1362 memcpy((char *)buf + pos, &x, sizeof(x));
1363 pos += sizeof(x);
1364 }
1365 }
1366 return;
1367 }
1368
1369 static bool get_transparent_hugepages_supported(void)
1370 {
1371 int fd = -1;
1372 ssize_t read_size = 0;
1373 bool enable = false;
1374 char buf[HUGEPAGES_SUPPORTED_STR_SIZE] = {'0'};
1375
1376 fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
1377 if (fd < 0)
1378 goto done;
1379
1380 read_size = read(fd, buf, HUGEPAGES_SUPPORTED_STR_SIZE - 1);
1381 if (read_size < 0)
1382 goto close_fd;
1383
1384 buf[HUGEPAGES_SUPPORTED_STR_SIZE - 1] = '\0';
1385 if (strstr(buf, "[never]") == NULL)
1386 enable = true;
1387
1388 close_fd:
1389 close(fd);
1390 done:
1391 return enable;
1392 }
1393
1394 static size_t phdr_table_get_maxinum_alignment(Phdr *phdr_table, size_t phdr_count)
1395 {
1396 #if defined(__LP64__)
1397 size_t maxinum_alignment = PAGE_SIZE;
1398 size_t i = 0;
1399
1400 for (i = 0; i < phdr_count; ++i) {
1401 const Phdr *phdr = &phdr_table[i];
1402
1403 /* p_align must be 0, 1, or a positive, integral power of two */
1404 if ((phdr->p_type != PT_LOAD) || ((phdr->p_align & (phdr->p_align - 1)) != 0))
1405 continue;
1406
1407 if (phdr->p_align > maxinum_alignment)
1408 maxinum_alignment = phdr->p_align;
1409 }
1410
1411 return maxinum_alignment;
1412 #else
1413 return PAGE_SIZE;
1414 #endif
1415 }
1416
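/* Map an ELF object into memory: parse its program headers, reserve a
 * suitably aligned address range (honoring reserved_params and huge-page
 * alignment when supported), map each PT_LOAD segment, zero-fill the bss,
 * and record PT_DYNAMIC/PT_TLS/PT_GNU_RELRO information in dso. */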
1417 UT_STATIC void *map_library(int fd, struct dso *dso, struct reserved_address_params *reserved_params)
1418 {
1419 Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
1420 void *allocated_buf=0;
1421 size_t phsize;
1422 size_t addr_min=SIZE_MAX, addr_max=0, map_len;
1423 size_t this_min, this_max;
1424 size_t nsegs = 0;
1425 off_t off_start;
1426 Ehdr *eh;
1427 Phdr *ph, *ph0;
1428 unsigned prot;
1429 unsigned char *map=MAP_FAILED, *base;
1430 size_t dyn=0;
1431 size_t tls_image=0;
1432 size_t i;
1433 int map_flags = MAP_PRIVATE;
1434 size_t start_addr;
1435 size_t start_alignment = PAGE_SIZE;
1436 bool hugepage_enabled = false;
1437
1438 ssize_t l = read(fd, buf, sizeof buf);
1439 eh = buf;
1440 if (l<0) return 0;
1441 if (l<sizeof *eh || (eh->e_type != ET_DYN && eh->e_type != ET_EXEC))
1442 goto noexec;
1443 phsize = eh->e_phentsize * eh->e_phnum;
1444 if (phsize > sizeof buf - sizeof *eh) {
1445 allocated_buf = malloc(phsize);
1446 if (!allocated_buf) return 0;
1447 l = pread(fd, allocated_buf, phsize, eh->e_phoff);
1448 if (l < 0) goto error;
1449 if (l != phsize) goto noexec;
1450 ph = ph0 = allocated_buf;
1451 } else if (eh->e_phoff + phsize > l) {
1452 l = pread(fd, buf+1, phsize, eh->e_phoff);
1453 if (l < 0) goto error;
1454 if (l != phsize) goto noexec;
1455 ph = ph0 = (void *)(buf + 1);
1456 } else {
1457 ph = ph0 = (void *)((char *)buf + eh->e_phoff);
1458 }
1459 for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
1460 if (ph->p_type == PT_DYNAMIC) {
1461 dyn = ph->p_vaddr;
1462 } else if (ph->p_type == PT_TLS) {
1463 tls_image = ph->p_vaddr;
1464 dso->tls.align = ph->p_align;
1465 dso->tls.len = ph->p_filesz;
1466 dso->tls.size = ph->p_memsz;
1467 } else if (ph->p_type == PT_GNU_RELRO) {
1468 dso->relro_start = ph->p_vaddr & -PAGE_SIZE;
1469 dso->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
1470 } else if (ph->p_type == PT_GNU_STACK) {
1471 if (!runtime && ph->p_memsz > __default_stacksize) {
1472 __default_stacksize =
1473 ph->p_memsz < DEFAULT_STACK_MAX ?
1474 ph->p_memsz : DEFAULT_STACK_MAX;
1475 }
1476 }
1477 if (ph->p_type != PT_LOAD) continue;
1478 nsegs++;
1479 if (ph->p_vaddr < addr_min) {
1480 addr_min = ph->p_vaddr;
1481 off_start = ph->p_offset;
1482 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1483 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1484 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1485 }
1486 if (ph->p_vaddr+ph->p_memsz > addr_max) {
1487 addr_max = ph->p_vaddr+ph->p_memsz;
1488 }
1489 }
1490 if (!dyn) goto noexec;
1491 if (DL_FDPIC && !(eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
1492 dso->loadmap = calloc(1, sizeof *dso->loadmap
1493 + nsegs * sizeof *dso->loadmap->segs);
1494 if (!dso->loadmap) goto error;
1495 dso->loadmap->nsegs = nsegs;
1496 for (ph=ph0, i=0; i<nsegs; ph=(void *)((char *)ph+eh->e_phentsize)) {
1497 if (ph->p_type != PT_LOAD) continue;
1498 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1499 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1500 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1501 map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE-1),
1502 prot, MAP_PRIVATE,
1503 fd, ph->p_offset & -PAGE_SIZE);
1504 if (map == MAP_FAILED) {
1505 unmap_library(dso);
1506 goto error;
1507 }
1508 dso->loadmap->segs[i].addr = (size_t)map +
1509 (ph->p_vaddr & PAGE_SIZE-1);
1510 dso->loadmap->segs[i].p_vaddr = ph->p_vaddr;
1511 dso->loadmap->segs[i].p_memsz = ph->p_memsz;
1512 i++;
1513 if (prot & PROT_WRITE) {
1514 size_t brk = (ph->p_vaddr & PAGE_SIZE-1)
1515 + ph->p_filesz;
1516 size_t pgbrk = brk + PAGE_SIZE-1 & -PAGE_SIZE;
1517 size_t pgend = brk + ph->p_memsz - ph->p_filesz
1518 + PAGE_SIZE-1 & -PAGE_SIZE;
1519 if (pgend > pgbrk && mmap_fixed(map+pgbrk,
1520 pgend-pgbrk, prot,
1521 MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
1522 -1, off_start) == MAP_FAILED)
1523 goto error;
1524 memset(map + brk, 0, pgbrk-brk);
1525 }
1526 }
1527 map = (void *)dso->loadmap->segs[0].addr;
1528 map_len = 0;
1529 goto done_mapping;
1530 }
1531 addr_max += PAGE_SIZE-1;
1532 addr_max &= -PAGE_SIZE;
1533 addr_min &= -PAGE_SIZE;
1534 off_start &= -PAGE_SIZE;
1535 map_len = addr_max - addr_min + off_start;
1536 start_addr = addr_min;
1537
1538 hugepage_enabled = get_transparent_hugepages_supported();
1539 if (hugepage_enabled) {
1540 size_t maxinum_alignment = phdr_table_get_maxinum_alignment(ph0, eh->e_phnum);
1541
1542 start_alignment = maxinum_alignment == KPMD_SIZE ? KPMD_SIZE : PAGE_SIZE;
1543 }
1544
1545 if (reserved_params) {
1546 if (map_len > reserved_params->reserved_size) {
1547 if (reserved_params->must_use_reserved) {
1548 goto error;
1549 }
1550 } else {
1551 start_addr = ((size_t)reserved_params->start_addr - 1 + PAGE_SIZE) & -PAGE_SIZE;
1552 map_flags |= MAP_FIXED;
1553 }
1554 }
1555
1556 /* We will pick a mapping_align-aligned address as the start of the dso,
1557 * so tmp_map_len is map_len rounded up plus mapping_align to make sure
1558 * there is enough space to shift the dso to the aligned location. */
1559 size_t mapping_align = start_alignment > LIBRARY_ALIGNMENT ? start_alignment : LIBRARY_ALIGNMENT;
1560 size_t tmp_map_len = ALIGN(map_len, mapping_align) + mapping_align - PAGE_SIZE;
1561
1562 /* If reserved_params exists, use start_addr as the preferred address for the mmap operation */
1563 if (reserved_params) {
1564 map = DL_NOMMU_SUPPORT
1565 ? mmap((void *)start_addr, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
1566 : mmap((void *)start_addr, map_len, prot, map_flags, fd, off_start);
1567 if (map == MAP_FAILED) {
1568 goto error;
1569 }
1570 if (reserved_params && map_len < reserved_params->reserved_size) {
1571 reserved_params->reserved_size -= (map_len + (start_addr - (size_t)reserved_params->start_addr));
1572 reserved_params->start_addr = (void *)((uint8_t *)map + map_len);
1573 }
1574 /* If reserved_params does not exist, use real_map as the preferred address for the mmap operation */
1575 } else {
1576 /* use tmp_map_len to mmap enough space for the dso with anonymous mapping */
1577 unsigned char *temp_map = mmap((void *)NULL, tmp_map_len, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1578 if (temp_map == MAP_FAILED) {
1579 goto error;
1580 }
1581
1582 /* find the mapping_align aligned address */
1583 unsigned char *real_map = (unsigned char*)ALIGN((uintptr_t)temp_map, mapping_align);
1584
1585 /* munmap the space we mapped before so that we can mmap the correct space again */
1586 munmap(temp_map, tmp_map_len);
1587
1588 map = DL_NOMMU_SUPPORT
1589 ? mmap(real_map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
1590 /* use map_len to mmap correct space for the dso with file mapping */
1591 : mmap(real_map, map_len, prot, map_flags, fd, off_start);
1592 if (map == MAP_FAILED) {
1593 goto error;
1594 }
1595 }
1596 dso->map = map;
1597 dso->map_len = map_len;
1598 /* If the loaded file is not relocatable and the requested address is
1599 * not available, then the load operation must fail. */
1600 if (eh->e_type != ET_DYN && addr_min && map!=(void *)addr_min) {
1601 errno = EBUSY;
1602 goto error;
1603 }
1604 base = map - addr_min;
1605 dso->phdr = 0;
1606 dso->phnum = 0;
1607 for (ph=ph0, i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
1608 if (ph->p_type == PT_OHOS_RANDOMDATA) {
1609 fill_random_data((void *)(ph->p_vaddr + base), ph->p_memsz);
1610 continue;
1611 }
1612 if (ph->p_type != PT_LOAD) continue;
1613 /* Check if the program headers are in this load segment, and
1614 * if so, record the address for use by dl_iterate_phdr. */
1615 if (!dso->phdr && eh->e_phoff >= ph->p_offset
1616 && eh->e_phoff+phsize <= ph->p_offset+ph->p_filesz) {
1617 dso->phdr = (void *)(base + ph->p_vaddr
1618 + (eh->e_phoff-ph->p_offset));
1619 dso->phnum = eh->e_phnum;
1620 dso->phentsize = eh->e_phentsize;
1621 }
1622 this_min = ph->p_vaddr & -PAGE_SIZE;
1623 this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
1624 off_start = ph->p_offset & -PAGE_SIZE;
1625 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1626 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1627 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1628 /* Reuse the existing mapping for the lowest-address LOAD */
1629 if ((ph->p_vaddr & -PAGE_SIZE) != addr_min || DL_NOMMU_SUPPORT)
1630 if (mmap_fixed(base+this_min, this_max-this_min, prot, MAP_PRIVATE|MAP_FIXED, fd, off_start) == MAP_FAILED)
1631 goto error;
1632 if ((ph->p_flags & PF_X) && (ph->p_align == KPMD_SIZE) && hugepage_enabled)
1633 madvise(base + this_min, this_max - this_min, MADV_HUGEPAGE);
1634 if (ph->p_memsz > ph->p_filesz && (ph->p_flags&PF_W)) {
1635 size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
1636 size_t pgbrk = brk+PAGE_SIZE-1 & -PAGE_SIZE;
1637 size_t zeromap_size = (size_t)base+this_max-pgbrk;
1638 memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE-1);
1639 if (pgbrk-(size_t)base < this_max && mmap_fixed((void *)pgbrk, zeromap_size, prot, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
1640 goto error;
1641 set_bss_vma_name(dso->name, (void *)pgbrk, zeromap_size);
1642 }
1643 }
1644 for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
1645 if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
1646 if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC)
1647 && errno != ENOSYS)
1648 goto error;
1649 break;
1650 }
1651 done_mapping:
1652 dso->base = base;
1653 dso->dynv = laddr(dso, dyn);
1654 if (dso->tls.size) dso->tls.image = laddr(dso, tls_image);
1655 free(allocated_buf);
1656 return map;
1657 noexec:
1658 errno = ENOEXEC;
1659 error:
1660 if (map!=MAP_FAILED) unmap_library(dso);
1661 free(allocated_buf);
1662 return 0;
1663 }
1664
1665 static int path_open(const char *name, const char *s, char *buf, size_t buf_size)
1666 {
1667 size_t l;
1668 int fd;
1669 for (;;) {
1670 s += strspn(s, ":\n");
1671 l = strcspn(s, ":\n");
1672 if (l-1 >= INT_MAX) return -1;
1673 if (snprintf(buf, buf_size, "%.*s/%s", (int)l, s, name) < buf_size) {
1674 if ((fd = open(buf, O_RDONLY|O_CLOEXEC))>=0) return fd;
1675 switch (errno) {
1676 case ENOENT:
1677 case ENOTDIR:
1678 case EACCES:
1679 case ENAMETOOLONG:
1680 break;
1681 default:
1682 /* Any negative value but -1 will inhibit
1683 * further path search. */
1684 return -2;
1685 }
1686 }
1687 s += l;
1688 }
1689 }
1690
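/* Expand $ORIGIN / ${ORIGIN} in DT_RPATH/DT_RUNPATH to the directory containing the dso (or the main executable). */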
1691 UT_STATIC int fixup_rpath(struct dso *p, char *buf, size_t buf_size)
1692 {
1693 size_t n, l;
1694 const char *s, *t, *origin;
1695 char *d;
1696 if (p->rpath || !p->rpath_orig) return 0;
1697 if (!strchr(p->rpath_orig, '$')) {
1698 p->rpath = p->rpath_orig;
1699 return 0;
1700 }
1701 n = 0;
1702 s = p->rpath_orig;
1703 while ((t=strchr(s, '$'))) {
1704 if (strncmp(t, "$ORIGIN", 7) && strncmp(t, "${ORIGIN}", 9))
1705 return 0;
1706 s = t+1;
1707 n++;
1708 }
1709 if (n > SSIZE_MAX/PATH_MAX) return 0;
1710
1711 if (p->kernel_mapped) {
1712 /* $ORIGIN searches cannot be performed for the main program
1713 * when it is suid/sgid/AT_SECURE. This is because the
1714 * pathname is under the control of the caller of execve.
1715 * For libraries, however, $ORIGIN can be processed safely
1716 * since the library's pathname came from a trusted source
1717 * (either system paths or a call to dlopen). */
1718 if (libc.secure)
1719 return 0;
1720 l = readlink("/proc/self/exe", buf, buf_size);
1721 if (l == -1) switch (errno) {
1722 case ENOENT:
1723 case ENOTDIR:
1724 case EACCES:
1725 break;
1726 default:
1727 return -1;
1728 }
1729 if (l >= buf_size)
1730 return 0;
1731 buf[l] = 0;
1732 origin = buf;
1733 } else {
1734 origin = p->name;
1735 }
1736 t = strrchr(origin, '/');
1737 if (t) {
1738 l = t-origin;
1739 } else {
1740 /* Normally p->name will always be an absolute or relative
1741 * pathname containing at least one '/' character, but in the
1742 * case where ldso was invoked as a command to execute a
1743 * program in the working directory, app.name may not. Fix. */
1744 origin = ".";
1745 l = 1;
1746 }
1747 /* Disallow non-absolute origins for suid/sgid/AT_SECURE. */
1748 if (libc.secure && *origin != '/')
1749 return 0;
1750 p->rpath = malloc(strlen(p->rpath_orig) + n*l + 1);
1751 if (!p->rpath) return -1;
1752
1753 d = p->rpath;
1754 s = p->rpath_orig;
1755 while ((t=strchr(s, '$'))) {
1756 memcpy(d, s, t-s);
1757 d += t-s;
1758 memcpy(d, origin, l);
1759 d += l;
1760 /* It was determined previously that the '$' is followed
1761 * either by "ORIGIN" or "{ORIGIN}". */
1762 s = t + 7 + 2*(t[1]=='{');
1763 }
1764 strcpy(d, s);
1765 return 0;
1766 }
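
/* Worked example for fixup_rpath (paths illustrative): a library loaded
 * from "/system/app/libfoo.so" with an rpath of "$ORIGIN/../lib" gets
 * origin "/system/app" (l = 11, n = 1) and an expanded rpath of
 * "/system/app/../lib". Each "$ORIGIN" / "${ORIGIN}" occurrence is replaced
 * by origin, which is why strlen(rpath_orig) + n*l + 1 bytes are reserved. */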
1767
1768 static void decode_dyn(struct dso *p)
1769 {
1770 size_t dyn[DYN_CNT];
1771 size_t flags1 = 0;
1772 decode_vec(p->dynv, dyn, DYN_CNT);
1773 search_vec(p->dynv, &flags1, DT_FLAGS_1);
1774 if (flags1 & DF_1_GLOBAL) {
1775 LD_LOGI("Add DF_1_GLOBAL for %{public}s", p->name);
1776 p->is_global = true;
1777 }
1778 if (flags1 & DF_1_NODELETE) {
1779 p->flags |= DSO_FLAGS_NODELETE;
1780 }
1781 p->syms = laddr(p, dyn[DT_SYMTAB]);
1782 p->strings = laddr(p, dyn[DT_STRTAB]);
1783 if (dyn[0]&(1<<DT_HASH))
1784 p->hashtab = laddr(p, dyn[DT_HASH]);
1785 if (dyn[0]&(1<<DT_RPATH))
1786 p->rpath_orig = p->strings + dyn[DT_RPATH];
1787 if (dyn[0]&(1<<DT_RUNPATH))
1788 p->rpath_orig = p->strings + dyn[DT_RUNPATH];
1789 if (dyn[0]&(1<<DT_PLTGOT))
1790 p->got = laddr(p, dyn[DT_PLTGOT]);
1791 if (search_vec(p->dynv, dyn, DT_GNU_HASH))
1792 p->ghashtab = laddr(p, *dyn);
1793 if (search_vec(p->dynv, dyn, DT_VERSYM))
1794 p->versym = laddr(p, *dyn);
1795 if (search_vec(p->dynv, dyn, DT_VERDEF))
1796 p->verdef = laddr(p, *dyn);
1797 if (search_vec(p->dynv, dyn, DT_VERNEED))
1798 p->verneed = laddr(p, *dyn);
1799 }
1800
1801 UT_STATIC size_t count_syms(struct dso *p)
1802 {
1803 if (p->hashtab) return p->hashtab[1];
1804
1805 size_t nsym, i;
1806 uint32_t *buckets = p->ghashtab + 4 + (p->ghashtab[2]*sizeof(size_t)/4);
1807 uint32_t *hashval;
1808 for (i = nsym = 0; i < p->ghashtab[0]; i++) {
1809 if (buckets[i] > nsym)
1810 nsym = buckets[i];
1811 }
1812 if (nsym) {
1813 hashval = buckets + p->ghashtab[0] + (nsym - p->ghashtab[1]);
1814 do nsym++;
1815 while (!(*hashval++ & 1));
1816 }
1817 return nsym;
1818 }
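
/* count_syms note: with only DT_GNU_HASH present there is no direct symbol
 * count. The largest bucket value is the index of the first symbol in the
 * highest-starting hash chain; walking that chain until an entry with the
 * low bit set (the end-of-chain marker) gives one past the last hashed
 * symbol, which equals the total symbol count. */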
1819
1820 static void *dl_mmap(size_t n)
1821 {
1822 void *p;
1823 int prot = PROT_READ|PROT_WRITE, flags = MAP_ANONYMOUS|MAP_PRIVATE;
1824 #ifdef SYS_mmap2
1825 p = (void *)__syscall(SYS_mmap2, 0, n, prot, flags, -1, 0);
1826 #else
1827 p = (void *)__syscall(SYS_mmap, 0, n, prot, flags, -1, 0);
1828 #endif
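	/* Raw syscalls return -errno encoded in the last page of the address
	 * space, so any value above (unsigned long)-4096 indicates failure. */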
1829 return (unsigned long)p > -4096UL ? 0 : p;
1830 }
1831
1832 static void makefuncdescs(struct dso *p)
1833 {
1834 static int self_done;
1835 size_t nsym = count_syms(p);
1836 size_t i, size = nsym * sizeof(*p->funcdescs);
1837
1838 if (!self_done) {
1839 p->funcdescs = dl_mmap(size);
1840 self_done = 1;
1841 } else {
1842 p->funcdescs = malloc(size);
1843 }
1844 if (!p->funcdescs) {
1845 if (!runtime) a_crash();
1846 error("Error allocating function descriptors for %s", p->name);
1847 longjmp(*rtld_fail, 1);
1848 }
1849 for (i=0; i<nsym; i++) {
1850 if ((p->syms[i].st_info&0xf)==STT_FUNC && p->syms[i].st_shndx) {
1851 p->funcdescs[i].addr = laddr(p, p->syms[i].st_value);
1852 p->funcdescs[i].got = p->got;
1853 } else {
1854 p->funcdescs[i].addr = 0;
1855 p->funcdescs[i].got = 0;
1856 }
1857 }
1858 }
1859
1860 static void get_sys_path(ns_configor *conf)
1861 {
1862 LD_LOGD("get_sys_path g_is_asan:%{public}d", g_is_asan);
1863 /* Use ini file's system paths when Asan is not enabled */
1864 if (!g_is_asan) {
1865 sys_path = conf->get_sys_paths();
1866 } else {
1867 		/* Use the ini file's asan system paths when Asan is enabled.
1868 		 * Merge the two strings when both sys_paths and asan_sys_paths are valid. */
1869 sys_path = conf->get_asan_sys_paths();
1870 char *sys_path_default = conf->get_sys_paths();
1871 if (!sys_path) {
1872 sys_path = sys_path_default;
1873 } else if (sys_path_default) {
1874 size_t newlen = strlen(sys_path) + strlen(sys_path_default) + 2;
1875 			char *new_syspath = malloc(newlen);
1876 			if (new_syspath) {
1877 				/* newlen covers both paths plus ':' and the terminating NUL */
1878 				snprintf(new_syspath, newlen, "%s:%s", sys_path, sys_path_default);
1879 				sys_path = new_syspath;
1880 			}
1881 }
1882 }
1883 if (!sys_path) sys_path = "/lib:/usr/local/lib:/usr/lib:/lib64";
1884 LD_LOGD("get_sys_path sys_path:%{public}s", sys_path);
1885 }
1886
1887 static struct dso *search_dso_by_name(const char *name, const ns_t *ns) {
1888 LD_LOGD("search_dso_by_name name:%{public}s, ns_name:%{public}s", name, ns ? ns->ns_name: "NULL");
1889 for (size_t i = 0; i < ns->ns_dsos->num; i++){
1890 struct dso *p = ns->ns_dsos->dsos[i];
1891 if (p->shortname && !strcmp(p->shortname, name)) {
1892 LD_LOGD("search_dso_by_name found name:%{public}s, ns_name:%{public}s", name, ns ? ns->ns_name: "NULL");
1893 return p;
1894 }
1895 }
1896 return NULL;
1897 }
1898
1899 static struct dso *search_dso_by_fstat(const struct stat *st, const ns_t *ns, uint64_t file_offset) {
1900 LD_LOGD("search_dso_by_fstat ns_name:%{public}s", ns ? ns->ns_name : "NULL");
1901 for (size_t i = 0; i < ns->ns_dsos->num; i++){
1902 struct dso *p = ns->ns_dsos->dsos[i];
1903 if (p->dev == st->st_dev && p->ino == st->st_ino && p->file_offset == file_offset) {
1904 LD_LOGD("search_dso_by_fstat found dev:%{public}lu, ino:%{public}lu, ns_name:%{public}s",
1905 st->st_dev, st->st_ino, ns ? ns->ns_name : "NULL");
1906 return p;
1907 }
1908 }
1909 return NULL;
1910 }
1911
1912 static inline int app_has_same_name_so(const char *so_name, const ns_t *ns)
1913 {
1914 int fd = -1;
1915 /* Only check system app. */
1916 if (((ns->flag & LOCAL_NS_PREFERED) != 0) && ns->lib_paths) {
1917 char tmp_buf[PATH_MAX+1];
1918 fd = path_open(so_name, ns->lib_paths, tmp_buf, sizeof tmp_buf);
1919 }
1920 return fd;
1921 }
1922
1923 /* Find loaded so by name */
1924 static struct dso *find_library_by_name(const char *name, const ns_t *ns, bool check_inherited)
1925 {
1926 LD_LOGD("find_library_by_name name:%{public}s, ns_name:%{public}s, check_inherited:%{public}d",
1927 name,
1928 ns ? ns->ns_name : "NULL",
1929 !!check_inherited);
1930 struct dso *p = search_dso_by_name(name, ns);
1931 if (p) return p;
1932 if (check_inherited && ns->ns_inherits) {
1933 for (size_t i = 0; i < ns->ns_inherits->num; i++){
1934 ns_inherit * inherit = ns->ns_inherits->inherits[i];
1935 p = search_dso_by_name(name, inherit->inherited_ns);
1936 if (p && is_sharable(inherit, name)) {
1937 if (app_has_same_name_so(name, ns) != -1) {
1938 return NULL;
1939 }
1940 return p;
1941 }
1942 }
1943 }
1944 return NULL;
1945 }
1946 /* Find loaded so by file stat */
1947 UT_STATIC struct dso *find_library_by_fstat(const struct stat *st, const ns_t *ns, bool check_inherited, uint64_t file_offset) {
1948 LD_LOGD("find_library_by_fstat ns_name:%{public}s, check_inherited :%{public}d",
1949 ns ? ns->ns_name : "NULL",
1950 !!check_inherited);
1951 struct dso *p = search_dso_by_fstat(st, ns, file_offset);
1952 if (p) return p;
1953 if (check_inherited && ns->ns_inherits) {
1954 for (size_t i = 0; i < ns->ns_inherits->num; i++){
1955 ns_inherit *inherit = ns->ns_inherits->inherits[i];
1956 p = search_dso_by_fstat(st, inherit->inherited_ns, file_offset);
1957 if (p && is_sharable(inherit, p->shortname)) return p;
1958 }
1959 }
1960 return NULL;
1961 }
1962
1963 #ifndef LOAD_ORDER_RANDOMIZATION
1964 /* add namespace function */
1965 struct dso *load_library(
1966 const char *name, struct dso *needed_by, ns_t *namespace, bool check_inherited, struct reserved_address_params *reserved_params)
1967 {
1968 char buf[PATH_MAX+1];
1969 const char *pathname;
1970 unsigned char *map;
1971 struct dso *p, temp_dso = {0};
1972 int fd;
1973 struct stat st;
1974 size_t alloc_size;
1975 int n_th = 0;
1976 int is_self = 0;
1977
1978 if (!*name) {
1979 errno = EINVAL;
1980 return 0;
1981 }
1982
1983 /* Catch and block attempts to reload the implementation itself */
1984 if (name[0]=='l' && name[1]=='i' && name[2]=='b') {
1985 static const char reserved[] =
1986 "c.pthread.rt.m.dl.util.xnet.";
1987 const char *rp, *next;
1988 for (rp=reserved; *rp; rp=next) {
1989 next = strchr(rp, '.') + 1;
1990 if (strncmp(name+3, rp, next-rp) == 0)
1991 break;
1992 }
1993 if (*rp) {
1994 if (ldd_mode) {
1995 /* Track which names have been resolved
1996 * and only report each one once. */
1997 static unsigned reported;
1998 unsigned mask = 1U<<(rp-reserved);
1999 if (!(reported & mask)) {
2000 reported |= mask;
2001 dprintf(1, "\t%s => %s (%p)\n",
2002 name, ldso.name,
2003 ldso.base);
2004 }
2005 }
2006 is_self = 1;
2007 }
2008 }
2009 if (!strcmp(name, ldso.name)) is_self = 1;
2010 if (is_self) {
2011 if (!ldso.prev) {
2012 tail->next = &ldso;
2013 ldso.prev = tail;
2014 tail = &ldso;
2015 ldso.namespace = namespace;
2016 ns_add_dso(namespace, &ldso);
2017 }
2018 return &ldso;
2019 }
2020 if (strchr(name, '/')) {
2021 pathname = name;
2022
2023 if (!is_accessible(namespace, pathname, g_is_asan, check_inherited)) {
2024 fd = -1;
2025 LD_LOGD("load_library is_accessible return false,fd = -1");
2026 } else {
2027 fd = open(name, O_RDONLY|O_CLOEXEC);
2028 LD_LOGD("load_library is_accessible return true, open file fd:%{public}d .", fd);
2029 }
2030 } else {
2031 /* Search for the name to see if it's already loaded */
2032 /* Search in namespace */
2033 p = find_library_by_name(name, namespace, check_inherited);
2034 if (p) {
2035 LD_LOGD("load_library find_library_by_name found p, return it!");
2036 return p;
2037 }
2038 if (strlen(name) > NAME_MAX) {
2039 LD_LOGE("load_library name exceeding the maximum length, return 0!");
2040 return 0;
2041 }
2042 fd = -1;
2043 if (namespace->env_paths) fd = path_open(name, namespace->env_paths, buf, sizeof buf);
2044 for (p = needed_by; fd == -1 && p; p = p->needed_by) {
2045 if (fixup_rpath(p, buf, sizeof buf) < 0) {
2046 LD_LOGD("load_library Inhibit further search,fd = -2.");
2047 fd = -2; /* Inhibit further search. */
2048 }
2049 if (p->rpath) {
2050 fd = path_open(name, p->rpath, buf, sizeof buf);
2051 LD_LOGD("load_library p->rpath path_open fd:%{public}d.", fd);
2052 }
2053
2054 }
2055 if (g_is_asan) {
2056 fd = handle_asan_path_open(fd, name, namespace, buf, sizeof buf);
2057 LD_LOGD("load_library handle_asan_path_open fd:%{public}d.", fd);
2058 } else {
2059 if (fd == -1 && namespace->lib_paths) {
2060 fd = path_open(name, namespace->lib_paths, buf, sizeof buf);
2061 LD_LOGD("load_library no asan lib_paths path_open fd:%{public}d.", fd);
2062 }
2063 }
2064 pathname = buf;
2065 LD_LOGD("load_library lib_paths pathname:%{public}s.", pathname);
2066 }
2067 if (fd < 0) {
2068 if (!check_inherited || !namespace->ns_inherits) return 0;
2069 /* Load lib in inherited namespace. Do not check inherited again.*/
2070 for (size_t i = 0; i < namespace->ns_inherits->num; i++) {
2071 ns_inherit *inherit = namespace->ns_inherits->inherits[i];
2072 if (strchr(name, '/')==0 && !is_sharable(inherit, name)) continue;
2073 p = load_library(name, needed_by, inherit->inherited_ns, false, reserved_params);
2074 if (p) {
2075 LD_LOGD("load_library search in inherited, found p ,inherited_ns name:%{public}s",
2076 inherit->inherited_ns->ns_name);
2077 return p;
2078 }
2079 }
2080 return 0;
2081 }
2082 if (fstat(fd, &st) < 0) {
2083 close(fd);
2084 LD_LOGE("load_library fstat < 0,return 0!");
2085 return 0;
2086 }
2087 /* Search in namespace */
2088 p = find_library_by_fstat(&st, namespace, check_inherited, 0);
2089 if (p) {
2090 /* If this library was previously loaded with a
2091 * pathname but a search found the same inode,
2092 * setup its shortname so it can be found by name. */
2093 if (!p->shortname && pathname != name)
2094 p->shortname = strrchr(p->name, '/')+1;
2095 close(fd);
2096 LD_LOGD("load_library find_library_by_fstat, found p and return it!");
2097 return p;
2098 }
2099 map = noload ? 0 : map_library(fd, &temp_dso, reserved_params);
2100 close(fd);
2101 if (!map) return 0;
2102
2103 /* Avoid the danger of getting two versions of libc mapped into the
2104 * same process when an absolute pathname was used. The symbols
2105 * checked are chosen to catch both musl and glibc, and to avoid
2106 * false positives from interposition-hack libraries. */
2107 decode_dyn(&temp_dso);
2108 if (find_sym(&temp_dso, "__libc_start_main", 1).sym &&
2109 find_sym(&temp_dso, "stdin", 1).sym) {
2110 unmap_library(&temp_dso);
2111 return load_library("libc.so", needed_by, namespace, true, reserved_params);
2112 }
2113 /* Past this point, if we haven't reached runtime yet, ldso has
2114 * committed either to use the mapped library or to abort execution.
2115 * Unmapping is not possible, so we can safely reclaim gaps. */
2116 if (!runtime) reclaim_gaps(&temp_dso);
2117
2118 /* Allocate storage for the new DSO. When there is TLS, this
2119 * storage must include a reservation for all pre-existing
2120 * threads to obtain copies of both the new TLS, and an
2121 * extended DTV capable of storing an additional slot for
2122 * the newly-loaded DSO. */
2123 alloc_size = sizeof *p + strlen(pathname) + 1;
2124 if (runtime && temp_dso.tls.image) {
2125 size_t per_th = temp_dso.tls.size + temp_dso.tls.align
2126 + sizeof(void *) * (tls_cnt+3);
2127 n_th = libc.threads_minus_1 + 1;
2128 if (n_th > SSIZE_MAX / per_th) alloc_size = SIZE_MAX;
2129 else alloc_size += n_th * per_th;
2130 }
2131 p = calloc(1, alloc_size);
2132 if (!p) {
2133 unmap_library(&temp_dso);
2134 return 0;
2135 }
2136 memcpy(p, &temp_dso, sizeof temp_dso);
2137 p->dev = st.st_dev;
2138 p->ino = st.st_ino;
2139 p->needed_by = needed_by;
2140 p->name = p->buf;
2141 p->runtime_loaded = runtime;
2142 strcpy(p->name, pathname);
2143 /* Add a shortname only if name arg was not an explicit pathname. */
2144 if (pathname != name) p->shortname = strrchr(p->name, '/')+1;
2145 if (p->tls.image) {
2146 p->tls_id = ++tls_cnt;
2147 tls_align = MAXP2(tls_align, p->tls.align);
2148 #ifdef TLS_ABOVE_TP
2149 p->tls.offset = tls_offset + ( (p->tls.align-1) &
2150 (-tls_offset + (uintptr_t)p->tls.image) );
2151 tls_offset = p->tls.offset + p->tls.size;
2152 #else
2153 tls_offset += p->tls.size + p->tls.align - 1;
2154 tls_offset -= (tls_offset + (uintptr_t)p->tls.image)
2155 & (p->tls.align-1);
2156 p->tls.offset = tls_offset;
2157 #endif
2158 p->new_dtv = (void *)(-sizeof(size_t) &
2159 (uintptr_t)(p->name+strlen(p->name)+sizeof(size_t)));
2160 p->new_tls = (void *)(p->new_dtv + n_th*(tls_cnt+1));
2161 if (tls_tail) tls_tail->next = &p->tls;
2162 else libc.tls_head = &p->tls;
2163 tls_tail = &p->tls;
2164 }
2165
2166 tail->next = p;
2167 p->prev = tail;
2168 tail = p;
2169
2170 /* Add dso to namespace */
2171 p->namespace = namespace;
2172 ns_add_dso(namespace, p);
2173 if (runtime)
2174 p->by_dlopen = 1;
2175
2176 if (DL_FDPIC) makefuncdescs(p);
2177
2178 if (ldd_mode) dprintf(1, "\t%s => %s (%p)\n", name, pathname, p->base);
2179
2180 return p;
2181 }
2182
2183 static void load_direct_deps(struct dso *p, ns_t *namespace, struct reserved_address_params *reserved_params)
2184 {
2185 size_t i, cnt=0;
2186
2187 if (p->deps) return;
2188 /* For head, all preloads are direct pseudo-dependencies.
2189 * Count and include them now to avoid realloc later. */
2190 if (p==head) for (struct dso *q=p->next; q; q=q->next)
2191 cnt++;
2192 for (i=0; p->dynv[i]; i+=2)
2193 if (p->dynv[i] == DT_NEEDED) cnt++;
2194 /* Use builtin buffer for apps with no external deps, to
2195 * preserve property of no runtime failure paths. */
2196 p->deps = (p==head && cnt<2) ? builtin_deps :
2197 calloc(cnt+1, sizeof *p->deps);
2198 if (!p->deps) {
2199 error("Error loading dependencies for %s", p->name);
2200 if (runtime) longjmp(*rtld_fail, 1);
2201 }
2202 cnt=0;
2203 if (p==head) for (struct dso *q=p->next; q; q=q->next)
2204 p->deps[cnt++] = q;
2205 for (i=0; p->dynv[i]; i+=2) {
2206 if (p->dynv[i] != DT_NEEDED) continue;
2207 struct dso *dep = load_library(p->strings + p->dynv[i+1], p, namespace, true, reserved_params);
2208 LD_LOGD("loading shared library %{public}s: (needed by %{public}s)", p->strings + p->dynv[i+1], p->name);
2209 if (!dep) {
2210 error("Error loading shared library %s: %m (needed by %s)",
2211 p->strings + p->dynv[i+1], p->name);
2212 if (runtime) longjmp(*rtld_fail, 1);
2213 continue;
2214 }
2215 p->deps[cnt++] = dep;
2216 }
2217 p->deps[cnt] = 0;
2218 p->ndeps_direct = cnt;
2219 for (i = 0; i < p->ndeps_direct; i++) {
2220 add_dso_parent(p->deps[i], p);
2221 }
2222 }
2223
2224 static void load_deps(struct dso *p, struct reserved_address_params *reserved_params)
2225 {
2226 if (p->deps) return;
2227 for (; p; p=p->next)
2228 load_direct_deps(p, p->namespace, reserved_params);
2229 }
2230 #endif
2231
2232 static void extend_bfs_deps(struct dso *p)
2233 {
2234 size_t i, j, cnt, ndeps_all;
2235 struct dso **tmp;
2236
2237 /* Can't use realloc if the original p->deps was allocated at
2238 * program entry and malloc has been replaced, or if it's
2239 * the builtin non-allocated trivial main program deps array. */
2240 int no_realloc = (__malloc_replaced && !p->runtime_loaded)
2241 || p->deps == builtin_deps;
2242
2243 if (p->bfs_built) return;
2244 ndeps_all = p->ndeps_direct;
2245
2246 /* Mark existing (direct) deps so they won't be duplicated. */
2247 for (i=0; p->deps[i]; i++)
2248 p->deps[i]->mark = 1;
2249
2250 /* For each dependency already in the list, copy its list of direct
2251 * dependencies to the list, excluding any items already in the
2252 * list. Note that the list this loop iterates over will grow during
2253 * the loop, but since duplicates are excluded, growth is bounded. */
2254 for (i=0; p->deps[i]; i++) {
2255 struct dso *dep = p->deps[i];
2256 for (j=cnt=0; j<dep->ndeps_direct; j++)
2257 if (!dep->deps[j]->mark) cnt++;
2258 tmp = no_realloc ?
2259 malloc(sizeof(*tmp) * (ndeps_all+cnt+1)) :
2260 realloc(p->deps, sizeof(*tmp) * (ndeps_all+cnt+1));
2261 if (!tmp) {
2262 error("Error recording dependencies for %s", p->name);
2263 if (runtime) longjmp(*rtld_fail, 1);
2264 continue;
2265 }
2266 if (no_realloc) {
2267 memcpy(tmp, p->deps, sizeof(*tmp) * (ndeps_all+1));
2268 no_realloc = 0;
2269 }
2270 p->deps = tmp;
2271 for (j=0; j<dep->ndeps_direct; j++) {
2272 if (dep->deps[j]->mark) continue;
2273 dep->deps[j]->mark = 1;
2274 p->deps[ndeps_all++] = dep->deps[j];
2275 }
2276 p->deps[ndeps_all] = 0;
2277 }
2278 p->bfs_built = 1;
2279 for (p=head; p; p=p->next)
2280 p->mark = 0;
2281 }
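
/* extend_bfs_deps example: if p directly needs B and C, B needs D, and C
 * needs D and E, the loop above extends p->deps from {B, C} to
 * {B, C, D, E} -- breadth first, with the mark flag suppressing the
 * duplicate D contributed by C. */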
2282
2283 #ifndef LOAD_ORDER_RANDOMIZATION
2284 static void load_preload(char *s, ns_t *ns)
2285 {
2286 int tmp;
2287 char *z;
2288 for (z=s; *z; s=z) {
2289 for ( ; *s && (isspace(*s) || *s==':'); s++);
2290 for (z=s; *z && !isspace(*z) && *z!=':'; z++);
2291 tmp = *z;
2292 *z = 0;
2293 load_library(s, 0, ns, true, NULL);
2294 *z = tmp;
2295 }
2296 }
2297 #endif
2298
2299 static void add_syms(struct dso *p)
2300 {
2301 if (!p->syms_next && syms_tail != p) {
2302 syms_tail->syms_next = p;
2303 syms_tail = p;
2304 }
2305 }
2306
2307 static void revert_syms(struct dso *old_tail)
2308 {
2309 struct dso *p, *next;
2310 /* Chop off the tail of the list of dsos that participate in
2311 * the global symbol table, reverting them to RTLD_LOCAL. */
2312 for (p=old_tail; p; p=next) {
2313 next = p->syms_next;
2314 p->syms_next = 0;
2315 }
2316 syms_tail = old_tail;
2317 }
2318
2319 static void do_mips_relocs(struct dso *p, size_t *got)
2320 {
2321 size_t i, j, rel[2];
2322 unsigned char *base = p->base;
2323 i=0; search_vec(p->dynv, &i, DT_MIPS_LOCAL_GOTNO);
2324 if (p==&ldso) {
2325 got += i;
2326 } else {
2327 while (i--) *got++ += (size_t)base;
2328 }
2329 j=0; search_vec(p->dynv, &j, DT_MIPS_GOTSYM);
2330 i=0; search_vec(p->dynv, &i, DT_MIPS_SYMTABNO);
2331 Sym *sym = p->syms + j;
2332 rel[0] = (unsigned char *)got - base;
2333 for (i-=j; i; i--, sym++, rel[0]+=sizeof(size_t)) {
2334 rel[1] = R_INFO(sym-p->syms, R_MIPS_JUMP_SLOT);
2335 do_relocs(p, rel, sizeof rel, 2);
2336 }
2337 }
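
/* MIPS GOT note: the first DT_MIPS_LOCAL_GOTNO entries are "local" and are
 * simply rebased by adding the load address (for ldso itself they are
 * skipped here, having already been adjusted during early self-relocation).
 * The remaining entries correspond one-to-one to the dynamic symbols from
 * index DT_MIPS_GOTSYM onward and are resolved by synthesizing an
 * R_MIPS_JUMP_SLOT relocation for each, as done above. */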
2338
2339 static uint8_t* sleb128_decoder(uint8_t* current, uint8_t* end, size_t* value)
2340 {
2341 size_t result = 0;
2342 static const size_t size = CHAR_BIT * sizeof(result);
2343
2344 size_t shift = 0;
2345 uint8_t byte;
2346
2347 do {
2348 if (current >= end) {
2349 a_crash();
2350 }
2351
2352 byte = *current++;
2353 result |= ((size_t)(byte & 127) << shift);
2354 shift += 7;
2355 } while (byte & 128);
2356
2357 if (shift < size && (byte & 64)) {
2358 result |= -((size_t)(1) << shift);
2359 }
2360
2361 *value = result;
2362
2363 return current;
2364 }
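
/* sleb128_decoder examples: the single byte 0x7e decodes to -2 (no
 * continuation bit; sign bit 0x40 set, so -(1<<7) is OR-ed in), while the
 * sequence 0xe5 0x8e 0x26 decodes to 624485
 * (0x65 + (0x0e << 7) + (0x26 << 14); the last byte has neither the
 * continuation bit nor the sign bit set). */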
2365
2366 static void do_android_relocs(struct dso *p, size_t dt_name, size_t dt_size)
2367 {
2368 size_t android_rel_addr = 0, android_rel_size = 0;
2369 uint8_t *android_rel_curr, *android_rel_end;
2370
2371 search_vec(p->dynv, &android_rel_addr, dt_name);
2372 search_vec(p->dynv, &android_rel_size, dt_size);
2373
2374 if (!android_rel_addr || (android_rel_size < 4)) {
2375 return;
2376 }
2377
2378 android_rel_curr = laddr(p, android_rel_addr);
2379 if (memcmp(android_rel_curr, "APS2", ANDROID_REL_SIGN_SIZE)) {
2380 return;
2381 }
2382
2383 android_rel_curr += ANDROID_REL_SIGN_SIZE;
2384 android_rel_size -= ANDROID_REL_SIGN_SIZE;
2385
2386 android_rel_end = android_rel_curr + android_rel_size;
2387
2388 size_t relocs_num;
2389 size_t rel[3] = {0};
2390
2391 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &relocs_num);
2392 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[0]);
2393
2394 for (size_t i = 0; i < relocs_num;) {
2395
2396 size_t group_size, group_flags;
2397
2398 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_size);
2399 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_flags);
2400
2401 size_t group_r_offset_delta = 0;
2402
2403 if (group_flags & RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG) {
2404 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_r_offset_delta);
2405 }
2406
2407 if (group_flags & RELOCATION_GROUPED_BY_INFO_FLAG) {
2408 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[1]);
2409 }
2410
2411 const size_t addend_flags = group_flags & (RELOCATION_GROUP_HAS_ADDEND_FLAG | RELOCATION_GROUPED_BY_ADDEND_FLAG);
2412
2413 if (addend_flags == RELOCATION_GROUP_HAS_ADDEND_FLAG) {
2414 } else if (addend_flags == (RELOCATION_GROUP_HAS_ADDEND_FLAG | RELOCATION_GROUPED_BY_ADDEND_FLAG)) {
2415 size_t addend;
2416 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &addend);
2417 rel[2] += addend;
2418 } else {
2419 rel[2] = 0;
2420 }
2421
2422 for (size_t j = 0; j < group_size; j++) {
2423 if (group_flags & RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG) {
2424 rel[0] += group_r_offset_delta;
2425 } else {
2426 				size_t offset_delta;
2427 				android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &offset_delta);
2428
2429 				rel[0] += offset_delta;
2430 }
2431
2432 if ((group_flags & RELOCATION_GROUPED_BY_INFO_FLAG) == 0) {
2433 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[1]);
2434 }
2435
2436 if (addend_flags == RELOCATION_GROUP_HAS_ADDEND_FLAG) {
2437 size_t addend;
2438 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &addend);
2439 rel[2] += addend;
2440 }
2441
2442 if (dt_name == DT_ANDROID_REL) {
2443 do_relocs(p, rel, sizeof(size_t)*2, 2);
2444 } else {
2445 do_relocs(p, rel, sizeof(size_t)*3, 3);
2446 }
2447 }
2448
2449 i += group_size;
2450 }
2451 }
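
/* Layout of the Android packed relocation stream consumed above: a 4-byte
 * "APS2" signature, then SLEB128-encoded values: the total relocation count
 * and the initial r_offset, followed by groups of
 * [group_size, group_flags, optional offset delta, optional r_info,
 * optional addend]. Any field that is not fixed for the whole group is
 * instead emitted per relocation inside the group. */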
2452
2453 static void do_relr_relocs(struct dso *dso, size_t *relr, size_t relr_size)
2454 {
2455 unsigned char *base = dso->base;
2456 size_t *reloc_addr;
2457 for (; relr_size; relr++, relr_size-=sizeof(size_t))
2458 if ((relr[0]&1) == 0) {
2459 reloc_addr = laddr(dso, relr[0]);
2460 *reloc_addr++ += (size_t)base;
2461 } else {
2462 int i = 0;
2463 for (size_t bitmap=relr[0]; (bitmap>>=1); i++)
2464 if (bitmap&1)
2465 reloc_addr[i] += (size_t)base;
2466 reloc_addr += 8*sizeof(size_t)-1;
2467 }
2468 }
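
/* DT_RELR encoding, as decoded above: an even entry is the address of a
 * word to rebase; an odd entry is a bitmap whose bits 1..N-1
 * (N = 8*sizeof(size_t)) mark which of the next N-1 consecutive words also
 * get the load address added (bit 0 is just the odd-entry tag). For
 * example, a bitmap of 0x5 rebases only the second of the words it covers. */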
2469
2470 static void reloc_all(struct dso *p, const dl_extinfo *extinfo)
2471 {
2472 ssize_t relro_fd_offset = 0;
2473 size_t dyn[DYN_CNT];
2474 for (; p; p=p->next) {
2475 if (p->relocated) continue;
2476 if (p != &ldso) {
2477 add_can_search_so_list_in_dso(p, head);
2478 }
2479 decode_vec(p->dynv, dyn, DYN_CNT);
2480 if (NEED_MIPS_GOT_RELOCS)
2481 do_mips_relocs(p, laddr(p, dyn[DT_PLTGOT]));
2482 do_relocs(p, laddr(p, dyn[DT_JMPREL]), dyn[DT_PLTRELSZ],
2483 2+(dyn[DT_PLTREL]==DT_RELA));
2484 do_relocs(p, laddr(p, dyn[DT_REL]), dyn[DT_RELSZ], 2);
2485 do_relocs(p, laddr(p, dyn[DT_RELA]), dyn[DT_RELASZ], 3);
2486 if (!DL_FDPIC)
2487 do_relr_relocs(p, laddr(p, dyn[DT_RELR]), dyn[DT_RELRSZ]);
2488
2489 do_android_relocs(p, DT_ANDROID_REL, DT_ANDROID_RELSZ);
2490 do_android_relocs(p, DT_ANDROID_RELA, DT_ANDROID_RELASZ);
2491
2492 if (head != &ldso && p->relro_start != p->relro_end &&
2493 mprotect(laddr(p, p->relro_start), p->relro_end-p->relro_start, PROT_READ)
2494 && errno != ENOSYS) {
2495 error("Error relocating %s: RELRO protection failed: %m",
2496 p->name);
2497 if (runtime) longjmp(*rtld_fail, 1);
2498 }
2499 /* Handle serializing/mapping the RELRO segment */
2500 handle_relro_sharing(p, extinfo, &relro_fd_offset);
2501
2502 p->relocated = 1;
2503 free_reloc_can_search_dso(p);
2504 }
2505 }
2506
2507 static void kernel_mapped_dso(struct dso *p)
2508 {
2509 size_t min_addr = -1, max_addr = 0, cnt;
2510 Phdr *ph = p->phdr;
2511 for (cnt = p->phnum; cnt--; ph = (void *)((char *)ph + p->phentsize)) {
2512 if (ph->p_type == PT_DYNAMIC) {
2513 p->dynv = laddr(p, ph->p_vaddr);
2514 } else if (ph->p_type == PT_GNU_RELRO) {
2515 p->relro_start = ph->p_vaddr & -PAGE_SIZE;
2516 p->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
2517 } else if (ph->p_type == PT_GNU_STACK) {
2518 if (!runtime && ph->p_memsz > __default_stacksize) {
2519 __default_stacksize =
2520 ph->p_memsz < DEFAULT_STACK_MAX ?
2521 ph->p_memsz : DEFAULT_STACK_MAX;
2522 }
2523 }
2524 if (ph->p_type != PT_LOAD) continue;
2525 if (ph->p_vaddr < min_addr)
2526 min_addr = ph->p_vaddr;
2527 if (ph->p_vaddr+ph->p_memsz > max_addr)
2528 max_addr = ph->p_vaddr+ph->p_memsz;
2529 }
2530 min_addr &= -PAGE_SIZE;
2531 max_addr = (max_addr + PAGE_SIZE-1) & -PAGE_SIZE;
2532 p->map = p->base + min_addr;
2533 p->map_len = max_addr - min_addr;
2534 p->kernel_mapped = 1;
2535 }
2536
2537 void __libc_exit_fini()
2538 {
2539 struct dso *p;
2540 size_t dyn[DYN_CNT];
2541 pthread_t self = __pthread_self();
2542
2543 /* Take both locks before setting shutting_down, so that
2544 * either lock is sufficient to read its value. The lock
2545 * order matches that in dlopen to avoid deadlock. */
2546 pthread_rwlock_wrlock(&lock);
2547 pthread_mutex_lock(&init_fini_lock);
2548 shutting_down = 1;
2549 pthread_rwlock_unlock(&lock);
2550 for (p=fini_head; p; p=p->fini_next) {
2551 while (p->ctor_visitor && p->ctor_visitor!=self)
2552 pthread_cond_wait(&ctor_cond, &init_fini_lock);
2553 if (!p->constructed) continue;
2554 decode_vec(p->dynv, dyn, DYN_CNT);
2555 if (dyn[0] & (1<<DT_FINI_ARRAY)) {
2556 size_t n = dyn[DT_FINI_ARRAYSZ]/sizeof(size_t);
2557 size_t *fn = (size_t *)laddr(p, dyn[DT_FINI_ARRAY])+n;
2558 while (n--) ((void (*)(void))*--fn)();
2559 }
2560 #ifndef NO_LEGACY_INITFINI
2561 if ((dyn[0] & (1<<DT_FINI)) && dyn[DT_FINI])
2562 fpaddr(p, dyn[DT_FINI])();
2563 #endif
2564 }
2565 }
2566
2567 void __ldso_atfork(int who)
2568 {
2569 if (who<0) {
2570 pthread_rwlock_wrlock(&lock);
2571 pthread_mutex_lock(&init_fini_lock);
2572 } else {
2573 pthread_mutex_unlock(&init_fini_lock);
2574 pthread_rwlock_unlock(&lock);
2575 }
2576 }
2577
2578 static struct dso **queue_ctors(struct dso *dso)
2579 {
2580 size_t cnt, qpos, spos, i;
2581 struct dso *p, **queue, **stack;
2582
2583 if (ldd_mode) return 0;
2584
2585 /* Bound on queue size is the total number of indirect deps.
2586 * If a bfs deps list was built, we can use it. Otherwise,
2587 * bound by the total number of DSOs, which is always safe and
2588 	 * is reasonable to use (for the main app at startup). */
2589 if (dso->bfs_built) {
2590 for (cnt=0; dso->deps[cnt]; cnt++)
2591 dso->deps[cnt]->mark = 0;
2592 cnt++; /* self, not included in deps */
2593 } else {
2594 for (cnt=0, p=head; p; cnt++, p=p->next)
2595 p->mark = 0;
2596 }
2597 cnt++; /* termination slot */
2598 if (dso==head && cnt <= countof(builtin_ctor_queue))
2599 queue = builtin_ctor_queue;
2600 else
2601 queue = calloc(cnt, sizeof *queue);
2602
2603 if (!queue) {
2604 error("Error allocating constructor queue: %m\n");
2605 if (runtime) longjmp(*rtld_fail, 1);
2606 return 0;
2607 }
2608
2609 /* Opposite ends of the allocated buffer serve as an output queue
2610 * and a working stack. Setup initial stack with just the argument
2611 * dso and initial queue empty... */
2612 stack = queue;
2613 qpos = 0;
2614 spos = cnt;
2615 stack[--spos] = dso;
2616 dso->next_dep = 0;
2617 dso->mark = 1;
2618
2619 /* Then perform pseudo-DFS sort, but ignoring circular deps. */
2620 while (spos<cnt) {
2621 p = stack[spos++];
2622 while (p->next_dep < p->ndeps_direct) {
2623 if (p->deps[p->next_dep]->mark) {
2624 p->next_dep++;
2625 } else {
2626 stack[--spos] = p;
2627 p = p->deps[p->next_dep];
2628 p->next_dep = 0;
2629 p->mark = 1;
2630 }
2631 }
2632 queue[qpos++] = p;
2633 }
2634 queue[qpos] = 0;
2635 for (i=0; i<qpos; i++) queue[i]->mark = 0;
2636
2637 return queue;
2638 }
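
/* queue_ctors example: for an app A depending on B and C, where B also
 * depends on C, the pseudo-DFS above produces the queue {C, B, A}, so the
 * deepest dependency's constructors run first and A's run last. The single
 * allocation is used from both ends: the output queue grows from the front
 * while the working stack grows down from the back. */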
2639
2640 static void do_init_fini(struct dso **queue)
2641 {
2642 struct dso *p;
2643 size_t dyn[DYN_CNT], i;
2644 pthread_t self = __pthread_self();
2645
2646 pthread_mutex_lock(&init_fini_lock);
2647 for (i=0; (p=queue[i]); i++) {
2648 while ((p->ctor_visitor && p->ctor_visitor!=self) || shutting_down)
2649 pthread_cond_wait(&ctor_cond, &init_fini_lock);
2650 if (p->ctor_visitor || p->constructed)
2651 continue;
2652 p->ctor_visitor = self;
2653
2654 decode_vec(p->dynv, dyn, DYN_CNT);
2655
2656 pthread_mutex_unlock(&init_fini_lock);
2657
2658 #ifndef NO_LEGACY_INITFINI
2659 if ((dyn[0] & (1<<DT_INIT)) && dyn[DT_INIT])
2660 fpaddr(p, dyn[DT_INIT])();
2661 #endif
2662 if (dyn[0] & (1<<DT_INIT_ARRAY)) {
2663 size_t n = dyn[DT_INIT_ARRAYSZ]/sizeof(size_t);
2664 size_t *fn = laddr(p, dyn[DT_INIT_ARRAY]);
2665 if (p != &ldso) {
2666 trace_marker_begin(HITRACE_TAG_MUSL, "calling constructors: ", p->name);
2667 }
2668 while (n--) ((void (*)(void))*fn++)();
2669 if (p != &ldso) {
2670 trace_marker_end(HITRACE_TAG_MUSL);
2671 }
2672 }
2673
2674 pthread_mutex_lock(&init_fini_lock);
2675 /*
2676 	 * While using a recursive lock, update fini_next/fini_head
2677 	 * after the possible recursive call in DT_INIT_ARRAY.
2678 */
2679 if (dyn[0] & ((1<<DT_FINI) | (1<<DT_FINI_ARRAY))) {
2680 p->fini_next = fini_head;
2681 fini_head = p;
2682 }
2683
2684 p->ctor_visitor = 0;
2685 p->constructed = 1;
2686 pthread_cond_broadcast(&ctor_cond);
2687 }
2688 pthread_mutex_unlock(&init_fini_lock);
2689 }
2690
2691 void __libc_start_init(void)
2692 {
2693 do_init_fini(main_ctor_queue);
2694 if (!__malloc_replaced && main_ctor_queue != builtin_ctor_queue)
2695 free(main_ctor_queue);
2696 main_ctor_queue = 0;
2697 }
2698
2699 static void dl_debug_state(void)
2700 {
2701 }
2702
2703 weak_alias(dl_debug_state, _dl_debug_state);
2704
2705 void __init_tls(size_t *auxv)
2706 {
2707 }
2708
2709 static void update_tls_size()
2710 {
2711 libc.tls_cnt = tls_cnt;
2712 libc.tls_align = tls_align;
2713 libc.tls_size = ALIGN(
2714 (1+tls_cnt) * sizeof(void *) +
2715 tls_offset +
2716 sizeof(struct pthread) +
2717 tls_align * 2,
2718 tls_align);
2719 }
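
/* update_tls_size note: the computed static TLS size covers the DTV
 * (tls_cnt+1 pointers), every module's TLS block (tls_offset bytes once
 * laid out), the pthread structure itself, and tls_align*2 bytes of slack
 * so the strictest alignment can be met wherever the block is placed. */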
2720
2721 static void install_new_tls(void)
2722 {
2723 sigset_t set;
2724 pthread_t self = __pthread_self(), td;
2725 struct dso *dtv_provider = container_of(tls_tail, struct dso, tls);
2726 uintptr_t (*newdtv)[tls_cnt+1] = (void *)dtv_provider->new_dtv;
2727 struct dso *p;
2728 size_t i, j;
2729 size_t old_cnt = self->dtv[0];
2730
2731 __block_app_sigs(&set);
2732 __tl_lock();
2733 /* Copy existing dtv contents from all existing threads. */
2734 for (i=0, td=self; !i || td!=self; i++, td=td->next) {
2735 memcpy(newdtv+i, td->dtv,
2736 (old_cnt+1)*sizeof(uintptr_t));
2737 newdtv[i][0] = tls_cnt;
2738 }
2739 /* Install new dtls into the enlarged, uninstalled dtv copies. */
2740 for (p=head; ; p=p->next) {
2741 if (p->tls_id <= old_cnt) continue;
2742 unsigned char *mem = p->new_tls;
2743 for (j=0; j<i; j++) {
2744 unsigned char *new = mem;
2745 new += ((uintptr_t)p->tls.image - (uintptr_t)mem)
2746 & (p->tls.align-1);
2747 memcpy(new, p->tls.image, p->tls.len);
2748 newdtv[j][p->tls_id] =
2749 (uintptr_t)new + DTP_OFFSET;
2750 mem += p->tls.size + p->tls.align;
2751 }
2752 if (p->tls_id == tls_cnt) break;
2753 }
2754
2755 /* Broadcast barrier to ensure contents of new dtv is visible
2756 * if the new dtv pointer is. The __membarrier function has a
2757 * fallback emulation using signals for kernels that lack the
2758 * feature at the syscall level. */
2759
2760 __membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
2761
2762 /* Install new dtv for each thread. */
2763 for (j=0, td=self; !j || td!=self; j++, td=td->next) {
2764 td->dtv = newdtv[j];
2765 }
2766
2767 __tl_unlock();
2768 __restore_sigs(&set);
2769 }
2770
2771 /* Stage 1 of the dynamic linker is defined in dlstart.c. It calls the
2772 * following stage 2 and stage 3 functions via primitive symbolic lookup
2773 * since it does not have access to their addresses to begin with. */
2774
2775 /* Stage 2 of the dynamic linker is called after relative relocations
2776 * have been processed. It can make function calls to static functions
2777 * and access string literals and static data, but cannot use extern
2778 * symbols. Its job is to perform symbolic relocations on the dynamic
2779 * linker itself, but some of the relocations performed may need to be
2780 * replaced later due to copy relocations in the main program. */
2781
2782 hidden void __dls2(unsigned char *base, size_t *sp)
2783 {
2784 size_t *auxv;
2785 for (auxv=sp+1+*sp+1; *auxv; auxv++);
2786 auxv++;
2787 if (DL_FDPIC) {
2788 void *p1 = (void *)sp[-2];
2789 void *p2 = (void *)sp[-1];
2790 if (!p1) {
2791 size_t aux[AUX_CNT];
2792 decode_vec(auxv, aux, AUX_CNT);
2793 if (aux[AT_BASE]) ldso.base = (void *)aux[AT_BASE];
2794 else ldso.base = (void *)(aux[AT_PHDR] & -4096);
2795 }
2796 app_loadmap = p2 ? p1 : 0;
2797 ldso.loadmap = p2 ? p2 : p1;
2798 ldso.base = laddr(&ldso, 0);
2799 } else {
2800 ldso.base = base;
2801 }
2802 size_t aux[AUX_CNT];
2803 decode_vec(auxv, aux, AUX_CNT);
2804 libc.page_size = aux[AT_PAGESZ];
2805 Ehdr *ehdr = (void *)ldso.base;
2806 ldso.name = ldso.shortname = "libc.so";
2807 ldso.phnum = ehdr->e_phnum;
2808 ldso.phdr = laddr(&ldso, ehdr->e_phoff);
2809 ldso.phentsize = ehdr->e_phentsize;
2810 ldso.is_global = true;
2811 kernel_mapped_dso(&ldso);
2812 decode_dyn(&ldso);
2813
2814 if (DL_FDPIC) makefuncdescs(&ldso);
2815
2816 	/* Prepare storage to save clobbered REL addends so they
2817 * can be reused in stage 3. There should be very few. If
2818 * something goes wrong and there are a huge number, abort
2819 * instead of risking stack overflow. */
2820 size_t dyn[DYN_CNT];
2821 decode_vec(ldso.dynv, dyn, DYN_CNT);
2822 size_t *rel = laddr(&ldso, dyn[DT_REL]);
2823 size_t rel_size = dyn[DT_RELSZ];
2824 size_t symbolic_rel_cnt = 0;
2825 apply_addends_to = rel;
2826 for (; rel_size; rel+=2, rel_size-=2*sizeof(size_t))
2827 if (!IS_RELATIVE(rel[1], ldso.syms)) symbolic_rel_cnt++;
2828 if (symbolic_rel_cnt >= ADDEND_LIMIT) a_crash();
2829 size_t addends[symbolic_rel_cnt+1];
2830 saved_addends = addends;
2831
2832 head = &ldso;
2833 reloc_all(&ldso, NULL);
2834
2835 ldso.relocated = 0;
2836
2837 /* Call dynamic linker stage-2b, __dls2b, looking it up
2838 * symbolically as a barrier against moving the address
2839 * load across the above relocation processing. */
2840 struct symdef dls2b_def = find_sym(&ldso, "__dls2b", 0);
2841 if (DL_FDPIC) ((stage3_func)&ldso.funcdescs[dls2b_def.sym-ldso.syms])(sp, auxv, aux);
2842 else ((stage3_func)laddr(&ldso, dls2b_def.sym->st_value))(sp, auxv, aux);
2843 }
2844
2845 /* Stage 2b sets up a valid thread pointer, which requires relocations
2846 * completed in stage 2, and on which stage 3 is permitted to depend.
2847 * This is done as a separate stage, with symbolic lookup as a barrier,
2848 * so that loads of the thread pointer and &errno can be pure/const and
2849 * thereby hoistable. */
2850
2851 void __dls2b(size_t *sp, size_t *auxv, size_t *aux)
2852 {
2853 /* Setup early thread pointer in builtin_tls for ldso/libc itself to
2854 * use during dynamic linking. If possible it will also serve as the
2855 * thread pointer at runtime. */
2856 search_vec(auxv, &__hwcap, AT_HWCAP);
2857 libc.auxv = auxv;
2858 libc.tls_size = sizeof builtin_tls;
2859 libc.tls_align = tls_align;
2860 if (__init_tp(__copy_tls((void *)builtin_tls)) < 0) {
2861 a_crash();
2862 }
2863 __pthread_self()->stack = (void *)(sp + 1);
2864 struct symdef dls3_def = find_sym(&ldso, "__dls3", 0);
2865 if (DL_FDPIC) ((stage3_func)&ldso.funcdescs[dls3_def.sym-ldso.syms])(sp, auxv, aux);
2866 else ((stage3_func)laddr(&ldso, dls3_def.sym->st_value))(sp, auxv, aux);
2867 }
2868
2869 /* Stage 3 of the dynamic linker is called with the dynamic linker/libc
2870 * fully functional. Its job is to load (if not already loaded) and
2871 * process dependencies and relocations for the main application and
2872 * transfer control to its entry point. */
2873
2874 void __dls3(size_t *sp, size_t *auxv, size_t *aux)
2875 {
2876 static struct dso app, vdso;
2877 size_t i;
2878 char *env_preload=0;
2879 char *replace_argv0=0;
2880 size_t vdso_base;
2881 int argc = *sp;
2882 char **argv = (void *)(sp+1);
2883 char **argv_orig = argv;
2884 char **envp = argv+argc+1;
2885
2886 /* Find aux vector just past environ[] and use it to initialize
2887 * global data that may be needed before we can make syscalls. */
2888 __environ = envp;
2889 search_vec(auxv, &__sysinfo, AT_SYSINFO);
2890 __pthread_self()->sysinfo = __sysinfo;
2891 libc.secure = ((aux[0]&0x7800)!=0x7800 || aux[AT_UID]!=aux[AT_EUID]
2892 || aux[AT_GID]!=aux[AT_EGID] || aux[AT_SECURE]);
2893
2894 /* Only trust user/env if kernel says we're not suid/sgid */
2895 if (!libc.secure) {
2896 env_path = getenv("LD_LIBRARY_PATH");
2897 env_preload = getenv("LD_PRELOAD");
2898 }
2899 #ifdef OHOS_ENABLE_PARAMETER
2900 InitParameterClient();
2901 #endif
2902 /* If the main program was already loaded by the kernel,
2903 * AT_PHDR will point to some location other than the dynamic
2904 * linker's program headers. */
2905 if (aux[AT_PHDR] != (size_t)ldso.phdr) {
2906 size_t interp_off = 0;
2907 size_t tls_image = 0;
2908 /* Find load address of the main program, via AT_PHDR vs PT_PHDR. */
2909 Phdr *phdr = app.phdr = (void *)aux[AT_PHDR];
2910 app.phnum = aux[AT_PHNUM];
2911 app.phentsize = aux[AT_PHENT];
2912 for (i=aux[AT_PHNUM]; i; i--, phdr=(void *)((char *)phdr + aux[AT_PHENT])) {
2913 if (phdr->p_type == PT_PHDR)
2914 app.base = (void *)(aux[AT_PHDR] - phdr->p_vaddr);
2915 else if (phdr->p_type == PT_INTERP)
2916 interp_off = (size_t)phdr->p_vaddr;
2917 else if (phdr->p_type == PT_TLS) {
2918 tls_image = phdr->p_vaddr;
2919 app.tls.len = phdr->p_filesz;
2920 app.tls.size = phdr->p_memsz;
2921 app.tls.align = phdr->p_align;
2922 }
2923 }
2924 if (DL_FDPIC) app.loadmap = app_loadmap;
2925 if (app.tls.size) app.tls.image = laddr(&app, tls_image);
2926 if (interp_off) ldso.name = laddr(&app, interp_off);
2927 if ((aux[0] & (1UL<<AT_EXECFN))
2928 && strncmp((char *)aux[AT_EXECFN], "/proc/", 6))
2929 app.name = (char *)aux[AT_EXECFN];
2930 else
2931 app.name = argv[0];
2932 kernel_mapped_dso(&app);
2933 } else {
2934 int fd;
2935 char *ldname = argv[0];
2936 size_t l = strlen(ldname);
2937 if (l >= 3 && !strcmp(ldname+l-3, "ldd")) ldd_mode = 1;
2938 argv++;
2939 while (argv[0] && argv[0][0]=='-' && argv[0][1]=='-') {
2940 char *opt = argv[0]+2;
2941 *argv++ = (void *)-1;
2942 if (!*opt) {
2943 break;
2944 } else if (!memcmp(opt, "list", 5)) {
2945 ldd_mode = 1;
2946 } else if (!memcmp(opt, "library-path", 12)) {
2947 if (opt[12]=='=') env_path = opt+13;
2948 else if (opt[12]) *argv = 0;
2949 else if (*argv) env_path = *argv++;
2950 } else if (!memcmp(opt, "preload", 7)) {
2951 if (opt[7]=='=') env_preload = opt+8;
2952 else if (opt[7]) *argv = 0;
2953 else if (*argv) env_preload = *argv++;
2954 } else if (!memcmp(opt, "argv0", 5)) {
2955 if (opt[5]=='=') replace_argv0 = opt+6;
2956 else if (opt[5]) *argv = 0;
2957 else if (*argv) replace_argv0 = *argv++;
2958 } else {
2959 argv[0] = 0;
2960 }
2961 }
2962 argv[-1] = (void *)(argc - (argv-argv_orig));
2963 if (!argv[0]) {
2964 dprintf(2, "musl libc (" LDSO_ARCH ")\n"
2965 "Version %s\n"
2966 "Dynamic Program Loader\n"
2967 "Usage: %s [options] [--] pathname%s\n",
2968 __libc_version, ldname,
2969 ldd_mode ? "" : " [args]");
2970 _exit(1);
2971 }
2972 fd = open(argv[0], O_RDONLY);
2973 if (fd < 0) {
2974 dprintf(2, "%s: cannot load %s: %s\n", ldname, argv[0], strerror(errno));
2975 _exit(1);
2976 }
2977 Ehdr *ehdr = map_library(fd, &app, NULL);
2978 if (!ehdr) {
2979 dprintf(2, "%s: %s: Not a valid dynamic program\n", ldname, argv[0]);
2980 _exit(1);
2981 }
2982 close(fd);
2983 ldso.name = ldname;
2984 app.name = argv[0];
2985 aux[AT_ENTRY] = (size_t)laddr(&app, ehdr->e_entry);
2986 /* Find the name that would have been used for the dynamic
2987 * linker had ldd not taken its place. */
2988 if (ldd_mode) {
2989 for (i=0; i<app.phnum; i++) {
2990 if (app.phdr[i].p_type == PT_INTERP)
2991 ldso.name = laddr(&app, app.phdr[i].p_vaddr);
2992 }
2993 dprintf(1, "\t%s (%p)\n", ldso.name, ldso.base);
2994 }
2995 }
2996 if (app.tls.size) {
2997 libc.tls_head = tls_tail = &app.tls;
2998 app.tls_id = tls_cnt = 1;
2999 #ifdef TLS_ABOVE_TP
3000 app.tls.offset = GAP_ABOVE_TP;
3001 app.tls.offset += (-GAP_ABOVE_TP + (uintptr_t)app.tls.image)
3002 & (app.tls.align-1);
3003 tls_offset = app.tls.offset + app.tls.size;
3004 #else
3005 tls_offset = app.tls.offset = app.tls.size
3006 + ( -((uintptr_t)app.tls.image + app.tls.size)
3007 & (app.tls.align-1) );
3008 #endif
3009 tls_align = MAXP2(tls_align, app.tls.align);
3010 }
3011 decode_dyn(&app);
3012 if (DL_FDPIC) {
3013 makefuncdescs(&app);
3014 if (!app.loadmap) {
3015 app.loadmap = (void *)&app_dummy_loadmap;
3016 app.loadmap->nsegs = 1;
3017 app.loadmap->segs[0].addr = (size_t)app.map;
3018 app.loadmap->segs[0].p_vaddr = (size_t)app.map
3019 - (size_t)app.base;
3020 app.loadmap->segs[0].p_memsz = app.map_len;
3021 }
3022 argv[-3] = (void *)app.loadmap;
3023 }
3024 app.is_global = true;
3025
3026 /* Initial dso chain consists only of the app. */
3027 head = tail = syms_tail = &app;
3028
3029 /* Donate unused parts of app and library mapping to malloc */
3030 reclaim_gaps(&app);
3031 reclaim_gaps(&ldso);
3032
3033 find_and_set_bss_name(&app);
3034 find_and_set_bss_name(&ldso);
3035
3036 /* Load preload/needed libraries, add symbols to global namespace. */
3037 ldso.deps = (struct dso **)no_deps;
3038 /* Init g_is_asan */
3039 g_is_asan = false;
3040 LD_LOGD("__dls3 ldso.name:%{public}s.", ldso.name);
3041 	/* Use the ldso name to determine whether Asan is enabled */
3042 if (strstr(ldso.name, "-asan")) {
3043 g_is_asan = true;
3044 LD_LOGD("__dls3 g_is_asan is true.");
3045 }
3046 	/* Init all namespaces from the config file; there is always a default namespace. */
3047 init_namespace(&app);
3048
3049 #ifdef LOAD_ORDER_RANDOMIZATION
3050 struct loadtasks *tasks = create_loadtasks();
3051 if (!tasks) {
3052 _exit(1);
3053 }
3054 if (env_preload) {
3055 load_preload(env_preload, get_default_ns(), tasks);
3056 }
3057 for (struct dso *q=head; q; q=q->next) {
3058 q->is_global = true;
3059 }
3060 preload_deps(&app, tasks);
3061 unmap_preloaded_sections(tasks);
3062 shuffle_loadtasks(tasks);
3063 run_loadtasks(tasks, NULL);
3064 free_loadtasks(tasks);
3065 assign_tls(app.next);
3066 #else
3067 if (env_preload) load_preload(env_preload, get_default_ns());
3068 for (struct dso *q=head; q; q=q->next) {
3069 q->is_global = true;
3070 }
3071 load_deps(&app, NULL);
3072 #endif
3073
3074 /* Set is_reloc_head_so_dep to true for all direct and indirect dependent sos of app, including app self. */
3075 for (struct dso *p=head; p; p=p->next) {
3076 p->is_reloc_head_so_dep = true;
3077 add_syms(p);
3078 }
3079
3080 /* Attach to vdso, if provided by the kernel, last so that it does
3081 * not become part of the global namespace. */
3082 if (search_vec(auxv, &vdso_base, AT_SYSINFO_EHDR) && vdso_base) {
3083 Ehdr *ehdr = (void *)vdso_base;
3084 Phdr *phdr = vdso.phdr = (void *)(vdso_base + ehdr->e_phoff);
3085 vdso.phnum = ehdr->e_phnum;
3086 vdso.phentsize = ehdr->e_phentsize;
3087 for (i=ehdr->e_phnum; i; i--, phdr=(void *)((char *)phdr + ehdr->e_phentsize)) {
3088 if (phdr->p_type == PT_DYNAMIC)
3089 vdso.dynv = (void *)(vdso_base + phdr->p_offset);
3090 if (phdr->p_type == PT_LOAD)
3091 vdso.base = (void *)(vdso_base - phdr->p_vaddr + phdr->p_offset);
3092 }
3093 vdso.name = "";
3094 vdso.shortname = "linux-gate.so.1";
3095 vdso.relocated = 1;
3096 vdso.deps = (struct dso **)no_deps;
3097 decode_dyn(&vdso);
3098 vdso.prev = tail;
3099 tail->next = &vdso;
3100 tail = &vdso;
3101 vdso.namespace = get_default_ns();
3102 ns_add_dso(vdso.namespace, &vdso);
3103 }
3104
3105 for (i=0; app.dynv[i]; i+=2) {
3106 if (!DT_DEBUG_INDIRECT && app.dynv[i]==DT_DEBUG)
3107 app.dynv[i+1] = (size_t)&debug;
3108 if (DT_DEBUG_INDIRECT && app.dynv[i]==DT_DEBUG_INDIRECT) {
3109 size_t *ptr = (size_t *) app.dynv[i+1];
3110 *ptr = (size_t)&debug;
3111 }
3112 }
3113
3114 /* This must be done before final relocations, since it calls
3115 * malloc, which may be provided by the application. Calling any
3116 * application code prior to the jump to its entry point is not
3117 * valid in our model and does not work with FDPIC, where there
3118 * are additional relocation-like fixups that only the entry point
3119 * code can see to perform. */
3120 main_ctor_queue = queue_ctors(&app);
3121
3122 /* Initial TLS must also be allocated before final relocations
3123 * might result in calloc being a call to application code. */
3124 update_tls_size();
3125 void *initial_tls = builtin_tls;
3126 if (libc.tls_size > sizeof builtin_tls || tls_align > MIN_TLS_ALIGN) {
3127 initial_tls = calloc(libc.tls_size, 1);
3128 if (!initial_tls) {
3129 dprintf(2, "%s: Error getting %zu bytes thread-local storage: %m\n",
3130 argv[0], libc.tls_size);
3131 _exit(127);
3132 }
3133 }
3134 static_tls_cnt = tls_cnt;
3135
3136 /* The main program must be relocated LAST since it may contain
3137 * copy relocations which depend on libraries' relocations. */
3138 reloc_all(app.next, NULL);
3139 reloc_all(&app, NULL);
3140 for (struct dso *q=head; q; q=q->next) {
3141 q->is_reloc_head_so_dep = false;
3142 }
3143
3144 /* Actual copying to new TLS needs to happen after relocations,
3145 * since the TLS images might have contained relocated addresses. */
3146 if (initial_tls != builtin_tls) {
3147 pthread_t self = __pthread_self();
3148 pthread_t td = __copy_tls(initial_tls);
3149 if (__init_tp(td) < 0) {
3150 a_crash();
3151 }
3152 td->tsd = self->tsd;
3153 } else {
3154 size_t tmp_tls_size = libc.tls_size;
3155 pthread_t self = __pthread_self();
3156 /* Temporarily set the tls size to the full size of
3157 * builtin_tls so that __copy_tls will use the same layout
3158 		 * as it did before. Then check, just to be safe. */
3159 libc.tls_size = sizeof builtin_tls;
3160 if (__copy_tls((void*)builtin_tls) != self) a_crash();
3161 libc.tls_size = tmp_tls_size;
3162 }
3163
3164 if (init_cfi_shadow(head, &ldso) == CFI_FAILED) {
3165 error("[%s] init_cfi_shadow failed: %m", __FUNCTION__);
3166 }
3167
3168 if (ldso_fail) _exit(127);
3169 if (ldd_mode) _exit(0);
3170
3171 /* Determine if malloc was interposed by a replacement implementation
3172 * so that calloc and the memalign family can harden against the
3173 * possibility of incomplete replacement. */
3174 if (find_sym(head, "malloc", 1).dso != &ldso)
3175 __malloc_replaced = 1;
3176 if (find_sym(head, "aligned_alloc", 1).dso != &ldso)
3177 __aligned_alloc_replaced = 1;
3178
3179 /* Switch to runtime mode: any further failures in the dynamic
3180 * linker are a reportable failure rather than a fatal startup
3181 * error. */
3182 runtime = 1;
3183
3184 sync_with_debugger();
3185
3186 if (replace_argv0) argv[0] = replace_argv0;
3187
3188 #ifdef USE_GWP_ASAN
3189 init_gwp_asan_by_libc(false);
3190 #endif
3191
3192 #ifdef DFX_SIGNAL_LIBC
3193 DFX_InstallSignalHandler();
3194 #endif
3195 errno = 0;
3196
3197 CRTJMP((void *)aux[AT_ENTRY], argv-1);
3198 for(;;);
3199 }
3200
3201 static void prepare_lazy(struct dso *p)
3202 {
3203 size_t dyn[DYN_CNT], n, flags1=0;
3204 decode_vec(p->dynv, dyn, DYN_CNT);
3205 search_vec(p->dynv, &flags1, DT_FLAGS_1);
3206 if (dyn[DT_BIND_NOW] || (dyn[DT_FLAGS] & DF_BIND_NOW) || (flags1 & DF_1_NOW))
3207 return;
3208 n = dyn[DT_RELSZ]/2 + dyn[DT_RELASZ]/3 + dyn[DT_PLTRELSZ]/2 + 1;
3209 if (NEED_MIPS_GOT_RELOCS) {
3210 size_t j=0; search_vec(p->dynv, &j, DT_MIPS_GOTSYM);
3211 size_t i=0; search_vec(p->dynv, &i, DT_MIPS_SYMTABNO);
3212 n += i-j;
3213 }
3214 p->lazy = calloc(n, 3*sizeof(size_t));
3215 if (!p->lazy) {
3216 error("Error preparing lazy relocation for %s: %m", p->name);
3217 longjmp(*rtld_fail, 1);
3218 }
3219 p->lazy_next = lazy_head;
3220 lazy_head = p;
3221 }
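
/* prepare_lazy note: n is a generous upper bound on how many relocations
 * may be deferred; each deferred relocation is later stored as a 3-word
 * {offset, info, addend} record, hence calloc(n, 3*sizeof(size_t)). */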
3222
3223 static void *dlopen_post(struct dso* p, int mode) {
3224 if (p == NULL) {
3225 return p;
3226 }
3227 bool is_dlclose_debug = false;
3228 if (is_dlclose_debug_enable()) {
3229 is_dlclose_debug = true;
3230 }
3231 p->nr_dlopen++;
3232 if (is_dlclose_debug) {
3233 LD_LOGE("[dlclose]: %{public}s nr_dlopen++ when dlopen %{public}s, nr_dlopen:%{public}d ",
3234 p->name, p->name, p->nr_dlopen);
3235 }
3236 if (p->bfs_built) {
3237 for (int i = 0; p->deps[i]; i++) {
3238 p->deps[i]->nr_dlopen++;
3239 if (is_dlclose_debug) {
3240 LD_LOGE("[dlclose]: %{public}s nr_dlopen++ when dlopen %{public}s, nr_dlopen:%{public}d",
3241 p->deps[i]->name, p->name, p->deps[i]->nr_dlopen);
3242 }
3243 if (mode & RTLD_NODELETE) {
3244 p->deps[i]->flags |= DSO_FLAGS_NODELETE;
3245 }
3246 }
3247 }
3248
3249 #ifdef HANDLE_RANDOMIZATION
3250 void *handle = assign_valid_handle(p);
3251 if (handle == NULL) {
3252 LD_LOGE("dlopen_post: generate random handle failed");
3253 do_dlclose(p);
3254 }
3255
3256 return handle;
3257 #endif
3258
3259 return p;
3260 }
3261
3262 static char *dlopen_permitted_list[] =
3263 {
3264 "default",
3265 "ndk",
3266 };
3267
3268 #define PERMITIED_TARGET "nweb_ns"
3269 static bool in_permitted_list(char *caller, char *target)
3270 {
3271 for (int i = 0; i < sizeof(dlopen_permitted_list)/sizeof(char*); i++) {
3272 if (strcmp(dlopen_permitted_list[i], caller) == 0) {
3273 return true;
3274 }
3275 }
3276
3277 if (strcmp(PERMITIED_TARGET, target) == 0) {
3278 return true;
3279 }
3280
3281 return false;
3282 }
3283
3284 static bool is_permitted(const void *caller_addr, char *target)
3285 {
3286 struct dso *caller;
3287 ns_t *ns;
3288 caller = (struct dso *)addr2dso((size_t)caller_addr);
3289 if ((caller == NULL) || (caller->namespace == NULL)) {
3290 LD_LOGE("caller ns get error");
3291 return false;
3292 }
3293
3294 ns = caller->namespace;
3295 if (in_permitted_list(ns->ns_name, target) == false) {
3296 LD_LOGE("caller ns: %{public}s have no permission, target is %{public}s", ns->ns_name, target);
3297 return false;
3298 }
3299
3300 return true;
3301 }
3302
3303 /* add namespace function */
3304 static void *dlopen_impl(
3305 const char *file, int mode, const char *namespace, const void *caller_addr, const dl_extinfo *extinfo)
3306 {
3307 struct dso *volatile p, *orig_tail, *orig_syms_tail, *orig_lazy_head, *next;
3308 struct tls_module *orig_tls_tail;
3309 size_t orig_tls_cnt, orig_tls_offset, orig_tls_align;
3310 size_t i;
3311 int cs;
3312 jmp_buf jb;
3313 struct dso **volatile ctor_queue = 0;
3314 ns_t *ns;
3315 struct dso *caller;
3316 bool reserved_address = false;
3317 bool reserved_address_recursive = false;
3318 struct reserved_address_params reserved_params = {0};
3319 #ifdef LOAD_ORDER_RANDOMIZATION
3320 struct loadtasks *tasks = NULL;
3321 struct loadtask *task = NULL;
3322 bool is_task_appended = false;
3323 #endif
3324
3325 if (!file) {
3326 LD_LOGD("dlopen_impl file is null, return head.");
3327 return dlopen_post(head, mode);
3328 }
3329
3330 if (extinfo) {
3331 reserved_address_recursive = extinfo->flag & DL_EXT_RESERVED_ADDRESS_RECURSIVE;
3332 if (extinfo->flag & DL_EXT_RESERVED_ADDRESS) {
3333 reserved_address = true;
3334 reserved_params.start_addr = extinfo->reserved_addr;
3335 reserved_params.reserved_size = extinfo->reserved_size;
3336 reserved_params.must_use_reserved = true;
3337 reserved_params.reserved_address_recursive = reserved_address_recursive;
3338 } else if (extinfo->flag & DL_EXT_RESERVED_ADDRESS_HINT) {
3339 reserved_address = true;
3340 reserved_params.start_addr = extinfo->reserved_addr;
3341 reserved_params.reserved_size = extinfo->reserved_size;
3342 reserved_params.must_use_reserved = false;
3343 reserved_params.reserved_address_recursive = reserved_address_recursive;
3344 }
3345 }
3346
3347 pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
3348 pthread_rwlock_wrlock(&lock);
3349 __inhibit_ptc();
3350 trace_marker_reset();
3351 trace_marker_begin(HITRACE_TAG_MUSL, "dlopen: ", file);
3352
3353 	/* When the namespace does not exist, use the caller's namespace,
3354 	 * and when the caller does not exist, use the default namespace. */
3355 caller = (struct dso *)addr2dso((size_t)caller_addr);
3356 ns = find_ns_by_name(namespace);
3357 if (!ns) ns = ((caller && caller->namespace) ? caller->namespace : get_default_ns());
3358
3359 p = 0;
3360 if (shutting_down) {
3361 error("Cannot dlopen while program is exiting.");
3362 goto end;
3363 }
3364 orig_tls_tail = tls_tail;
3365 orig_tls_cnt = tls_cnt;
3366 orig_tls_offset = tls_offset;
3367 orig_tls_align = tls_align;
3368 orig_lazy_head = lazy_head;
3369 orig_syms_tail = syms_tail;
3370 orig_tail = tail;
3371 noload = mode & RTLD_NOLOAD;
3372
3373 rtld_fail = &jb;
3374 if (setjmp(*rtld_fail)) {
3375 /* Clean up anything new that was (partially) loaded */
3376 revert_syms(orig_syms_tail);
3377 for (p=orig_tail->next; p; p=next) {
3378 next = p->next;
3379 while (p->td_index) {
3380 void *tmp = p->td_index->next;
3381 free(p->td_index);
3382 p->td_index = tmp;
3383 }
3384 free(p->funcdescs);
3385 if (p->rpath != p->rpath_orig)
3386 free(p->rpath);
3387 if (p->deps) {
3388 for (int i = 0; i < p->ndeps_direct; i++) {
3389 remove_dso_parent(p->deps[i], p);
3390 }
3391 }
3392 free(p->deps);
3393 dlclose_ns(p);
3394 unmap_library(p);
3395 if (p->parents) {
3396 free(p->parents);
3397 }
3398 free_reloc_can_search_dso(p);
3399 }
3400 for (p=orig_tail->next; p; p=next) {
3401 next = p->next;
3402 free(p);
3403 }
3404 free(ctor_queue);
3405 ctor_queue = 0;
3406 if (!orig_tls_tail) libc.tls_head = 0;
3407 tls_tail = orig_tls_tail;
3408 if (tls_tail) tls_tail->next = 0;
3409 tls_cnt = orig_tls_cnt;
3410 tls_offset = orig_tls_offset;
3411 tls_align = orig_tls_align;
3412 lazy_head = orig_lazy_head;
3413 tail = orig_tail;
3414 tail->next = 0;
3415 p = 0;
3416 goto end;
3417 } else {
3418 #ifdef LOAD_ORDER_RANDOMIZATION
3419 tasks = create_loadtasks();
3420 if (!tasks) {
3421 LD_LOGE("dlopen_impl create loadtasks failed");
3422 goto end;
3423 }
3424 task = create_loadtask(file, head, ns, true);
3425 if (!task) {
3426 LD_LOGE("dlopen_impl create loadtask failed");
3427 goto end;
3428 }
3429 trace_marker_begin(HITRACE_TAG_MUSL, "loading: entry so", file);
3430 if (!load_library_header(task)) {
3431 error(noload ?
3432 "Library %s is not already loaded" :
3433 "Error loading shared library %s: %m",
3434 file);
3435 LD_LOGE("dlopen_impl load library header failed for %{public}s", task->name);
3436 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3437 goto end;
3438 }
3439 if (reserved_address) {
3440 reserved_params.target = task->p;
3441 }
3442 }
3443 if (!task->p) {
3444 LD_LOGE("dlopen_impl load library failed for %{public}s", task->name);
3445 error(noload ?
3446 "Library %s is not already loaded" :
3447 "Error loading shared library %s: %m",
3448 file);
3449 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3450 goto end;
3451 }
3452 if (!task->isloaded) {
3453 is_task_appended = append_loadtasks(tasks, task);
3454 }
3455 preload_deps(task->p, tasks);
3456 unmap_preloaded_sections(tasks);
3457 if (!reserved_address_recursive) {
3458 shuffle_loadtasks(tasks);
3459 }
3460 run_loadtasks(tasks, reserved_address ? &reserved_params : NULL);
3461 p = task->p;
3462 if (!task->isloaded) {
3463 assign_tls(p);
3464 }
3465 if (!is_task_appended) {
3466 free_task(task);
3467 task = NULL;
3468 }
3469 free_loadtasks(tasks);
3470 tasks = NULL;
3471 #else
3472 trace_marker_begin(HITRACE_TAG_MUSL, "loading: entry so", file);
3473 p = load_library(file, head, ns, true, reserved_address ? &reserved_params : NULL);
3474 }
3475
3476 if (!p) {
3477 error(noload ?
3478 "Library %s is not already loaded" :
3479 "Error loading shared library %s: %m",
3480 file);
3481 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3482 goto end;
3483 }
3484 /* First load handling */
3485 load_deps(p, reserved_address && reserved_address_recursive ? &reserved_params : NULL);
3486 #endif
3487 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3488 extend_bfs_deps(p);
3489 pthread_mutex_lock(&init_fini_lock);
3490 int constructed = p->constructed;
3491 pthread_mutex_unlock(&init_fini_lock);
3492 if (!constructed) ctor_queue = queue_ctors(p);
3493 if (!p->relocated && (mode & RTLD_LAZY)) {
3494 prepare_lazy(p);
3495 for (i=0; p->deps[i]; i++)
3496 if (!p->deps[i]->relocated)
3497 prepare_lazy(p->deps[i]);
3498 }
3499 if (!p->relocated || (mode & RTLD_GLOBAL)) {
3500 /* Make new symbols global, at least temporarily, so we can do
3501 * relocations. If not RTLD_GLOBAL, this is reverted below. */
3502 add_syms(p);
3503 /* Set is_reloc_head_so_dep to true for all direct and indirect dependency sos of p, including p itself. */
3504 p->is_reloc_head_so_dep = true;
3505 for (i=0; p->deps[i]; i++) {
3506 p->deps[i]->is_reloc_head_so_dep = true;
3507 add_syms(p->deps[i]);
3508 }
3509 }
3510 struct dso *reloc_head_so = p;
3511 trace_marker_begin(HITRACE_TAG_MUSL, "linking: entry so", p->name);
3512 if (!p->relocated) {
3513 reloc_all(p, extinfo);
3514 }
3515 trace_marker_end(HITRACE_TAG_MUSL);
3516 reloc_head_so->is_reloc_head_so_dep = false;
3517 for (size_t i=0; reloc_head_so->deps[i]; i++) {
3518 reloc_head_so->deps[i]->is_reloc_head_so_dep = false;
3519 }
3520
3521 /* If RTLD_GLOBAL was not specified, undo any new additions
3522 * to the global symbol table. This is a nop if the library was
3523 * previously loaded and already global. */
3524 if (!(mode & RTLD_GLOBAL))
3525 revert_syms(orig_syms_tail);
3526
3527 /* Processing of deferred lazy relocations must not happen until
3528 * the new libraries are committed; otherwise we could end up with
3529 * relocations resolved to symbol definitions that get removed. */
3530 redo_lazy_relocs();
3531
3532 if (map_dso_to_cfi_shadow(p) == CFI_FAILED) {
3533 error("[%s] map_dso_to_cfi_shadow failed: %m", __FUNCTION__);
3534 longjmp(*rtld_fail, 1);
3535 }
3536
3537 if (mode & RTLD_NODELETE) {
3538 p->flags |= DSO_FLAGS_NODELETE;
3539 }
3540
3541 update_tls_size();
3542 if (tls_cnt != orig_tls_cnt)
3543 install_new_tls();
3544
3545 if (orig_tail != tail) {
3546 notify_addition_to_debugger(orig_tail->next);
3547 }
3548
3549 orig_tail = tail;
3550
3551 p = dlopen_post(p, mode);
3552 end:
3553 #ifdef LOAD_ORDER_RANDOMIZATION
3554 if (!is_task_appended) {
3555 free_task(task);
3556 }
3557 free_loadtasks(tasks);
3558 #endif
3559 __release_ptc();
3560 if (p) gencnt++;
3561 pthread_rwlock_unlock(&lock);
3562 if (ctor_queue) {
3563 do_init_fini(ctor_queue);
3564 free(ctor_queue);
3565 }
3566 pthread_setcancelstate(cs, 0);
3567 trace_marker_end(HITRACE_TAG_MUSL); // "dlopen: " trace end.
3568 return p;
3569 }
3570
3571 void *dlopen(const char *file, int mode)
3572 {
3573 const void *caller_addr = __builtin_return_address(0);
3574 musl_log_reset();
3575 ld_log_reset();
3576 LD_LOGI("dlopen file:%{public}s, mode:%{public}x ,caller_addr:%{public}p .", file, mode, caller_addr);
3577 return dlopen_impl(file, mode, NULL, caller_addr, NULL);
3578 }
3579
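/* Illustrative namespace usage (a sketch only; the namespace name, path and
 * library below are examples, and error handling is omitted):
 *
 *   Dl_namespace dlns;
 *   dlns_init(&dlns, "my_ns");
 *   dlns_create(&dlns, "/path/to/ns/libs");
 *   void *h = dlopen_ns(&dlns, "libfoo.so", RTLD_NOW);
 *
 * Lookups are then confined to the namespace's lib_paths plus whatever it
 * inherits (dlns_create inherits from the default namespace). */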
3580 void dlns_init(Dl_namespace *dlns, const char *name)
3581 {
3582 if (!dlns) {
3583 return;
3584 }
3585 if (!name) {
3586 dlns->name[0] = 0;
3587 return;
3588 }
3589
3590 const void *caller_addr = __builtin_return_address(0);
3591 if (is_permitted(caller_addr, name) == false) {
3592 return;
3593 }
3594
3595 snprintf(dlns->name, sizeof dlns->name, "%s", name);
3596 LD_LOGI("dlns_init dlns->name:%{public}s .", dlns->name);
3597 }
3598
3599 int dlns_get(const char *name, Dl_namespace *dlns)
3600 {
3601 if (!dlns) {
3602 LD_LOGE("dlns_get dlns is null.");
3603 return EINVAL;
3604 }
3605 int ret = 0;
3606 ns_t *ns = NULL;
3607 pthread_rwlock_rdlock(&lock);
3608 if (!name) {
3609 struct dso *caller;
3610 const void *caller_addr = __builtin_return_address(0);
3611 caller = (struct dso *)addr2dso((size_t)caller_addr);
3612 ns = ((caller && caller->namespace) ? caller->namespace : get_default_ns());
3613 (void)snprintf(dlns->name, sizeof dlns->name, "%s", ns->ns_name);
3614 LD_LOGI("dlns_get name is null, current dlns dlns->name:%{public}s.", dlns->name);
3615 } else {
3616 ns = find_ns_by_name(name);
3617 if (ns) {
3618 (void)snprintf(dlns->name, sizeof dlns->name, "%s", ns->ns_name);
3619 LD_LOGI("dlns_get found ns, current dlns dlns->name:%{public}s.", dlns->name);
3620 } else {
3621 LD_LOGI("dlns_get not found ns! name:%{public}s.", name);
3622 ret = ENOKEY;
3623 }
3624 }
3625 pthread_rwlock_unlock(&lock);
3626 return ret;
3627 }
3628
3629 void *dlopen_ns(Dl_namespace *dlns, const char *file, int mode)
3630 {
3631 const void *caller_addr = __builtin_return_address(0);
3632 if (is_permitted(caller_addr, dlns->name) == false) {
3633 return NULL;
3634 }
3635
3636 musl_log_reset();
3637 ld_log_reset();
3638 LD_LOGI("dlopen_ns file:%{public}s, mode:%{public}x , caller_addr:%{public}p , dlns->name:%{public}s.",
3639 file,
3640 mode,
3641 caller_addr,
3642 dlns ? dlns->name : "NULL");
3643 return dlopen_impl(file, mode, dlns->name, caller_addr, NULL);
3644 }
3645
3646 void *dlopen_ns_ext(Dl_namespace *dlns, const char *file, int mode, const dl_extinfo *extinfo)
3647 {
3648 const void *caller_addr = __builtin_return_address(0);
3649 if (is_permitted(caller_addr, dlns->name) == false) {
3650 return NULL;
3651 }
3652
3653 musl_log_reset();
3654 ld_log_reset();
3655 LD_LOGI("dlopen_ns_ext file:%{public}s, mode:%{public}x , caller_addr:%{public}p , "
3656 "dlns->name:%{public}s. , extinfo->flag:%{public}x",
3657 file,
3658 mode,
3659 caller_addr,
3660 dlns->name,
3661 extinfo ? extinfo->flag : 0);
3662 return dlopen_impl(file, mode, dlns->name, caller_addr, extinfo);
3663 }
3664
3665 int dlns_create2(Dl_namespace *dlns, const char *lib_path, int flags)
3666 {
3667 if (!dlns) {
3668 LD_LOGE("dlns_create2 dlns is null.");
3669 return EINVAL;
3670 }
3671 ns_t *ns;
3672
3673 pthread_rwlock_wrlock(&lock);
3674 const void *caller_addr = __builtin_return_address(0);
3675 if (is_permitted(caller_addr, dlns->name) == false) {
3676 pthread_rwlock_unlock(&lock);
3677 return EPERM;
3678 }
3679
3680 ns = find_ns_by_name(dlns->name);
3681 if (ns) {
3682 LD_LOGE("dlns_create2 ns is exist.");
3683 pthread_rwlock_unlock(&lock);
3684 return EEXIST;
3685 }
3686 ns = ns_alloc();
3687 if (!ns) {
3688 LD_LOGE("dlns_create2 no memery.");
3689 pthread_rwlock_unlock(&lock);
3690 return ENOMEM;
3691 }
3692 ns_set_name(ns, dlns->name);
3693 ns_set_flag(ns, flags);
3694 ns_add_dso(ns, get_default_ns()->ns_dsos->dsos[0]); /* add main app to this namespace */
3695 nslist_add_ns(ns); /* add ns to list */
3696 ns_set_lib_paths(ns, lib_path);
3697
3698 if ((flags & CREATE_INHERIT_DEFAULT) != 0) {
3699 ns_add_inherit(ns, get_default_ns(), NULL);
3700 }
3701
3702 if ((flags & CREATE_INHERIT_CURRENT) != 0) {
3703 struct dso *caller;
3704 caller_addr = __builtin_return_address(0);
3705 caller = (struct dso *)addr2dso((size_t)caller_addr);
3706 if (caller && caller->namespace) {
3707 ns_add_inherit(ns, caller->namespace, NULL);
3708 }
3709 }
3710
3711 LD_LOGI("dlns_create2:"
3712 "ns_name: %{public}s ,"
3713 "separated:%{public}d ,"
3714 "lib_paths:%{public}s ",
3715 ns->ns_name, ns->separated, ns->lib_paths);
3716 pthread_rwlock_unlock(&lock);
3717
3718 return 0;
3719 }
3720
3721 int dlns_create(Dl_namespace *dlns, const char *lib_path)
3722 {
3723 LD_LOGI("dlns_create lib_paths:%{public}s", lib_path);
3724 return dlns_create2(dlns, lib_path, CREATE_INHERIT_DEFAULT);
3725 }
3726
3727 int dlns_inherit(Dl_namespace *dlns, Dl_namespace *inherited, const char *shared_libs)
3728 {
3729 if (!dlns || !inherited) {
3730 LD_LOGE("dlns_inherit dlns or inherited is null.");
3731 return EINVAL;
3732 }
3733
3734 pthread_rwlock_wrlock(&lock);
3735 const void *caller_addr = __builtin_return_address(0);
3736 if (is_permitted(caller_addr, dlns->name) == false) {
3737 pthread_rwlock_unlock(&lock);
3738 return EPERM;
3739 }
3740
3741 ns_t* ns = find_ns_by_name(dlns->name);
3742 ns_t* ns_inherited = find_ns_by_name(inherited->name);
3743 if (!ns || !ns_inherited) {
3744 LD_LOGE("dlns_inherit ns or ns_inherited is not found.");
3745 pthread_rwlock_unlock(&lock);
3746 return ENOKEY;
3747 }
3748 ns_add_inherit(ns, ns_inherited, shared_libs);
3749 pthread_rwlock_unlock(&lock);
3750
3751 return 0;
3752 }
3753
3754 static void dlclose_ns(struct dso *p)
3755 {
3756 if (!p) return;
3757 ns_t * ns = p->namespace;
3758 if (!ns||!ns->ns_dsos) return;
3759 for (size_t i=0; i<ns->ns_dsos->num; i++) {
3760 if (p == ns->ns_dsos->dsos[i]) {
3761 for (size_t j=i+1; j<ns->ns_dsos->num; j++) {
3762 ns->ns_dsos->dsos[j-1] = ns->ns_dsos->dsos[j];
3763 }
3764 ns->ns_dsos->num--;
3765 return;
3766 }
3767 }
3768 }
3769
3770 hidden int __dl_invalid_handle(void *h)
3771 {
3772 struct dso *p;
3773 for (p=head; p; p=p->next) if (h==p) return 0;
3774 error("Invalid library handle %p", (void *)h);
3775 return 1;
3776 }
3777
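/* Map an arbitrary address back to the dso whose PT_LOAD segments contain
 * it; returns 0 when the address lies in a mapping gap or in no loaded
 * object. Callers are expected to hold the dso list lock. */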
3778 void *addr2dso(size_t a)
3779 {
3780 struct dso *p;
3781 size_t i;
3782 for (p=head; p; p=p->next) {
3783 if (a < (size_t)p->map || a - (size_t)p->map >= p->map_len) continue;
3784 Phdr *ph = p->phdr;
3785 size_t phcnt = p->phnum;
3786 size_t entsz = p->phentsize;
3787 size_t base = (size_t)p->base;
3788 for (; phcnt--; ph=(void *)((char *)ph+entsz)) {
3789 if (ph->p_type != PT_LOAD) continue;
3790 if (a-base-ph->p_vaddr < ph->p_memsz)
3791 return p;
3792 }
3793 if (a-(size_t)p->map < p->map_len)
3794 return 0;
3795 }
3796 return 0;
3797 }
3798
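/* Core of dlsym/dlvsym: dispatch on the pseudo-handles RTLD_DEFAULT and
 * RTLD_NEXT (resolved relative to the caller's dso and namespace), or
 * search the dependency graph of an explicit handle. */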
3799 static void *do_dlsym(struct dso *p, const char *s, const char *v, void *ra)
3800 {
3801 int use_deps = 0;
3802 bool ra2dso = false;
3803 ns_t *ns = NULL;
3804 struct dso *caller = NULL;
3805 if (p == head || p == RTLD_DEFAULT) {
3806 p = head;
3807 ra2dso = true;
3808 } else if (p == RTLD_NEXT) {
3809 p = addr2dso((size_t)ra);
3810 if (!p) p=head;
3811 p = p->next;
3812 ra2dso = true;
3813 #ifndef HANDLE_RANDOMIZATION
3814 } else if (__dl_invalid_handle(p)) {
3815 return 0;
3816 #endif
3817 } else {
3818 use_deps = 1;
3819 ns = p->namespace;
3820 }
3821 if (ra2dso) {
3822 caller = (struct dso *)addr2dso((size_t)ra);
3823 if (caller && caller->namespace) {
3824 ns = caller->namespace;
3825 }
3826 }
3827 trace_marker_begin(HITRACE_TAG_MUSL, "dlsym: ", (s == NULL ? "(NULL)" : s));
3828 struct verinfo verinfo = { .s = s, .v = v, .use_vna_hash = false };
3829 struct symdef def = use_deps ? find_sym_by_deps(p, &verinfo, 0, ns) :
3830 find_sym2(p, &verinfo, 0, use_deps, ns);
3831 trace_marker_end(HITRACE_TAG_MUSL);
3832 if (!def.sym) {
3833 LD_LOGE("do_dlsym failed: symbol not found. so=%{public}s s=%{public}s v=%{public}s", p->name, s, v);
3834 error("Symbol not found: %s, version: %s", s, strlen(v) > 0 ? v : "null");
3835 return 0;
3836 }
3837 if ((def.sym->st_info&0xf) == STT_TLS)
3838 return __tls_get_addr((tls_mod_off_t []){def.dso->tls_id, def.sym->st_value-DTP_OFFSET});
3839 if (DL_FDPIC && (def.sym->st_info&0xf) == STT_FUNC)
3840 return def.dso->funcdescs + (def.sym - def.dso->syms);
3841 return laddr(def.dso, def.sym->st_value);
3842 }
3843
3844 extern int invalidate_exit_funcs(struct dso *p);
3845
3846 static int so_can_unload(struct dso *p, int check_flag)
3847 {
3848 if ((check_flag & UNLOAD_COMMON_CHECK) != 0) {
3849 if (__dl_invalid_handle(p)) {
3850 LD_LOGE("[dlclose]: invalid handle %{public}p", p);
3851 error("[dlclose]: Handle is invalid.");
3852 return 0;
3853 }
3854
3855 if (!p->by_dlopen) {
3856 LD_LOGD("[dlclose]: skip unload %{public}s because it's not loaded by dlopen", p->name);
3857 return 0;
3858 }
3859
3860 /* dso is marked as RTLD_NODELETE library, do nothing here. */
3861 if ((p->flags & DSO_FLAGS_NODELETE) != 0) {
3862 LD_LOGD("[dlclose]: skip unload %{public}s because flags is RTLD_NODELETE", p->name);
3863 return 0;
3864 }
3865 }
3866
3867 if ((check_flag & UNLOAD_NR_DLOPEN_CHECK) != 0) {
3868 if (p->nr_dlopen > 0) {
3869 LD_LOGD("[dlclose]: skip unload %{public}s because nr_dlopen=%{public}d > 0", p->name, p->nr_dlopen);
3870 return 0;
3871 }
3872 }
3873
3874 return 1;
3875 }
3876
3877 static int dlclose_post(struct dso *p)
3878 {
3879 if (p == NULL) {
3880 return -1;
3881 }
3882 #ifdef ENABLE_HWASAN
3883 if (libc.unload_hook) {
3884 libc.unload_hook((unsigned long int)p->base, p->phdr, p->phnum);
3885 }
3886 #endif
3887 unmap_library(p);
3888 if (p->parents) {
3889 free(p->parents);
3890 }
3891 free_reloc_can_search_dso(p);
3892 if (p->tls.size == 0) {
3893 free(p);
3894 }
3895
3896 return 0;
3897 }
3898
3899 static int dlclose_impl(struct dso *p)
3900 {
3901 struct dso *d;
3902
3903 trace_marker_reset();
3904 trace_marker_begin(HITRACE_TAG_MUSL, "dlclose", p->name);
3905
3906 /* remove dso symbols from global list */
3907 if (p->syms_next) {
3908 for (d = head; d->syms_next != p; d = d->syms_next)
3909 ; /* NOP */
3910 d->syms_next = p->syms_next;
3911 } else if (p == syms_tail) {
3912 for (d = head; d->syms_next != p; d = d->syms_next)
3913 ; /* NOP */
3914 d->syms_next = NULL;
3915 syms_tail = d;
3916 }
3917
3918 /* remove dso from lazy list if needed */
3919 if (p == lazy_head) {
3920 lazy_head = p->lazy_next;
3921 } else if (p->lazy_next) {
3922 for (d = lazy_head; d->lazy_next != p; d = d->lazy_next)
3923 ; /* NOP */
3924 d->lazy_next = p->lazy_next;
3925 }
3926
3927 pthread_mutex_lock(&init_fini_lock);
3928 /* remove dso from fini list */
3929 if (p == fini_head) {
3930 fini_head = p->fini_next;
3931 } else if (p->fini_next) {
3932 for (d = fini_head; d->fini_next != p; d = d->fini_next)
3933 ; /* NOP */
3934 d->fini_next = p->fini_next;
3935 }
3936 pthread_mutex_unlock(&init_fini_lock);
3937
3938 /* empty tls image */
3939 if (p->tls.size != 0) {
3940 p->tls.image = NULL;
3941 }
3942
3943 /* remove dso from global dso list */
3944 if (p == tail) {
3945 tail = p->prev;
3946 tail->next = NULL;
3947 } else {
3948 p->next->prev = p->prev;
3949 p->prev->next = p->next;
3950 }
3951
3952 /* remove dso from namespace */
3953 dlclose_ns(p);
3954
3955 /* drop the randomized handle mapping for this dso, if any */
3956 void* handle = find_handle_by_dso(p);
3957 if (handle) {
3958 remove_handle_node(handle);
3959 }
3960
3961 /* after destruct, invalidate atexit funcs which belong to this dso */
3962 #if (defined(FEATURE_ATEXIT_CB_PROTECT))
3963 invalidate_exit_funcs(p);
3964 #endif
3965
3966 notify_remove_to_debugger(p);
3967
3968 unmap_dso_from_cfi_shadow(p);
3969
3970 if (p->lazy != NULL)
3971 free(p->lazy);
3972 if (p->deps != no_deps)
3973 free(p->deps);
3974
3975 trace_marker_end(HITRACE_TAG_MUSL);
3976
3977 return 0;
3978 }
3979
3980 static int do_dlclose(struct dso *p)
3981 {
3982 struct dso_entry *ef = NULL;
3983 struct dso_entry *ef_tmp = NULL;
3984 size_t n;
3985 int unload_check_result;
3986 TAILQ_HEAD(unload_queue, dso_entry) unload_queue;
3987 TAILQ_HEAD(need_unload_queue, dso_entry) need_unload_queue;
3988 unload_check_result = so_can_unload(p, UNLOAD_COMMON_CHECK);
3989 if (unload_check_result != 1) {
3990 return unload_check_result;
3991 }
3992 // Unconditionally subtract 1 here because dlopen_post unconditionally adds 1.
3993 if (p->nr_dlopen > 0) {
3994 --(p->nr_dlopen);
3995 } else {
3996 LD_LOGE("[dlclose]: number of dlopen and dlclose of %{public}s doesn't match when dlclose %{public}s",
3997 p->name, p->name);
3998 return 0;
3999 }
4000
4001 if (p->bfs_built) {
4002 for (int i = 0; p->deps[i]; i++) {
4003 if (p->deps[i]->nr_dlopen > 0) {
4004 p->deps[i]->nr_dlopen--;
4005 } else {
4006 LD_LOGE("[dlclose]: number of dlopen and dlclose of %{public}s doesn't match when dlclose %{public}s",
4007 p->deps[i]->name, p->name);
4008 return 0;
4009 }
4010 }
4011 }
4012 unload_check_result = so_can_unload(p, UNLOAD_NR_DLOPEN_CHECK);
4013 if (unload_check_result != 1) {
4014 return unload_check_result;
4015 }
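/* Collect the set of dsos that can actually be unloaded: breadth-first
 * walk starting from p, moving a dependency into need_unload_queue once
 * no remaining parent references it and it passes the unload checks. */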
4016 TAILQ_INIT(&unload_queue);
4017 TAILQ_INIT(&need_unload_queue);
4018 struct dso_entry *start_entry = (struct dso_entry *)malloc(sizeof(struct dso_entry));
4019 start_entry->dso = p;
4020 TAILQ_INSERT_TAIL(&unload_queue, start_entry, entries);
4021
4022 while (!TAILQ_EMPTY(&unload_queue)) {
4023 struct dso_entry *ecur = TAILQ_FIRST(&unload_queue);
4024 struct dso *cur = ecur->dso;
4025 TAILQ_REMOVE(&unload_queue, ecur, entries);
4026 bool already_in_need_unload_queue = false;
4027 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4028 if (ef->dso == cur) {
4029 already_in_need_unload_queue = true;
4030 break;
4031 }
4032 }
4033 if (already_in_need_unload_queue) {
4034 continue;
4035 }
4036 TAILQ_INSERT_TAIL(&need_unload_queue, ecur, entries);
4037 for (int i = 0; i < cur->ndeps_direct; i++) {
4038 remove_dso_parent(cur->deps[i], cur);
4039 if ((cur->deps[i]->parents_count == 0) && (so_can_unload(cur->deps[i], UNLOAD_ALL_CHECK) == 1)) {
4040 bool already_in_unload_queue = false;
4041 TAILQ_FOREACH(ef, &unload_queue, entries) {
4042 if (ef->dso == cur->deps[i]) {
4043 already_in_unload_queue = true;
4044 break;
4045 }
4046 }
4047 if (already_in_unload_queue) {
4048 continue;
4049 }
4050
4051 struct dso_entry *edeps = (struct dso_entry *)malloc(sizeof(struct dso_entry));
4052 edeps->dso = cur->deps[i];
4053 TAILQ_INSERT_TAIL(&unload_queue, edeps, entries);
4054 }
4055 } /* for */
4056 } /* while */
4057
4058 if (is_dlclose_debug_enable()) {
4059 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4060 LD_LOGE("[dlclose]: unload %{public}s succeed when dlclose %{public}s", ef->dso->name, p->name);
4061 }
4062 for (size_t deps_num = 0; p->deps[deps_num]; deps_num++) {
4063 bool ready_to_unload = false;
4064 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4065 if (ef->dso == p->deps[deps_num]) {
4066 ready_to_unload = true;
4067 break;
4068 }
4069 }
4070 if (!ready_to_unload) {
4071 LD_LOGE("[dlclose]: unload %{public}s failed when dlclose %{public}s,"
4072 "nr_dlopen:%{public}d, by_dlopen:%{public}d, parents_count:%{public}d",
4073 p->deps[deps_num]->name, p->name, p->deps[deps_num]->nr_dlopen,
4074 p->deps[deps_num]->by_dlopen, p->deps[deps_num]->parents_count);
4075 }
4076 }
4077 }
4078
4079 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4080 dlclose_impl(ef->dso);
4081 }
4082
4083 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4084 /* call destructors if needed */
4085 pthread_mutex_lock(&init_fini_lock);
4086 int constructed = ef->dso->constructed;
4087 pthread_mutex_unlock(&init_fini_lock);
4088
4089 if (constructed) {
4090 size_t dyn[DYN_CNT];
4091 decode_vec(ef->dso->dynv, dyn, DYN_CNT);
4092 if (dyn[0] & (1<<DT_FINI_ARRAY)) {
4093 n = dyn[DT_FINI_ARRAYSZ] / sizeof(size_t);
4094 size_t *fn = (size_t *)laddr(ef->dso, dyn[DT_FINI_ARRAY]) + n;
4095 trace_marker_begin(HITRACE_TAG_MUSL, "calling destructors:", ef->dso->name);
4096
4097 pthread_rwlock_unlock(&lock);
4098 while (n--)
4099 ((void (*)(void))*--fn)();
4100 pthread_rwlock_wrlock(&lock);
4101
4102 trace_marker_end(HITRACE_TAG_MUSL);
4103 }
4104 pthread_mutex_lock(&init_fini_lock);
4105 ef->dso->constructed = 0;
4106 pthread_mutex_unlock(&init_fini_lock);
4107 }
4108 }
4109 // Unmap all sos only at the end: weak symbols may let a later-unloaded so still reference code in one unloaded earlier.
4110 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4111 dlclose_post(ef->dso);
4112 }
4113 // Free dso_entry.
4114 TAILQ_FOREACH_SAFE(ef, &need_unload_queue, entries, ef_tmp) {
4115 if (ef) {
4116 free(ef);
4117 }
4118 }
4119
4120 return 0;
4121 }
4122
4123 hidden int __dlclose(void *p)
4124 {
4125 int rc;
4126 pthread_rwlock_wrlock(&lock);
4127 #ifdef HANDLE_RANDOMIZATION
4128 struct dso *dso = find_dso_by_handle(p);
4129 if (dso == NULL) {
4130 errno = EINVAL;
4131 error("Handle is invalid.");
4132 LD_LOGE("Handle is not find.");
4133 pthread_rwlock_unlock(&lock);
4134 return -1;
4135 }
4136 rc = do_dlclose(dso);
4137 #else
4138 rc = do_dlclose(p);
4139 #endif
4140 pthread_rwlock_unlock(&lock);
4141 return rc;
4142 }
4143
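/* A symbol matches when it is defined, is not a TLS symbol, and the
 * queried offset falls inside [st_value, st_value + st_size). */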
4144 static inline int sym_is_matched(const Sym* sym, size_t addr_offset_so) {
4145 return sym->st_value &&
4146 ((sym->st_info & 0xf) != STT_TLS) &&
4147 (addr_offset_so >= sym->st_value) &&
4148 (addr_offset_so < sym->st_value + sym->st_size);
4149 }
4150
4151 static inline Sym* find_addr_by_elf(size_t addr_offset_so, struct dso *p) {
4152 uint32_t nsym = p->hashtab[1];
4153 Sym *sym = p->syms;
4154 for (; nsym; nsym--, sym++) {
4155 if (sym_is_matched(sym, addr_offset_so)) {
4156 return sym;
4157 }
4158 }
4159
4160 return NULL;
4161 }
4162
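/* Locate the symbol covering addr_offset_so using the GNU hash table:
 * scan the defined-symbol range starting at the first bucket entry up to
 * the highest bucket index, then follow that bucket's hash chain until
 * its terminator bit is set. */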
4163 static inline Sym* find_addr_by_gnu(size_t addr_offset_so, struct dso *p) {
4164
4165 size_t i, nsym, first_hash_sym_index;
4166 uint32_t *hashval;
4167 Sym *sym_tab = p->syms;
4168 uint32_t *buckets= p->ghashtab + 4 + (p->ghashtab[2]*sizeof(size_t)/4);
4169 // Points to the first defined symbol, all symbols before it are undefined.
4170 first_hash_sym_index = buckets[0];
4171 Sym *sym = &sym_tab[first_hash_sym_index];
4172
4173 // Find the highest symbol index referenced by any bucket.
4174 for (i = nsym = 0; i < p->ghashtab[0]; i++) {
4175 if (buckets[i] > nsym)
4176 nsym = buckets[i];
4177 }
4178
4179 for (i = first_hash_sym_index; i < nsym; i++) {
4180 if (sym_is_matched(sym, addr_offset_so)) {
4181 return sym;
4182 }
4183 sym++;
4184 }
4185
4186 // Start traversing the hash list from the position pointed to by the last bucket.
4187 if (nsym) {
4188 hashval = buckets + p->ghashtab[0] + (nsym - p->ghashtab[1]);
4189 do {
4190 nsym++;
4191 if (sym_is_matched(sym, addr_offset_so)) {
4192 return sym;
4193 }
4194 sym++;
4195 }
4196 while (!(*hashval++ & 1));
4197 }
4198
4199 return NULL;
4200 }
4201
4202
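/* dladdr(): translate an address into the containing dso and, when
 * possible, the symbol whose range covers it, preferring the GNU hash
 * table and falling back to the classic ELF hash table. */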
4203 int dladdr(const void *addr_arg, Dl_info *info)
4204 {
4205 size_t addr = (size_t)addr_arg;
4206 struct dso *p;
4207 Sym *match_sym = NULL;
4208 char *strings;
4209
4210 pthread_rwlock_rdlock(&lock);
4211 p = addr2dso(addr);
4212 pthread_rwlock_unlock(&lock);
4213
4214 if (!p) return 0;
4215
4216 strings = p->strings;
4217 size_t addr_offset_so = addr - (size_t)p->base;
4218
4219 info->dli_fname = p->name;
4220 info->dli_fbase = p->map;
4221
4222 if (p->ghashtab) {
4223 match_sym = find_addr_by_gnu(addr_offset_so, p);
4224
4225 } else {
4226 match_sym = find_addr_by_elf(addr_offset_so, p);
4227 }
4228
4229 if (!match_sym) {
4230 info->dli_sname = 0;
4231 info->dli_saddr = 0;
4232 return 1;
4233 }
4234 info->dli_sname = strings + match_sym->st_name;
4235 info->dli_saddr = (void *)laddr(p, match_sym->st_value);
4236 return 1;
4237 }
4238
4239 hidden void *__dlsym(void *restrict p, const char *restrict s, void *restrict ra)
4240 {
4241 void *res;
4242 pthread_rwlock_rdlock(&lock);
4243 #ifdef HANDLE_RANDOMIZATION
4244 if ((p != RTLD_DEFAULT) && (p != RTLD_NEXT)) {
4245 struct dso *dso = find_dso_by_handle(p);
4246 if (dso == NULL) {
4247 pthread_rwlock_unlock(&lock);
4248 return 0;
4249 }
4250 res = do_dlsym(dso, s, "", ra);
4251 } else {
4252 res = do_dlsym(p, s, "", ra);
4253 }
4254 #else
4255 res = do_dlsym(p, s, "", ra);
4256 #endif
4257 pthread_rwlock_unlock(&lock);
4258 return res;
4259 }
4260
4261 hidden void *__dlvsym(void *restrict p, const char *restrict s, const char *restrict v, void *restrict ra)
4262 {
4263 void *res;
4264 pthread_rwlock_rdlock(&lock);
4265 #ifdef HANDLE_RANDOMIZATION
4266 if ((p != RTLD_DEFAULT) && (p != RTLD_NEXT)) {
4267 struct dso *dso = find_dso_by_handle(p);
4268 if (dso == NULL) {
4269 pthread_rwlock_unlock(&lock);
4270 return 0;
4271 }
4272 res = do_dlsym(dso, s, v, ra);
4273 } else {
4274 res = do_dlsym(p, s, v, ra);
4275 }
4276 #else
4277 res = do_dlsym(p, s, v, ra);
4278 #endif
4279 pthread_rwlock_unlock(&lock);
4280 return res;
4281 }
4282
4283 hidden void *__dlsym_redir_time64(void *restrict p, const char *restrict s, void *restrict ra)
4284 {
4285 #if _REDIR_TIME64
4286 const char *suffix, *suffix2 = "";
4287 char redir[36];
4288
4289 /* Map the symbol name to a time64 version of itself according to the
4290 * pattern used for naming the redirected time64 symbols. */
4291 size_t l = strnlen(s, sizeof redir);
4292 if (l<4 || l==sizeof redir) goto no_redir;
4293 if (s[l-2]=='_' && s[l-1]=='r') {
4294 l -= 2;
4295 suffix2 = s+l;
4296 }
4297 if (l<4) goto no_redir;
4298 if (!strcmp(s+l-4, "time")) suffix = "64";
4299 else suffix = "_time64";
4300
4301 /* Use the presence of the remapped symbol name in libc to determine
4302 * whether it's one that requires time64 redirection; replace if so. */
4303 snprintf(redir, sizeof redir, "__%.*s%s%s", (int)l, s, suffix, suffix2);
4304 if (find_sym(&ldso, redir, 1).sym) s = redir;
4305 no_redir:
4306 #endif
4307 return __dlsym(p, s, ra);
4308 }
4309
4310 int dl_iterate_phdr(int(*callback)(struct dl_phdr_info *info, size_t size, void *data), void *data)
4311 {
4312 struct dso *current;
4313 struct dl_phdr_info info;
4314 int ret = 0;
4315 for(current = head; current;) {
4316 info.dlpi_addr = (uintptr_t)current->base;
4317 info.dlpi_name = current->name;
4318 info.dlpi_phdr = current->phdr;
4319 info.dlpi_phnum = current->phnum;
4320 info.dlpi_adds = gencnt;
4321 info.dlpi_subs = 0;
4322 info.dlpi_tls_modid = current->tls_id;
4323 info.dlpi_tls_data = !current->tls_id ? 0 :
4324 __tls_get_addr((tls_mod_off_t[]){current->tls_id,0});
4325
4326 ret = (callback)(&info, sizeof (info), data);
4327
4328 if (ret != 0) break;
4329
4330 pthread_rwlock_rdlock(&lock);
4331 current = current->next;
4332 pthread_rwlock_unlock(&lock);
4333 }
4334 return ret;
4335 }
4336
4337 static void error(const char *fmt, ...)
4338 {
4339 va_list ap;
4340 va_start(ap, fmt);
4341 if (!runtime) {
4342 vdprintf(2, fmt, ap);
4343 dprintf(2, "\n");
4344 ldso_fail = 1;
4345 va_end(ap);
4346 return;
4347 }
4348 __dl_vseterr(fmt, ap);
4349 va_end(ap);
4350 }
4351
4352 int dlns_set_namespace_lib_path(const char * name, const char * lib_path)
4353 {
4354 if (!name || !lib_path) {
4355 LD_LOGE("dlns_set_namespace_lib_path name or lib_path is null.");
4356 return EINVAL;
4357 }
4358
4359 pthread_rwlock_wrlock(&lock);
4360 const void *caller_addr = __builtin_return_address(0);
4361 if (is_permitted(caller_addr, name) == false) {
4362 pthread_rwlock_unlock(&lock);
4363 return EPERM;
4364 }
4365
4366 ns_t* ns = find_ns_by_name(name);
4367 if (!ns) {
4368 pthread_rwlock_unlock(&lock);
4369 LD_LOGE("dlns_set_namespace_lib_path fail, input ns name : [%{public}s] is not found.", name);
4370 return ENOKEY;
4371 }
4372
4373 ns_set_lib_paths(ns, lib_path);
4374 pthread_rwlock_unlock(&lock);
4375 return 0;
4376 }
4377
4378 int dlns_set_namespace_separated(const char * name, const bool separated)
4379 {
4380 if (!name) {
4381 LD_LOGE("dlns_set_namespace_separated name is null.");
4382 return EINVAL;
4383 }
4384
4385 pthread_rwlock_wrlock(&lock);
4386 const void *caller_addr = __builtin_return_address(0);
4387 if (is_permitted(caller_addr, name) == false) {
4388 pthread_rwlock_unlock(&lock);
4389 return EPERM;
4390 }
4391
4392 ns_t* ns = find_ns_by_name(name);
4393 if (!ns) {
4394 pthread_rwlock_unlock(&lock);
4395 LD_LOGE("dlns_set_namespace_separated fail, input ns name : [%{public}s] is not found.", name);
4396 return ENOKEY;
4397 }
4398
4399 ns_set_separated(ns, separated);
4400 pthread_rwlock_unlock(&lock);
4401 return 0;
4402 }
4403
4404 int dlns_set_namespace_permitted_paths(const char * name, const char * permitted_paths)
4405 {
4406 if (!name || !permitted_paths) {
4407 LD_LOGE("dlns_set_namespace_permitted_paths name or permitted_paths is null.");
4408 return EINVAL;
4409 }
4410
4411 pthread_rwlock_wrlock(&lock);
4412 const void *caller_addr = __builtin_return_address(0);
4413 if (is_permitted(caller_addr, name) == false) {
4414 pthread_rwlock_unlock(&lock);
4415 return EPERM;
4416 }
4417
4418 ns_t* ns = find_ns_by_name(name);
4419 if (!ns) {
4420 pthread_rwlock_unlock(&lock);
4421 LD_LOGE("dlns_set_namespace_permitted_paths fail, input ns name : [%{public}s] is not found.", name);
4422 return ENOKEY;
4423 }
4424
4425 ns_set_permitted_paths(ns, permitted_paths);
4426 pthread_rwlock_unlock(&lock);
4427 return 0;
4428 }
4429
4430 int dlns_set_namespace_allowed_libs(const char * name, const char * allowed_libs)
4431 {
4432 if (!name || !allowed_libs) {
4433 LD_LOGE("dlns_set_namespace_allowed_libs name or allowed_libs is null.");
4434 return EINVAL;
4435 }
4436
4437 pthread_rwlock_wrlock(&lock);
4438 const void *caller_addr = __builtin_return_address(0);
4439 if (is_permitted(caller_addr, name) == false) {
4440 pthread_rwlock_unlock(&lock);
4441 return EPERM;
4442 }
4443
4444 ns_t* ns = find_ns_by_name(name);
4445 if (!ns) {
4446 pthread_rwlock_unlock(&lock);
4447 LD_LOGE("dlns_set_namespace_allowed_libs fail, input ns name : [%{public}s] is not found.", name);
4448 return ENOKEY;
4449 }
4450
4451 ns_set_allowed_libs(ns, allowed_libs);
4452 pthread_rwlock_unlock(&lock);
4453 return 0;
4454 }
4455
4456 int handle_asan_path_open(int fd, const char *name, ns_t *namespace, char *buf, size_t buf_size)
4457 {
4458 LD_LOGD("handle_asan_path_open fd:%{public}d, name:%{public}s , namespace:%{public}s .",
4459 fd,
4460 name,
4461 namespace ? namespace->ns_name : "NULL");
4462 int fd_tmp = fd;
4463 if (fd == -1 && (namespace->asan_lib_paths || namespace->lib_paths)) {
4464 if (namespace->lib_paths && namespace->asan_lib_paths) {
4465 size_t newlen = strlen(namespace->asan_lib_paths) + strlen(namespace->lib_paths) + 2;
4466 char *new_lib_paths = malloc(newlen);
4467 memset(new_lib_paths, 0, newlen);
4468 strcpy(new_lib_paths, namespace->asan_lib_paths);
4469 strcat(new_lib_paths, ":");
4470 strcat(new_lib_paths, namespace->lib_paths);
4471 fd_tmp = path_open(name, new_lib_paths, buf, buf_size);
4472 LD_LOGD("handle_asan_path_open path_open new_lib_paths:%{public}s ,fd: %{public}d.", new_lib_paths, fd_tmp);
4473 free(new_lib_paths);
4474 } else if (namespace->asan_lib_paths) {
4475 fd_tmp = path_open(name, namespace->asan_lib_paths, buf, buf_size);
4476 LD_LOGD("handle_asan_path_open path_open asan_lib_paths:%{public}s ,fd: %{public}d.",
4477 namespace->asan_lib_paths,
4478 fd_tmp);
4479 } else {
4480 fd_tmp = path_open(name, namespace->lib_paths, buf, buf_size);
4481 LD_LOGD(
4482 "handle_asan_path_open path_open lib_paths:%{public}s ,fd: %{public}d.", namespace->lib_paths, fd_tmp);
4483 }
4484 }
4485 return fd_tmp;
4486 }
4487
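/* Illustrative use of the extended dlopen (the library name, hint address
 * and size below are examples only):
 *
 *   dl_extinfo info = {0};
 *   info.flag = DL_EXT_RESERVED_ADDRESS_HINT;
 *   info.reserved_addr = hint_addr;
 *   info.reserved_size = 16 << 20;
 *   void *h = dlopen_ext("libfoo.so", RTLD_NOW, &info);
 *
 * With the HINT flag the reservation is best effort; with
 * DL_EXT_RESERVED_ADDRESS the load fails if the range cannot be used. */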
4488 void* dlopen_ext(const char *file, int mode, const dl_extinfo *extinfo)
4489 {
4490 const void *caller_addr = __builtin_return_address(0);
4491 musl_log_reset();
4492 ld_log_reset();
4493 if (extinfo != NULL) {
4494 if ((extinfo->flag & ~(DL_EXT_VALID_FLAG_BITS)) != 0) {
4495 LD_LOGE("Error dlopen_ext %{public}s: invalid flag %{public}x", file, extinfo->flag);
4496 return NULL;
4497 }
4498 }
4499 LD_LOGI("dlopen_ext file:%{public}s, mode:%{public}x , caller_addr:%{public}p , extinfo->flag:%{public}x",
4500 file,
4501 mode,
4502 caller_addr,
4503 extinfo ? extinfo->flag : 0);
4504 return dlopen_impl(file, mode, NULL, caller_addr, extinfo);
4505 }
4506
4507 #ifdef LOAD_ORDER_RANDOMIZATION
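/* Search a ':' (or newline) separated path list for name, handling the
 * "zipfile!/path" form by locating the uncompressed entry inside the zip
 * archive; on success task->fd (and task->file_offset for zip entries)
 * is filled in. */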
4508 static void open_library_by_path(const char *name, const char *s, struct loadtask *task, struct zip_info *z_info)
4509 {
4510 char *buf = task->buf;
4511 size_t buf_size = sizeof task->buf;
4512 size_t l;
4513 for (;;) {
4514 s += strspn(s, ":\n");
4515 l = strcspn(s, ":\n");
4516 if (l-1 >= INT_MAX) return;
4517 if (snprintf(buf, buf_size, "%.*s/%s", (int)l, s, name) < buf_size) {
4518 char *separator = strstr(buf, ZIP_FILE_PATH_SEPARATOR);
4519 if (separator != NULL) {
4520 int res = open_uncompressed_library_in_zipfile(buf, z_info, separator);
4521 if (res == 0) {
4522 task->fd = z_info->fd;
4523 task->file_offset = z_info->file_offset;
4524 break;
4525 } else {
4526 memset(z_info->path_buf, 0, sizeof(z_info->path_buf));
4527 }
4528 } else {
4529 if ((task->fd = open(buf, O_RDONLY|O_CLOEXEC))>=0) break;
4530 }
4531 }
4532 s += l;
4533 }
4534 return;
4535 }
4536
4537 static void handle_asan_path_open_by_task(int fd, const char *name, ns_t *namespace, struct loadtask *task,
4538 struct zip_info *z_info)
4539 {
4540 LD_LOGD("handle_asan_path_open_by_task fd:%{public}d, name:%{public}s , namespace:%{public}s .",
4541 fd,
4542 name,
4543 namespace ? namespace->ns_name : "NULL");
4544 if (fd == -1 && (namespace->asan_lib_paths || namespace->lib_paths)) {
4545 if (namespace->lib_paths && namespace->asan_lib_paths) {
4546 size_t newlen = strlen(namespace->asan_lib_paths) + strlen(namespace->lib_paths) + 2;
4547 char *new_lib_paths = malloc(newlen);
4548 memset(new_lib_paths, 0, newlen);
4549 strcpy(new_lib_paths, namespace->asan_lib_paths);
4550 strcat(new_lib_paths, ":");
4551 strcat(new_lib_paths, namespace->lib_paths);
4552 open_library_by_path(name, new_lib_paths, task, z_info);
4553 LD_LOGD("handle_asan_path_open_by_task open_library_by_path new_lib_paths:%{public}s ,fd: %{public}d.",
4554 new_lib_paths,
4555 task->fd);
4556 free(new_lib_paths);
4557 } else if (namespace->asan_lib_paths) {
4558 open_library_by_path(name, namespace->asan_lib_paths, task, z_info);
4559 LD_LOGD("handle_asan_path_open_by_task open_library_by_path asan_lib_paths:%{public}s ,fd: %{public}d.",
4560 namespace->asan_lib_paths,
4561 task->fd);
4562 } else {
4563 open_library_by_path(name, namespace->lib_paths, task, z_info);
4564 LD_LOGD("handle_asan_path_open_by_task open_library_by_path lib_paths:%{public}s ,fd: %{public}d.",
4565 namespace->lib_paths,
4566 task->fd);
4567 }
4568 }
4569 return;
4570 }
4571
4572 /* Used to get an uncompressed library's offset inside a zip file, so the offset can be used to mmap the library directly. */
4573 int open_uncompressed_library_in_zipfile(const char *path, struct zip_info *z_info, char *separator)
4574 {
4575 struct local_file_header zip_file_header;
4576 struct central_dir_entry c_dir_entry;
4577 struct zip_end_locator end_locator;
4578
4579 /* Use "'!/' to split the path into zipfile path and library path in zipfile.
4580 * For example:
4581 * - path: x/xx/xxx.zip!/x/xx/xxx.so
4582 * - zipfile path: x/xx/xxx.zip
4583 * - library path in zipfile: x/xx/xxx.so */
4584 if (strlcpy(z_info->path_buf, path, PATH_BUF_SIZE) >= PATH_BUF_SIZE) {
4585 LD_LOGE("Open uncompressed library: input path %{public}s is too long.", path);
4586 return -1;
4587 }
4588 z_info->path_buf[separator - path] = '\0';
4589 z_info->file_path_index = separator - path + 2;
4590 char *zip_file_path = z_info->path_buf;
4591 char *lib_path = &z_info->path_buf[z_info->file_path_index];
4592 if (zip_file_path == NULL || lib_path == NULL) {
4593 LD_LOGE("Open uncompressed library: get zip and lib path failed.");
4594 return -1;
4595 }
4596 LD_LOGD("Open uncompressed library: input path: %{public}s, zip file path: %{public}s, library path: %{public}s.",
4597 path, zip_file_path, lib_path);
4598
4599 // Get zip file length
4600 FILE *zip_file = fopen(zip_file_path, "re");
4601 if (zip_file == NULL) {
4602 LD_LOGE("Open uncompressed library: fopen %{public}s failed.", zip_file_path);
4603 return -1;
4604 }
4605 if (fseek(zip_file, 0, SEEK_END) != 0) {
4606 LD_LOGE("Open uncompressed library: fseek SEEK_END failed.");
4607 fclose(zip_file);
4608 return -1;
4609 }
4610 int64_t zip_file_len = ftell(zip_file);
4611 if (zip_file_len == -1) {
4612 LD_LOGE("Open uncompressed library: get zip file length failed.");
4613 fclose(zip_file);
4614 return -1;
4615 }
4616
4617 // Read end of central directory record.
4618 size_t end_locator_len = sizeof(end_locator);
4619 size_t end_locator_pos = zip_file_len - end_locator_len;
4620 if (fseek(zip_file, end_locator_pos, SEEK_SET) != 0) {
4621 LD_LOGE("Open uncompressed library: fseek end locator position failed.");
4622 fclose(zip_file);
4623 return -1;
4624 }
4625 if (fread(&end_locator, sizeof(end_locator), 1, zip_file) != 1 || end_locator.signature != EOCD_SIGNATURE) {
4626 LD_LOGE("Open uncompressed library: fread end locator failed.");
4627 fclose(zip_file);
4628 return -1;
4629 }
4630
4631 char file_name[PATH_BUF_SIZE];
4632 uint64_t current_dir_pos = end_locator.offset;
4633 for (uint16_t i = 0; i < end_locator.total_entries; i++) {
4634 // Read central dir entry.
4635 if (fseek(zip_file, current_dir_pos, SEEK_SET) != 0) {
4636 LD_LOGE("Open uncompressed library: fseek current centra dir entry position failed.");
4637 fclose(zip_file);
4638 return -1;
4639 }
4640 if (fread(&c_dir_entry, sizeof(c_dir_entry), 1, zip_file) != 1 || c_dir_entry.signature != CENTRAL_SIGNATURE) {
4641 LD_LOGE("Open uncompressed library: fread centra dir entry failed.");
4642 fclose(zip_file);
4643 return -1;
4644 }
4645
4646 if (fread(file_name, c_dir_entry.name_size, 1, zip_file) != 1) {
4647 LD_LOGE("Open uncompressed library: fread file name failed.");
4648 fclose(zip_file);
4649 return -1;
4650 }
4651 if (strcmp(file_name, lib_path) == 0) {
4652 // Read local file header.
4653 if (fseek(zip_file, c_dir_entry.local_header_offset, SEEK_SET) != 0) {
4654 LD_LOGE("Open uncompressed library: fseek local file header failed.");
4655 fclose(zip_file);
4656 return -1;
4657 }
4658 if (fread(&zip_file_header, sizeof(zip_file_header), 1, zip_file) != 1) {
4659 LD_LOGE("Open uncompressed library: fread local file header failed.");
4660 fclose(zip_file);
4661 return -1;
4662 }
4663 if (zip_file_header.signature != LOCAL_FILE_HEADER_SIGNATURE) {
4664 LD_LOGE("Open uncompressed library: read local file header signature error.");
4665 fclose(zip_file);
4666 return -1;
4667 }
4668
4669 z_info->file_offset = c_dir_entry.local_header_offset + sizeof(zip_file_header) +
4670 zip_file_header.name_size + zip_file_header.extra_size;
4671 if (zip_file_header.compression_method != COMPRESS_STORED || z_info->file_offset % PAGE_SIZE != 0) {
4672 LD_LOGE("Open uncompressed library: open %{public}s in %{public}s failed because of misalignment or saved with compression."
4673 "compress method %{public}d, file offset %{public}lu",
4674 lib_path, zip_file_path, zip_file_header.compression_method, z_info->file_offset);
4675 fclose(zip_file);
4676 return -2;
4677 }
4678 z_info->found = true;
4679 break;
4680 }
4681
4682 memset(file_name, 0, sizeof(file_name));
4683 current_dir_pos += sizeof(c_dir_entry);
4684 current_dir_pos += c_dir_entry.name_size + c_dir_entry.extra_size + c_dir_entry.comment_size;
4685 }
4686 if (!z_info->found) {
4687 LD_LOGE("Open uncompressed library: %{public}s was not found in %{public}s.", lib_path, zip_file_path);
4688 fclose(zip_file);
4689 return -3;
4690 }
4691 z_info->fd = fileno(zip_file);
4692
4693 return 0;
4694 }
4695
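/* Read the ELF header and program headers of the task's file, map the
 * PT_DYNAMIC segment, and locate the dynamic string table through the
 * section headers, so the library's dependencies can be examined before
 * the whole image is mapped. */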
4696 static bool map_library_header(struct loadtask *task)
4697 {
4698 off_t off_start;
4699 Phdr *ph;
4700 size_t i;
4701 size_t str_size;
4702 off_t str_table;
4703
4704 ssize_t l = pread(task->fd, task->ehdr_buf, sizeof task->ehdr_buf, task->file_offset);
4705 task->eh = task->ehdr_buf;
4706 if (l < 0) {
4707 LD_LOGE("Error mapping header %{public}s: failed to read fd", task->name);
4708 return false;
4709 }
4710 if (l < sizeof(Ehdr) || (task->eh->e_type != ET_DYN && task->eh->e_type != ET_EXEC)) {
4711 LD_LOGE("Error mapping header %{public}s: invaliled Ehdr", task->name);
4712 goto noexec;
4713 }
4714 task->phsize = task->eh->e_phentsize * task->eh->e_phnum;
4715 if (task->phsize > sizeof task->ehdr_buf - sizeof(Ehdr)) {
4716 task->allocated_buf = malloc(task->phsize);
4717 if (!task->allocated_buf) {
4718 LD_LOGE("Error mapping header %{public}s: failed to alloc memory", task->name);
4719 return false;
4720 }
4721 l = pread(task->fd, task->allocated_buf, task->phsize, task->eh->e_phoff + task->file_offset);
4722 if (l < 0) {
4723 LD_LOGE("Error mapping header %{public}s: failed to pread", task->name);
4724 goto error;
4725 }
4726 if (l != task->phsize) {
4727 LD_LOGE("Error mapping header %{public}s: unmatched phsize", task->name);
4728 goto noexec;
4729 }
4730 ph = task->ph0 = task->allocated_buf;
4731 } else if (task->eh->e_phoff + task->phsize > l) {
4732 l = pread(task->fd, task->ehdr_buf + 1, task->phsize, task->eh->e_phoff + task->file_offset);
4733 if (l < 0) {
4734 LD_LOGE("Error mapping header %{public}s: failed to pread", task->name);
4735 goto error;
4736 }
4737 if (l != task->phsize) {
4738 LD_LOGE("Error mapping header %{public}s: unmatched phsize", task->name);
4739 goto noexec;
4740 }
4741 ph = task->ph0 = (void *)(task->ehdr_buf + 1);
4742 } else {
4743 ph = task->ph0 = (void *)((char *)task->ehdr_buf + task->eh->e_phoff);
4744 }
4745
4746 for (i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
4747 if (ph->p_type == PT_DYNAMIC) {
4748 task->dyn = ph->p_vaddr;
4749 } else if (ph->p_type == PT_TLS) {
4750 task->tls_image = ph->p_vaddr;
4751 task->tls.align = ph->p_align;
4752 task->tls.len = ph->p_filesz;
4753 task->tls.size = ph->p_memsz;
4754 }
4755
4756 if (ph->p_type != PT_DYNAMIC) {
4757 continue;
4758 }
4759 // map the dynamic segment and the string table of the library
4760 off_start = ph->p_offset;
4761 off_start &= -PAGE_SIZE;
4762 task->dyn_map_len = ph->p_memsz + (ph->p_offset - off_start);
4763 /* The default value of file_offset is 0.
4764 * The value of file_offset may be greater than 0 when opening a library from a zip file.
4765 * The value of file_offset is guaranteed to be PAGE_SIZE aligned. */
4766 task->dyn_map = mmap(0, task->dyn_map_len, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
4767 if (task->dyn_map == MAP_FAILED) {
4768 LD_LOGE("Error mapping header %{public}s: failed to map dynamic section", task->name);
4769 goto error;
4770 }
4771 task->dyn_addr = (size_t *)((unsigned char *)task->dyn_map + (ph->p_offset - off_start));
4772 size_t dyn_tmp;
4773 if (search_vec(task->dyn_addr, &dyn_tmp, DT_STRTAB)) {
4774 str_table = dyn_tmp;
4775 } else {
4776 LD_LOGE("Error mapping header %{public}s: DT_STRTAB not found", task->name);
4777 goto error;
4778 }
4779 if (search_vec(task->dyn_addr, &dyn_tmp, DT_STRSZ)) {
4780 str_size = dyn_tmp;
4781 } else {
4782 LD_LOGE("Error mapping header %{public}s: DT_STRSZ not found", task->name);
4783 goto error;
4784 }
4785 }
4786
4787 task->shsize = task->eh->e_shentsize * task->eh->e_shnum;
4788 off_start = task->eh->e_shoff;
4789 off_start &= -PAGE_SIZE;
4790 task->shsize += task->eh->e_shoff - off_start;
4791 task->shdr_allocated_buf = mmap(0, task->shsize, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
4792 Shdr *sh = (Shdr *)((char *)task->shdr_allocated_buf + task->eh->e_shoff - off_start);
4793 for (i = task->eh->e_shnum; i; i--, sh = (void *)((char *)sh + task->eh->e_shentsize)) {
4794 if (sh->sh_type != SHT_STRTAB || sh->sh_addr != str_table || sh->sh_size != str_size) {
4795 continue;
4796 }
4797 off_start = sh->sh_offset;
4798 off_start &= -PAGE_SIZE;
4799 task->str_map_len = sh->sh_size + (sh->sh_offset - off_start);
4800 task->str_map = mmap(0, task->str_map_len, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
4801 if (task->str_map == MAP_FAILED) {
4802 LD_LOGE("Error mapping section header %{public}s: failed to map string section", task->name);
4803 goto error;
4804 }
4805 task->str_addr = (char *)task->str_map + sh->sh_offset - off_start;
4806 break;
4807 }
4808 if (!task->dyn) {
4809 LD_LOGE("Error mapping header %{public}s: dynamic section not found", task->name);
4810 goto noexec;
4811 }
4812 return true;
4813 noexec:
4814 errno = ENOEXEC;
4815 error:
4816 free(task->allocated_buf);
4817 task->allocated_buf = NULL;
4818 munmap(task->shdr_allocated_buf, task->shsize);
4819 task->shdr_allocated_buf = NULL;
4820 return false;
4821 }
4822
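/* Map all PT_LOAD segments of the library described by task: reserve one
 * address range (honoring reserved-address parameters and huge-page
 * alignment), then remap each segment with its final protections and
 * zero-fill the bss tail of writable segments. */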
4823 static bool task_map_library(struct loadtask *task, struct reserved_address_params *reserved_params)
4824 {
4825 size_t addr_min = SIZE_MAX, addr_max = 0, map_len;
4826 size_t this_min, this_max;
4827 size_t nsegs = 0;
4828 off_t off_start;
4829 Phdr *ph = task->ph0;
4830 unsigned prot;
4831 unsigned char *map = MAP_FAILED, *base;
4832 size_t i;
4833 int map_flags = MAP_PRIVATE;
4834 size_t start_addr;
4835 size_t start_alignment = PAGE_SIZE;
4836 bool hugepage_enabled = false;
4837
4838 for (i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
4839 if (ph->p_type == PT_GNU_RELRO) {
4840 task->p->relro_start = ph->p_vaddr & -PAGE_SIZE;
4841 task->p->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
4842 } else if (ph->p_type == PT_GNU_STACK) {
4843 if (!runtime && ph->p_memsz > __default_stacksize) {
4844 __default_stacksize =
4845 ph->p_memsz < DEFAULT_STACK_MAX ?
4846 ph->p_memsz : DEFAULT_STACK_MAX;
4847 }
4848 }
4849 if (ph->p_type != PT_LOAD) {
4850 continue;
4851 }
4852 nsegs++;
4853 if (ph->p_vaddr < addr_min) {
4854 addr_min = ph->p_vaddr;
4855 off_start = ph->p_offset;
4856 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
4857 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
4858 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
4859 }
4860 if (ph->p_vaddr + ph->p_memsz > addr_max) {
4861 addr_max = ph->p_vaddr + ph->p_memsz;
4862 }
4863 }
4864 if (!task->dyn) {
4865 LD_LOGE("Error mapping library %{public}s: dynamic section not found", task->name);
4866 goto noexec;
4867 }
4868 if (DL_FDPIC && !(task->eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
4869 task->p->loadmap = calloc(1, sizeof(struct fdpic_loadmap) + nsegs * sizeof(struct fdpic_loadseg));
4870 if (!task->p->loadmap) {
4871 goto error;
4872 }
4873 task->p->loadmap->nsegs = nsegs;
4874 for (ph = task->ph0, i = 0; i < nsegs; ph = (void *)((char *)ph + task->eh->e_phentsize)) {
4875 if (ph->p_type != PT_LOAD) {
4876 continue;
4877 }
4878 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
4879 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
4880 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
4881 map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE - 1),
4882 prot, MAP_PRIVATE,
4883 task->fd, (ph->p_offset & -PAGE_SIZE) + task->file_offset);
4884 if (map == MAP_FAILED) {
4885 unmap_library(task->p);
4886 goto error;
4887 }
4888 task->p->loadmap->segs[i].addr = (size_t)map +
4889 (ph->p_vaddr & PAGE_SIZE - 1);
4890 task->p->loadmap->segs[i].p_vaddr = ph->p_vaddr;
4891 task->p->loadmap->segs[i].p_memsz = ph->p_memsz;
4892 i++;
4893 if (prot & PROT_WRITE) {
4894 size_t brk = (ph->p_vaddr & PAGE_SIZE - 1) + ph->p_filesz;
4895 size_t pgbrk = (brk + PAGE_SIZE - 1) & -PAGE_SIZE;
4896 size_t pgend = (brk + ph->p_memsz - ph->p_filesz + PAGE_SIZE - 1) & -PAGE_SIZE;
4897 if (pgend > pgbrk && mmap_fixed(map + pgbrk,
4898 pgend - pgbrk, prot,
4899 MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
4900 -1, off_start) == MAP_FAILED)
4901 goto error;
4902 memset(map + brk, 0, pgbrk - brk);
4903 }
4904 }
4905 map = (void *)task->p->loadmap->segs[0].addr;
4906 map_len = 0;
4907 goto done_mapping;
4908 }
4909 addr_max += PAGE_SIZE - 1;
4910 addr_max &= -PAGE_SIZE;
4911 addr_min &= -PAGE_SIZE;
4912 off_start &= -PAGE_SIZE;
4913 map_len = addr_max - addr_min + off_start;
4914 start_addr = addr_min;
4915
4916 hugepage_enabled = get_transparent_hugepages_supported();
4917 if (hugepage_enabled) {
4918 size_t maxinum_alignment = phdr_table_get_maxinum_alignment(task->ph0, task->eh->e_phnum);
4919
4920 start_alignment = maxinum_alignment == KPMD_SIZE ? KPMD_SIZE : PAGE_SIZE;
4921 }
4922
4923 if (reserved_params) {
4924 if (map_len > reserved_params->reserved_size) {
4925 if (reserved_params->must_use_reserved) {
4926 LD_LOGE("Error mapping library %{public}s: map len is larger than reserved address", task->name);
4927 goto error;
4928 }
4929 } else {
4930 start_addr = ((size_t)reserved_params->start_addr - 1 + PAGE_SIZE) & -PAGE_SIZE;
4931 map_flags |= MAP_FIXED;
4932 }
4933 }
4934
4935 /* We will pick a mapping_align aligned address as the start of the dso,
4936 * so we need a tmp_map_len of roughly map_len + mapping_align to make sure
4937 * there is enough space to shift the dso to the aligned location. */
4938 size_t mapping_align = start_alignment > LIBRARY_ALIGNMENT ? start_alignment : LIBRARY_ALIGNMENT;
4939 size_t tmp_map_len = ALIGN(map_len, mapping_align) + mapping_align - PAGE_SIZE;
4940
4941 /* map the whole load segments with PROT_READ first for security consideration. */
4942 prot = PROT_READ;
4943
4944 /* if reserved_params exists, use start_addr as the preferred address for the mmap operation */
4945 if (reserved_params) {
4946 map = DL_NOMMU_SUPPORT
4947 ? mmap((void *)start_addr, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
4948 : mmap((void *)start_addr, map_len, prot, map_flags, task->fd, off_start + task->file_offset);
4949 if (map == MAP_FAILED) {
4950 goto error;
4951 }
4952 if (reserved_params && map_len < reserved_params->reserved_size) {
4953 reserved_params->reserved_size -= (map_len + (start_addr - (size_t)reserved_params->start_addr));
4954 reserved_params->start_addr = (void *)((uint8_t *)map + map_len);
4955 }
4956 /* if reserved_params does not exist, use real_map as the preferred address for the mmap operation */
4957 } else {
4958 /* use tmp_map_len to mmap enough space for the dso with anonymous mapping */
4959 unsigned char *temp_map = mmap((void *)NULL, tmp_map_len, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
4960 if (temp_map == MAP_FAILED) {
4961 goto error;
4962 }
4963
4964 /* find the mapping_align aligned address */
4965 unsigned char *real_map = (unsigned char*)ALIGN((uintptr_t)temp_map, mapping_align);
4966
4967 /* munmap the space we mapped before so that we can mmap the correct space again */
4968 munmap(temp_map, tmp_map_len);
4969
4970 map = DL_NOMMU_SUPPORT
4971 ? mmap(real_map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
4972 /* use map_len to mmap correct space for the dso with file mapping */
4973 : mmap(real_map, map_len, prot, map_flags, task->fd, off_start + task->file_offset);
4974 if (map == MAP_FAILED) {
4975 goto error;
4976 }
4977 }
4978 task->p->map = map;
4979 task->p->map_len = map_len;
4980 /* If the loaded file is not relocatable and the requested address is
4981 * not available, then the load operation must fail. */
4982 if (task->eh->e_type != ET_DYN && addr_min && map != (void *)addr_min) {
4983 LD_LOGE("Error mapping library %{public}s: device or resource busy", task->name);
4984 errno = EBUSY;
4985 goto error;
4986 }
4987 base = map - addr_min;
4988 task->p->phdr = 0;
4989 task->p->phnum = 0;
4990 for (ph = task->ph0, i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
4991 if (ph->p_type == PT_OHOS_RANDOMDATA) {
4992 fill_random_data((void *)(ph->p_vaddr + base), ph->p_memsz);
4993 continue;
4994 }
4995 if (ph->p_type != PT_LOAD) {
4996 continue;
4997 }
4998 /* Check if the program headers are in this load segment, and
4999 * if so, record the address for use by dl_iterate_phdr. */
5000 if (!task->p->phdr && task->eh->e_phoff >= ph->p_offset
5001 && task->eh->e_phoff + task->phsize <= ph->p_offset + ph->p_filesz) {
5002 task->p->phdr = (void *)(base + ph->p_vaddr + (task->eh->e_phoff - ph->p_offset));
5003 task->p->phnum = task->eh->e_phnum;
5004 task->p->phentsize = task->eh->e_phentsize;
5005 }
5006 this_min = ph->p_vaddr & -PAGE_SIZE;
5007 this_max = ph->p_vaddr + ph->p_memsz + PAGE_SIZE - 1 & -PAGE_SIZE;
5008 off_start = ph->p_offset & -PAGE_SIZE;
5009 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
5010 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
5011 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
5012 /* Reuse the existing mapping for the lowest-address LOAD */
5013 if (mmap_fixed(
5014 base + this_min,
5015 this_max - this_min,
5016 prot, MAP_PRIVATE | MAP_FIXED,
5017 task->fd,
5018 off_start + task->file_offset) == MAP_FAILED) {
5019 LD_LOGE("Error mapping library %{public}s: mmap fix failed, errno: %{public}d", task->name, errno);
5020 goto error;
5021 }
5022 if ((ph->p_flags & PF_X) && (ph->p_align == KPMD_SIZE) && hugepage_enabled)
5023 madvise(base + this_min, this_max - this_min, MADV_HUGEPAGE);
5024 if (ph->p_memsz > ph->p_filesz && (ph->p_flags & PF_W)) {
5025 size_t brk = (size_t)base + ph->p_vaddr + ph->p_filesz;
5026 size_t pgbrk = brk + PAGE_SIZE - 1 & -PAGE_SIZE;
5027 size_t zeromap_size = (size_t)base + this_max - pgbrk;
5028 memset((void *)brk, 0, pgbrk - brk & PAGE_SIZE - 1);
5029 if (pgbrk - (size_t)base < this_max && mmap_fixed(
5030 (void *)pgbrk,
5031 zeromap_size,
5032 prot,
5033 MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
5034 -1,
5035 0) == MAP_FAILED) {
5036 LD_LOGE("Error mapping library: mmap fix failed");
5037 goto error;
5038 }
5039 set_bss_vma_name(task->p->name, (void *)pgbrk, zeromap_size);
5040 }
5041 }
5042 for (i = 0; ((size_t *)(base + task->dyn))[i]; i += NEXT_DYNAMIC_INDEX) {
5043 if (((size_t *)(base + task->dyn))[i] == DT_TEXTREL) {
5044 if (mprotect(map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC) && errno != ENOSYS) {
5045 LD_LOGE("Error mapping library %{public}s: mprotect failed", task->name);
5046 goto error;
5047 }
5048 break;
5049 }
5050 }
5051 done_mapping:
5052 task->p->base = base;
5053 task->p->dynv = laddr(task->p, task->dyn);
5054 if (task->p->tls.size) {
5055 task->p->tls.image = laddr(task->p, task->tls_image);
5056 }
5057 free(task->allocated_buf);
5058 task->allocated_buf = NULL;
5059 munmap(task->shdr_allocated_buf, task->shsize);
5060 task->shdr_allocated_buf = NULL;
5061 return true;
5062 noexec:
5063 errno = ENOEXEC;
5064 error:
5065 if (map != MAP_FAILED) {
5066 unmap_library(task->p);
5067 }
5068 free(task->allocated_buf);
5069 task->allocated_buf = NULL;
5070 munmap(task->shdr_allocated_buf, task->shsize);
5071 task->shdr_allocated_buf = NULL;
5072 return false;
5073 }
5074
5075 static bool resolve_fd_to_realpath(struct loadtask *task)
5076 {
5077 char proc_self_fd[32];
5078 static char resolved_path[PATH_MAX];
5079
5080 int ret = snprintf(proc_self_fd, sizeof(proc_self_fd), "/proc/self/fd/%d", task->fd);
5081 if (ret < 0 || ret >= sizeof(proc_self_fd)) {
5082 return false;
5083 }
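    /* /proc/self/fd/<fd> is a magic symlink to the file backing the descriptor;
     * readlink() on it yields the canonical path, which the caller re-checks
     * against the namespace accessibility rules. */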
5084 ssize_t len = readlink(proc_self_fd, resolved_path, sizeof(resolved_path) - 1);
5085 if (len < 0) {
5086 return false;
5087 }
5088 resolved_path[len] = '\0';
5089 strncpy(task->buf, resolved_path, PATH_MAX);
5090
5091 return true;
5092 }
5093
5094 static bool load_library_header(struct loadtask *task)
5095 {
5096 const char *name = task->name;
5097 struct dso *needed_by = task->needed_by;
5098 ns_t *namespace = task->namespace;
5099 bool check_inherited = task->check_inherited;
5100 struct zip_info z_info;
5101
5102 bool map = false;
5103 struct stat st;
5104 size_t alloc_size;
5105 int n_th = 0;
5106 int is_self = 0;
5107
5108 if (!*name) {
5109 errno = EINVAL;
5110 return false;
5111 }
5112
5113 /* Catch and block attempts to reload the implementation itself */
5114 if (name[NAME_INDEX_ZERO] == 'l' && name[NAME_INDEX_ONE] == 'i' && name[NAME_INDEX_TWO] == 'b') {
5115 static const char reserved[] =
5116 "c.pthread.rt.m.dl.util.xnet.";
5117 const char *rp, *next;
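        /* "reserved" is one string of dot-terminated stems; each pass matches one
         * stem (including its trailing dot) against the name with its "lib" prefix
         * skipped, so e.g. "libc.so" matches the "c." entry. Leaving the loop with
         * *rp == 0 means no stem matched. */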
5118 for (rp = reserved; *rp; rp = next) {
5119 next = strchr(rp, '.') + 1;
5120 if (strncmp(name + NAME_INDEX_THREE, rp, next - rp) == 0) {
5121 break;
5122 }
5123 }
5124 if (*rp) {
5125 if (ldd_mode) {
5126 /* Track which names have been resolved
5127 * and only report each one once. */
5128 static unsigned reported;
5129 unsigned mask = 1U << (rp - reserved);
5130 if (!(reported & mask)) {
5131 reported |= mask;
5132 dprintf(1, "\t%s => %s (%p)\n",
5133 name, ldso.name,
5134 ldso.base);
5135 }
5136 }
5137 is_self = 1;
5138 }
5139 }
5140 if (!strcmp(name, ldso.name)) {
5141 is_self = 1;
5142 }
5143 if (is_self) {
5144 if (!ldso.prev) {
5145 tail->next = &ldso;
5146 ldso.prev = tail;
5147 tail = &ldso;
5148 ldso.namespace = namespace;
5149 ns_add_dso(namespace, &ldso);
5150 }
5151 task->isloaded = true;
5152 task->p = &ldso;
5153 return true;
5154 }
5155 if (strchr(name, '/')) {
5156 char *separator = strstr(name, ZIP_FILE_PATH_SEPARATOR);
5157 if (separator != NULL) {
5158 int res = open_uncompressed_library_in_zipfile(name, &z_info, separator);
5159 if (!res) {
5160 task->pathname = name;
5161 if (!is_accessible(namespace, task->pathname, g_is_asan, check_inherited)) {
5162 LD_LOGE("Open uncompressed library: check ns accessible failed, pathname %{public}s namespace %{public}s.",
5163 task->pathname, namespace ? namespace->ns_name : "NULL");
5164 task->fd = -1;
5165 } else {
5166 task->fd = z_info.fd;
5167 task->file_offset = z_info.file_offset;
5168 }
5169 } else {
5170 LD_LOGE("Open uncompressed library in zip file failed, name:%{public}s res:%{public}d", name, res);
5171 return false;
5172 }
5173 } else {
5174 task->pathname = name;
5175 if (!is_accessible(namespace, task->pathname, g_is_asan, check_inherited)) {
5176 task->fd = -1;
5177 } else {
5178 task->fd = open(name, O_RDONLY | O_CLOEXEC);
5179 }
5180 }
5181 } else {
5182 /* Search for the name to see if it's already loaded */
5183 /* Search in namespace */
5184 task->p = find_library_by_name(name, namespace, check_inherited);
5185 if (task->p) {
5186 task->isloaded = true;
5187 LD_LOGD("find_library_by_name(name=%{public}s ns=%{public}s) already loaded by %{public}s in %{public}s namespace ",
5188 name, namespace->ns_name, task->p->name, task->p->namespace->ns_name);
5189 return true;
5190 }
5191 if (strlen(name) > NAME_MAX) {
5192 LD_LOGE("load_library name length is larger than NAME_MAX:%{public}s.", name);
5193 return false;
5194 }
5195 task->fd = -1;
5196 if (namespace->env_paths) {
5197 open_library_by_path(name, namespace->env_paths, task, &z_info);
5198 }
5199 for (task->p = needed_by; task->fd == -1 && task->p; task->p = task->p->needed_by) {
5200 if (fixup_rpath(task->p, task->buf, sizeof task->buf) < 0) {
5201 task->fd = INVALID_FD_INHIBIT_FURTHER_SEARCH; /* Inhibit further search. */
5202 }
5203 if (task->p->rpath) {
5204 open_library_by_path(name, task->p->rpath, task, &z_info);
5205 if (task->fd != -1 && resolve_fd_to_realpath(task)) {
5206 if (!is_accessible(namespace, task->buf, g_is_asan, check_inherited)) {
5207 close(task->fd);
5208 task->fd = -1;
5209 }
5210 }
5211 }
5212 }
5213 if (g_is_asan) {
5214 handle_asan_path_open_by_task(task->fd, name, namespace, task, &z_info);
5215 LD_LOGD("load_library handle_asan_path_open_by_task fd:%{public}d.", task->fd);
5216 } else {
5217 if (task->fd == -1 && namespace->lib_paths) {
5218 open_library_by_path(name, namespace->lib_paths, task, &z_info);
5219 LD_LOGD("load_library no asan lib_paths path_open fd:%{public}d.", task->fd);
5220 }
5221 }
5222 task->pathname = task->buf;
5223 }
5224 if (task->fd < 0) {
5225 if (!check_inherited || !namespace->ns_inherits) {
5226 return false;
5227 }
5228 /* Load the library from an inherited namespace. Do not check inherited namespaces again. */
5229 for (size_t i = 0; i < namespace->ns_inherits->num; i++) {
5230 ns_inherit *inherit = namespace->ns_inherits->inherits[i];
5231 if (strchr(name, '/') == 0 && !is_sharable(inherit, name)) {
5232 continue;
5233 }
5234 task->namespace = inherit->inherited_ns;
5235 task->check_inherited = false;
5236 if (load_library_header(task)) {
5237 return true;
5238 }
5239 }
5240 return false;
5241 }
5242
5243 if (fstat(task->fd, &st) < 0) {
5244 LD_LOGE("Error loading header %{public}s: failed to get file state", task->name);
5245 close(task->fd);
5246 task->fd = -1;
5247 return false;
5248 }
5249 /* Search in namespace */
5250 task->p = find_library_by_fstat(&st, namespace, check_inherited, task->file_offset);
5251 if (task->p) {
5252 /* If this library was previously loaded with a
5253 * pathname but a search found the same inode,
5254 * set up its shortname so it can be found by name. */
5255 if (!task->p->shortname && task->pathname != name) {
5256 task->p->shortname = strrchr(task->p->name, '/') + 1;
5257 }
5258 close(task->fd);
5259 task->fd = -1;
5260 task->isloaded = true;
5261 LD_LOGD("find_library_by_fstat(name=%{public}s ns=%{public}s) already loaded by %{public}s in %{public}s namespace ",
5262 name, namespace->ns_name, task->p->name, task->p->namespace->ns_name);
5263 return true;
5264 }
5265
5266 map = noload ? 0 : map_library_header(task);
5267 if (!map) {
5268 LD_LOGE("Error loading header %{public}s: failed to map header", task->name);
5269 close(task->fd);
5270 task->fd = -1;
5271 return false;
5272 }
5273
5274 /* Allocate storage for the new DSO. When there is TLS, this
5275 * storage must include a reservation for all pre-existing
5276 * threads to obtain copies of both the new TLS, and an
5277 * extended DTV capable of storing an additional slot for
5278 * the newly-loaded DSO. */
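    /* Rough sizing note: per_th below reserves, for every existing thread, room for
     * this module's TLS block (size plus alignment slack) plus a DTV of
     * tls_cnt + TLS_CNT_INCREASE pointer slots; the overflow check falls back to
     * SIZE_MAX so calloc fails cleanly instead of wrapping. */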
5279 alloc_size = sizeof(struct dso) + strlen(task->pathname) + 1;
5280 if (runtime && task->tls.size) {
5281 size_t per_th = task->tls.size + task->tls.align + sizeof(void *) * (tls_cnt + TLS_CNT_INCREASE);
5282 n_th = libc.threads_minus_1 + 1;
5283 if (n_th > SSIZE_MAX / per_th) {
5284 alloc_size = SIZE_MAX;
5285 } else {
5286 alloc_size += n_th * per_th;
5287 }
5288 }
5289 task->p = calloc(1, alloc_size);
5290 if (!task->p) {
5291 LD_LOGE("Error loading header %{public}s: failed to allocate dso", task->name);
5292 close(task->fd);
5293 task->fd = -1;
5294 return false;
5295 }
5296 task->p->dev = st.st_dev;
5297 task->p->ino = st.st_ino;
5298 task->p->file_offset = task->file_offset;
5299 task->p->needed_by = needed_by;
5300 task->p->name = task->p->buf;
5301 strcpy(task->p->name, task->pathname);
5302 task->p->tls = task->tls;
5303 task->p->dynv = task->dyn_addr;
5304 task->p->strings = task->str_addr;
5305 size_t rpath_offset;
5306 size_t runpath_offset;
5307 if (search_vec(task->p->dynv, &rpath_offset, DT_RPATH))
5308 task->p->rpath_orig = task->p->strings + rpath_offset;
5309 if (search_vec(task->p->dynv, &runpath_offset, DT_RUNPATH))
5310 task->p->rpath_orig = task->p->strings + runpath_offset;
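    /* DT_RUNPATH is looked up after DT_RPATH and overwrites rpath_orig, so when
     * both tags are present the DT_RUNPATH value wins. */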
5311
5312 /* Add a shortname only if name arg was not an explicit pathname. */
5313 if (task->pathname != name) {
5314 task->p->shortname = strrchr(task->p->name, '/') + 1;
5315 }
5316
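    /* TLS storage for already-running threads is carved out of the same calloc
     * block as the dso: new_dtv is placed just past the name string, rounded down
     * to size_t alignment by the (-sizeof(size_t) & ...) mask, and new_tls follows
     * the n_th per-thread DTV copies (tls_cnt + 1 slots each). */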
5317 if (task->p->tls.size) {
5318 task->p->tls_id = ++tls_cnt;
5319 task->p->new_dtv = (void *)(-sizeof(size_t) &
5320 (uintptr_t)(task->p->name + strlen(task->p->name) + sizeof(size_t)));
5321 task->p->new_tls = (void *)(task->p->new_dtv + n_th * (tls_cnt + 1));
5322 }
5323
5324 tail->next = task->p;
5325 task->p->prev = tail;
5326 tail = task->p;
5327
5328 /* Add dso to namespace */
5329 task->p->namespace = namespace;
5330 ns_add_dso(namespace, task->p);
5331 return true;
5332 }
5333
5334 static void task_load_library(struct loadtask *task, struct reserved_address_params *reserved_params)
5335 {
5336 LD_LOGD("load_library loading ns=%{public}s name=%{public}s by_dlopen=%{public}d", task->namespace->ns_name, task->p->name, runtime);
5337 bool map = noload ? 0 : task_map_library(task, reserved_params);
5338 close(task->fd);
5339 task->fd = -1;
5340 if (!map) {
5341 LD_LOGE("Error loading library %{public}s: failed to map library", task->name);
5342 error("Error loading library %s: failed to map library", task->name);
5343 if (runtime) {
5344 longjmp(*rtld_fail, 1);
5345 }
5346 return;
5347 }
5348
5349 /* Avoid the danger of getting two versions of libc mapped into the
5350 * same process when an absolute pathname was used. The symbols
5351 * checked are chosen to catch both musl and glibc, and to avoid
5352 * false positives from interposition-hack libraries. */
5353 decode_dyn(task->p);
5354 if (find_sym(task->p, "__libc_start_main", 1).sym &&
5355 find_sym(task->p, "stdin", 1).sym) {
5356 do_dlclose(task->p);
5357 task->p = NULL;
5358 free((void*)task->name);
5359 task->name = ld_strdup("libc.so");
5360 task->check_inherited = true;
5361 if (!load_library_header(task)) {
5362 LD_LOGE("Error loading library %{public}s: failed to load libc.so", task->name);
5363 error("Error loading library %s: failed to load libc.so", task->name);
5364 if (runtime) {
5365 longjmp(*rtld_fail, 1);
5366 }
5367 }
5368 return;
5369 }
5370 /* Past this point, if we haven't reached runtime yet, ldso has
5371 * committed either to use the mapped library or to abort execution.
5372 * Unmapping is not possible, so we can safely reclaim gaps. */
5373 if (!runtime) {
5374 reclaim_gaps(task->p);
5375 }
5376 task->p->runtime_loaded = runtime;
5377 if (runtime)
5378 task->p->by_dlopen = 1;
5379
5380 if (DL_FDPIC) {
5381 makefuncdescs(task->p);
5382 }
5383
5384 if (ldd_mode) {
5385 dprintf(1, "\t%s => %s (%p)\n", task->name, task->pathname, task->p->base);
5386 }
5387
5388 #ifdef ENABLE_HWASAN
5389 if (libc.load_hook) {
5390 libc.load_hook((long unsigned int)task->p->base, task->p->phdr, task->p->phnum);
5391 }
5392 #endif
5393 }
5394
5395 static void preload_direct_deps(struct dso *p, ns_t *namespace, struct loadtasks *tasks)
5396 {
5397 size_t i, cnt = 0;
5398 if (p->deps) {
5399 return;
5400 }
5401 /* For head, all preloads are direct pseudo-dependencies.
5402 * Count and include them now to avoid realloc later. */
5403 if (p == head) {
5404 for (struct dso *q = p->next; q; q = q->next) {
5405 cnt++;
5406 }
5407 }
5408 for (i = 0; p->dynv[i]; i += NEXT_DYNAMIC_INDEX) {
5409 if (p->dynv[i] == DT_NEEDED) {
5410 cnt++;
5411 }
5412 }
5413 /* Use builtin buffer for apps with no external deps, to
5414 * preserve property of no runtime failure paths. */
5415 p->deps = (p == head && cnt < MIN_DEPS_COUNT) ? builtin_deps :
5416 calloc(cnt + 1, sizeof *p->deps);
5417 if (!p->deps) {
5418 LD_LOGE("Error loading dependencies for %{public}s", p->name);
5419 error("Error loading dependencies for %s", p->name);
5420 if (runtime) {
5421 longjmp(*rtld_fail, 1);
5422 }
5423 }
5424 cnt = 0;
5425 if (p == head) {
5426 for (struct dso *q = p->next; q; q = q->next) {
5427 p->deps[cnt++] = q;
5428 }
5429 }
5430 for (i = 0; p->dynv[i]; i += NEXT_DYNAMIC_INDEX) {
5431 if (p->dynv[i] != DT_NEEDED) {
5432 continue;
5433 }
5434 const char* dtneed_name = p->strings + p->dynv[i + 1];
5435 LD_LOGD("load_library %{public}s adding DT_NEEDED task %{public}s namespace(%{public}s)", p->name, dtneed_name, namespace->ns_name);
5436 struct loadtask *task = create_loadtask(dtneed_name, p, namespace, true);
5437 if (!task) {
5438 LD_LOGE("Error loading dependencies %{public}s : create load task failed", p->name);
5439 error("Error loading dependencies for %s : create load task failed", p->name);
5440 if (runtime) {
5441 longjmp(*rtld_fail, 1);
5442 }
5443 continue;
5444 }
5445 LD_LOGD("loading shared library %{public}s: (needed by %{public}s)", p->strings + p->dynv[i+1], p->name);
5446 if (!load_library_header(task)) {
5447 free_task(task);
5448 task = NULL;
5449 LD_LOGE("Error loading shared library %{public}s: (needed by %{public}s)",
5450 p->strings + p->dynv[i + 1],
5451 p->name);
5452 error("Error loading shared library %s: %m (needed by %s)",
5453 p->strings + p->dynv[i + 1], p->name);
5454 if (runtime) {
5455 longjmp(*rtld_fail, 1);
5456 }
5457 continue;
5458 }
5459 p->deps[cnt++] = task->p;
5460 if (task->isloaded) {
5461 free_task(task);
5462 task = NULL;
5463 } else {
5464 append_loadtasks(tasks, task);
5465 }
5466 }
5467 p->deps[cnt] = 0;
5468 p->ndeps_direct = cnt;
5469 for (i = 0; i < p->ndeps_direct; i++) {
5470 add_dso_parent(p->deps[i], p);
5471 }
5472 }
5473
5474 static void unmap_preloaded_sections(struct loadtasks *tasks)
5475 {
5476 struct loadtask *task = NULL;
5477 for (size_t i = 0; i < tasks->length; i++) {
5478 task = get_loadtask(tasks, i);
5479 if (!task) {
5480 continue;
5481 }
5482 if (task->dyn_map_len) {
5483 munmap(task->dyn_map, task->dyn_map_len);
5484 task->dyn_map = NULL;
5485 task->dyn_map_len = 0;
5486 if (task->p) {
5487 task->p->dynv = NULL;
5488 }
5489 }
5490 if (task->str_map_len) {
5491 munmap(task->str_map, task->str_map_len);
5492 task->str_map = NULL;
5493 task->str_map_len = 0;
5494 if (task->p) {
5495 task->p->strings = NULL;
5496 }
5497 }
5498 }
5499 }
5500
5501 static void preload_deps(struct dso *p, struct loadtasks *tasks)
5502 {
5503 if (p->deps) {
5504 return;
5505 }
5506 for (; p; p = p->next) {
5507 preload_direct_deps(p, p->namespace, tasks);
5508 }
5509 }
5510
5511 static void run_loadtasks(struct loadtasks *tasks, struct reserved_address_params *reserved_params)
5512 {
5513 struct loadtask *task = NULL;
5514 bool reserved_address = false;
5515 for (size_t i = 0; i < tasks->length; i++) {
5516 task = get_loadtask(tasks, i);
5517 if (task) {
5518 if (reserved_params) {
5519 reserved_address = reserved_params->reserved_address_recursive || (reserved_params->target == task->p);
5520 }
5521 task_load_library(task, reserved_address ? reserved_params : NULL);
5522 }
5523 }
5524 }
5525
5526 UT_STATIC void assign_tls(struct dso *p)
5527 {
5528 while (p) {
5529 if (p->tls.image) {
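            /* With TLS_ABOVE_TP the static TLS area grows upward from the thread
             * pointer, so each module's offset is the current end aligned up;
             * otherwise offsets are measured downward from the thread pointer and
             * the module is aligned below the running total. Both forms keep the
             * block congruent to the image address modulo its alignment. */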
5530 tls_align = MAXP2(tls_align, p->tls.align);
5531 #ifdef TLS_ABOVE_TP
5532 p->tls.offset = tls_offset + ((p->tls.align - 1) &
5533 (-tls_offset + (uintptr_t)p->tls.image));
5534 tls_offset = p->tls.offset + p->tls.size;
5535 #else
5536 tls_offset += p->tls.size + p->tls.align - 1;
5537 tls_offset -= (tls_offset + (uintptr_t)p->tls.image)
5538 & (p->tls.align - 1);
5539 p->tls.offset = tls_offset;
5540 #endif
5541 if (tls_tail) {
5542 tls_tail->next = &p->tls;
5543 } else {
5544 libc.tls_head = &p->tls;
5545 }
5546 tls_tail = &p->tls;
5547 }
5548
5549 p = p->next;
5550 }
5551 }
5552
5553 UT_STATIC void load_preload(char *s, ns_t *ns, struct loadtasks *tasks)
5554 {
5555 int tmp;
5556 char *z;
5557
5558 struct loadtask *task = NULL;
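    /* s is the raw preload list: names separated by colons or whitespace. Each
     * name is NUL-terminated in place, turned into a load task, and the original
     * delimiter is restored afterwards so the string stays intact. */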
5559 for (z = s; *z; s = z) {
5560 for (; *s && (isspace(*s) || *s == ':'); s++) {
5561 ;
5562 }
5563 for (z = s; *z && !isspace(*z) && *z != ':'; z++) {
5564 ;
5565 }
5566 tmp = *z;
5567 *z = 0;
5568 task = create_loadtask(s, NULL, ns, true);
5569 if (!task) {
5570 continue;
5571 }
5572 if (load_library_header(task)) {
5573 if (!task->isloaded) {
5574 append_loadtasks(tasks, task);
5575 task = NULL;
5576 }
5577 }
5578 if (task) {
5579 free_task(task);
5580 }
5581 *z = tmp;
5582 }
5583 }
5584 #endif
5585
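/* Write this dso's already-relocated GNU_RELRO segment to fd at *file_offset, then
 * map that file content back over the segment read-only, so identical relro pages
 * can later be shared by other processes that reuse the file (see map_gnu_relro). */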
5586 static int serialize_gnu_relro(int fd, struct dso *dso, ssize_t *file_offset)
5587 {
5588 ssize_t count = dso->relro_end - dso->relro_start;
5589 ssize_t offset = 0;
5590 while (count > 0) {
5591 ssize_t write_size = TEMP_FAILURE_RETRY(write(fd, laddr(dso, dso->relro_start + offset), count));
5592 if (-1 == write_size) {
5593 LD_LOGE("Error serializing relro %{public}s: failed to write GNU_RELRO", dso->name);
5594 return -1;
5595 }
5596 offset += write_size;
5597 count -= write_size;
5598 }
5599
5600 ssize_t size = dso->relro_end - dso->relro_start;
5601 void *map = mmap(
5602 laddr(dso, dso->relro_start),
5603 size,
5604 PROT_READ,
5605 MAP_PRIVATE | MAP_FIXED,
5606 fd,
5607 *file_offset);
5608 if (map == MAP_FAILED) {
5609 LD_LOGE("Error serializing relro %{public}s: failed to map GNU_RELRO", dso->name);
5610 return -1;
5611 }
5612 *file_offset += size;
5613 return 0;
5614 }
5615
5616 static int map_gnu_relro(int fd, struct dso *dso, ssize_t *file_offset)
5617 {
5618 ssize_t ext_fd_file_size = 0;
5619 struct stat ext_fd_file_stat;
5620 if (TEMP_FAILURE_RETRY(fstat(fd, &ext_fd_file_stat)) != 0) {
5621 LD_LOGE("Error mapping relro %{public}s: failed to get file state", dso->name);
5622 return -1;
5623 }
5624 ext_fd_file_size = ext_fd_file_stat.st_size;
5625
5626 void *ext_temp_map = MAP_FAILED;
5627 ext_temp_map = mmap(NULL, ext_fd_file_size, PROT_READ, MAP_PRIVATE, fd, 0);
5628 if (ext_temp_map == MAP_FAILED) {
5629 LD_LOGE("Error mapping relro %{public}s: failed to map fd", dso->name);
5630 return -1;
5631 }
5632
5633 char *file_base = (char *)(ext_temp_map) + *file_offset;
5634 char *mem_base = (char *)(laddr(dso, dso->relro_start));
5635 ssize_t start_offset = 0;
5636 ssize_t size = dso->relro_end - dso->relro_start;
5637
5638 if (size > ext_fd_file_size - *file_offset) {
5639 LD_LOGE("Error mapping relro %{public}s: invalid file size", dso->name);
        munmap(ext_temp_map, ext_fd_file_size); /* release the temporary mapping on this error path too */
5640 return -1;
5641 }
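    /* Walk the segment page by page: find the next run of pages whose current
     * contents match the serialized copy in the file, then remap just that run
     * from fd so those pages are backed by (and shared through) the file. Pages
     * that differ, e.g. because relocations resolved to different addresses,
     * stay as private dirty memory. */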
5642 while (start_offset < size) {
5643 // Find start location.
5644 while (start_offset < size) {
5645 if (memcmp(mem_base + start_offset, file_base + start_offset, PAGE_SIZE) == 0) {
5646 break;
5647 }
5648 start_offset += PAGE_SIZE;
5649 }
5650
5651 // Find end location.
5652 ssize_t end_offset = start_offset;
5653 while (end_offset < size) {
5654 if (memcmp(mem_base + end_offset, file_base + end_offset, PAGE_SIZE) != 0) {
5655 break;
5656 }
5657 end_offset += PAGE_SIZE;
5658 }
5659
5660 // Map pages.
5661 ssize_t map_length = end_offset - start_offset;
5662 ssize_t map_offset = *file_offset + start_offset;
5663 if (map_length > 0) {
5664 void *map = mmap(
5665 mem_base + start_offset, map_length, PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, map_offset);
5666 if (map == MAP_FAILED) {
5667 LD_LOGE("Error mapping relro %{public}s: failed to map GNU_RELRO", dso->name);
5668 munmap(ext_temp_map, ext_fd_file_size);
5669 return -1;
5670 }
5671 }
5672
5673 start_offset = end_offset;
5674 }
5675 *file_offset += size;
5676 munmap(ext_temp_map, ext_fd_file_size);
5677 return 0;
5678 }
5679
5680 static void handle_relro_sharing(struct dso *p, const dl_extinfo *extinfo, ssize_t *relro_fd_offset)
5681 {
5682 if (extinfo == NULL) {
5683 return;
5684 }
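    /* DL_EXT_WRITE_RELRO: the caller supplied a writable fd, so dump this dso's
     * relro into it. DL_EXT_USE_RELRO: the fd already holds previously serialized
     * relro, so try to reuse its pages. In both cases relro_fd_offset advances by
     * the segment size, keeping multiple dsos packed in one file. */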
5685 if (extinfo->flag & DL_EXT_WRITE_RELRO) {
5686 LD_LOGD("Serializing GNU_RELRO %{public}s", p->name);
5687 if (serialize_gnu_relro(extinfo->relro_fd, p, relro_fd_offset) < 0) {
5688 LD_LOGE("Error serializing GNU_RELRO %{public}s", p->name);
5689 error("Error serializing GNU_RELRO");
5690 if (runtime) longjmp(*rtld_fail, 1);
5691 }
5692 } else if (extinfo->flag & DL_EXT_USE_RELRO) {
5693 LD_LOGD("Mapping GNU_RELRO %{public}s", p->name);
5694 if (map_gnu_relro(extinfo->relro_fd, p, relro_fd_offset) < 0) {
5695 LD_LOGE("Error mapping GNU_RELRO %{public}s", p->name);
5696 error("Error mapping GNU_RELRO");
5697 if (runtime) longjmp(*rtld_fail, 1);
5698 }
5699 }
5700 }
5701
5702 static void set_bss_vma_name(char *path_name, void *addr, size_t zeromap_size)
5703 {
5704 char so_bss_name[ANON_NAME_MAX_LEN];
5705 if (path_name == NULL) {
5706 snprintf(so_bss_name, ANON_NAME_MAX_LEN, ".bss");
5707 } else {
5708 char *t = strrchr(path_name, '/');
5709 if (t) {
5710 snprintf(so_bss_name, ANON_NAME_MAX_LEN, "%s.bss", ++t);
5711 } else {
5712 snprintf(so_bss_name, ANON_NAME_MAX_LEN, "%s.bss", path_name);
5713 }
5714 }
5715
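    /* PR_SET_VMA_ANON_NAME tags the anonymous .bss mapping so it appears as
     * "[anon:<libname>.bss]" in /proc/<pid>/maps on kernels built with
     * anonymous-VMA naming; elsewhere the prctl simply fails and is ignored. */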
5716 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, zeromap_size, so_bss_name);
5717 }
5718
5719 static void find_and_set_bss_name(struct dso *p)
5720 {
5721 size_t cnt;
5722 Phdr *ph = p->phdr;
5723 for (cnt = p->phnum; cnt--; ph = (void *)((char *)ph + p->phentsize)) {
5724 if (ph->p_type != PT_LOAD) continue;
5725 size_t seg_start = p->base + ph->p_vaddr;
5726 size_t seg_file_end = seg_start + ph->p_filesz + PAGE_SIZE - 1 & -PAGE_SIZE;
5727 size_t seg_max_addr = seg_start + ph->p_memsz + PAGE_SIZE - 1 & -PAGE_SIZE;
5728 size_t zeromap_size = seg_max_addr - seg_file_end;
5729 if (zeromap_size > 0 && (ph->p_flags & PF_W)) {
5730 set_bss_vma_name(p->name, (void *)seg_file_end, zeromap_size);
5731 }
5732 }
5733 }
5734
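/* The helpers below implement the r_debug rendezvous protocol used by debuggers:
 * the shadow list hanging off "debug" mirrors the loaded dsos, debug.state is
 * flipped to RT_ADD / RT_DELETE while that list is being changed, and the
 * breakpoint hook is invoked around each change so an attached debugger can
 * re-read the list once it is RT_CONSISTENT again. */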
5735 static void sync_with_debugger(void)
5736 {
5737 debug.ver = 1;
5738 debug.bp = dl_debug_state;
5739 debug.head = NULL;
5740 debug.base = ldso.base;
5741
5742 add_dso_info_to_debug_map(head);
5743
5744 debug.state = RT_CONSISTENT;
5745 _dl_debug_state();
5746 }
5747
5748 static void notify_addition_to_debugger(struct dso *p)
5749 {
5750 debug.state = RT_ADD;
5751 _dl_debug_state();
5752
5753 add_dso_info_to_debug_map(p);
5754
5755 debug.state = RT_CONSISTENT;
5756 _dl_debug_state();
5757 }
5758
5759 static void notify_remove_to_debugger(struct dso *p)
5760 {
5761 debug.state = RT_DELETE;
5762 _dl_debug_state();
5763
5764 remove_dso_info_from_debug_map(p);
5765
5766 debug.state = RT_CONSISTENT;
5767 _dl_debug_state();
5768 }
5769
5770 static void add_dso_info_to_debug_map(struct dso *p)
5771 {
5772 for (struct dso *so = p; so != NULL; so = so->next) {
5773 struct dso_debug_info *debug_info = malloc(sizeof(struct dso_debug_info));
5774 if (debug_info == NULL) {
5775 LD_LOGE("malloc error! dso name: %{public}s.", so->name);
5776 continue;
5777 }
5778 #if DL_FDPIC
5779 debug_info->loadmap = so->loadmap;
5780 #else
5781 debug_info->base = so->base;
5782 #endif
5783 debug_info->name = so->name;
5784 debug_info->dynv = so->dynv;
5785 if (debug.head == NULL) {
5786 debug_info->prev = NULL;
5787 debug_info->next = NULL;
5788 debug.head = debug_tail = debug_info;
5789 } else {
5790 debug_info->prev = debug_tail;
5791 debug_info->next = NULL;
5792 debug_tail->next = debug_info;
5793 debug_tail = debug_info;
5794 }
5795 so->debug_info = debug_info;
5796 }
5797 }
5798
5799 static void remove_dso_info_from_debug_map(struct dso *p)
5800 {
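    /* Unlink debug_info from the doubly linked shadow list. This apparently
     * assumes the entry being removed is never the list head (the startup
     * entries added by add_dso_info_to_debug_map stay in place), since
     * debug_info->prev is dereferenced unconditionally. */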
5801 struct dso_debug_info *debug_info = p->debug_info;
5802 if (debug_info == debug_tail) {
5803 debug_tail = debug_tail->prev;
5804 debug_tail->next = NULL;
5805 } else {
5806 debug_info->next->prev = debug_info->prev;
5807 debug_info->prev->next = debug_info->next;
5808 }
5809 free(debug_info);
5810 }
5811