1 #define _GNU_SOURCE
2 #define SYSCALL_NO_TLS 1
3
4 #include "dynlink.h"
5
6 #include <stdbool.h>
7 #include <stdlib.h>
8 #include <stdarg.h>
9 #include <stddef.h>
10 #include <string.h>
11 #include <unistd.h>
12 #include <stdint.h>
13 #include <elf.h>
14 #include <sys/mman.h>
15 #include <limits.h>
16 #include <fcntl.h>
17 #include <sys/stat.h>
18 #include <errno.h>
19 #include <link.h>
20 #include <setjmp.h>
21 #include <pthread.h>
22 #include <ctype.h>
23 #include <dlfcn.h>
24 #include <semaphore.h>
25 #include <sys/membarrier.h>
26 #include <sys/time.h>
27 #include <time.h>
28 #include <sys/prctl.h>
29 #include <sys/queue.h>
30
31 #include "cfi.h"
32 #include "dlfcn_ext.h"
33 #include "dynlink_rand.h"
34 #include "ld_log.h"
35 #include "libc.h"
36 #include "musl_fdsan.h"
37 #include "namespace.h"
38 #include "ns_config.h"
39 #include "pthread_impl.h"
40 #include "fork_impl.h"
41 #include "strops.h"
42 #include "trace/trace_marker.h"
43
44 #ifdef OHOS_ENABLE_PARAMETER
45 #include "sys_param.h"
46 #endif
47 #ifdef LOAD_ORDER_RANDOMIZATION
48 #include "zip_archive.h"
49 #endif
50
51 #define malloc __libc_malloc
52 #define calloc __libc_calloc
53 #define realloc __libc_realloc
54 #define free __libc_free
55
56 static void error(const char *, ...);
57
58 #define MAXP2(a,b) (-(-(a)&-(b)))
59 #define ALIGN(x,y) ((x)+(y)-1 & -(y))
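/* Bloom-filter pre-check used by the GNU hash lookups below: two bits of the
 * bloom word selected by the hash must be set, otherwise the symbol cannot be
 * in this dso and the enclosing loop continues. `gh` is taken from the
 * enclosing scope. */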
60 #define GNU_HASH_FILTER(ght, ghm, gho) \
61 const size_t *bloomwords = (const void *)(ght+4); \
62 size_t f = bloomwords[gho & (ght[2]-1)]; \
63 if (!(f & ghm)) continue; \
64 f >>= (gh >> ght[3]) % (8 * sizeof f); \
65 if (!(f & 1)) continue;
66
67 #define container_of(p,t,m) ((t*)((char *)(p)-offsetof(t,m)))
68 #define countof(a) ((sizeof (a))/(sizeof (a)[0]))
69 #define DSO_FLAGS_NODELETE 0x1
70
71 #ifdef HANDLE_RANDOMIZATION
72 #define NEXT_DYNAMIC_INDEX 2
73 #define MIN_DEPS_COUNT 2
74 #define NAME_INDEX_ZERO 0
75 #define NAME_INDEX_ONE 1
76 #define NAME_INDEX_TWO 2
77 #define NAME_INDEX_THREE 3
78 #define TLS_CNT_INCREASE 3
79 #define INVALID_FD_INHIBIT_FURTHER_SEARCH (-2)
80 #endif
81
82 #define PARENTS_BASE_CAPACITY 8
83 #define RELOC_CAN_SEARCH_DSO_BASE_CAPACITY 32
84 #define ANON_NAME_MAX_LEN 70
85
86 #define KPMD_SIZE (1UL << 21)
87 #define HUGEPAGES_SUPPORTED_STR_SIZE (32)
88
89 #ifdef UNIT_TEST_STATIC
90 #define UT_STATIC
91 #else
92 #define UT_STATIC static
93 #endif
94
95 /* Used for dlclose */
96 #define UNLOAD_NR_DLOPEN_CHECK 1
97 #define UNLOAD_COMMON_CHECK 2
98 #define UNLOAD_ALL_CHECK 3
99 struct dso_entry {
100 struct dso *dso;
101 TAILQ_ENTRY(dso_entry) entries;
102 };
103
104 struct debug {
105 int ver;
106 void *head;
107 void (*bp)(void);
108 int state;
109 void *base;
110 };
111
112 struct reserved_address_params {
113 void* start_addr;
114 size_t reserved_size;
115 bool must_use_reserved;
116 bool reserved_address_recursive;
117 #ifdef LOAD_ORDER_RANDOMIZATION
118 struct dso *target;
119 #endif
120 };
121
122 typedef void (*stage3_func)(size_t *, size_t *, size_t *);
123
124 static struct builtin_tls {
125 char c[8];
126 struct pthread pt;
127 void *space[16];
128 } builtin_tls[1];
129 #define MIN_TLS_ALIGN offsetof(struct builtin_tls, pt)
130
131 #define ADDEND_LIMIT 4096
132 static size_t *saved_addends, *apply_addends_to;
133 static bool g_is_asan;
134 static struct dso ldso;
135 static struct dso *head, *tail, *fini_head, *syms_tail, *lazy_head;
136 static struct dso_debug_info *debug_tail = NULL;
137 static char *env_path, *sys_path;
138 static unsigned long long gencnt;
139 static int runtime;
140 static int ldd_mode;
141 static int ldso_fail;
142 static int noload;
143 static int shutting_down;
144 static jmp_buf *rtld_fail;
145 static pthread_rwlock_t lock;
146 static struct debug debug;
147 static struct tls_module *tls_tail;
148 static size_t tls_cnt, tls_offset, tls_align = MIN_TLS_ALIGN;
149 static size_t static_tls_cnt;
150 static pthread_mutex_t init_fini_lock;
151 static pthread_cond_t ctor_cond;
152 static struct dso *builtin_deps[2];
153 static struct dso *const no_deps[1];
154 static struct dso *builtin_ctor_queue[4];
155 static struct dso **main_ctor_queue;
156 static struct fdpic_loadmap *app_loadmap;
157 static struct fdpic_dummy_loadmap app_dummy_loadmap;
158
159 struct debug *_dl_debug_addr = &debug;
160
161 extern hidden int __malloc_replaced;
162
163 hidden void (*const __init_array_start)(void)=0, (*const __fini_array_start)(void)=0;
164
165 extern hidden void (*const __init_array_end)(void), (*const __fini_array_end)(void);
166
167 #ifdef USE_GWP_ASAN
168 extern bool init_gwp_asan_by_libc(bool force_init);
169 #endif
170
171 weak_alias(__init_array_start, __init_array_end);
172 weak_alias(__fini_array_start, __fini_array_end);
173 #ifdef DFX_SIGNAL_LIBC
174 UT_STATIC void __InstallSignalHandler()
175 {
176 }
177 weak_alias(__InstallSignalHandler, DFX_InstallSignalHandler);
178 #endif
179
180 #ifdef HANDLE_RANDOMIZATION
181 static int do_dlclose(struct dso *p);
182 #endif
183
184 #ifdef LOAD_ORDER_RANDOMIZATION
185 static bool map_library_header(struct loadtask *task);
186 static bool task_map_library(struct loadtask *task, struct reserved_address_params *reserved_params);
187 static bool resolve_fd_to_realpath(struct loadtask *task);
188 static bool load_library_header(struct loadtask *task);
189 static void task_load_library(struct loadtask *task, struct reserved_address_params *reserved_params);
190 static void preload_direct_deps(struct dso *p, ns_t *namespace, struct loadtasks *tasks);
191 static void unmap_preloaded_sections(struct loadtasks *tasks);
192 static void preload_deps(struct dso *p, struct loadtasks *tasks);
193 static void run_loadtasks(struct loadtasks *tasks, struct reserved_address_params *reserved_params);
194 UT_STATIC void assign_tls(struct dso *p);
195 UT_STATIC void load_preload(char *s, ns_t *ns, struct loadtasks *tasks);
196 static void open_library_by_path(const char *name, const char *s, struct loadtask *task, struct zip_info *z_info);
197 static void handle_asan_path_open_by_task(int fd, const char *name, ns_t *namespace, struct loadtask *task, struct zip_info *z_info);
198 #endif
199
200 extern int __close(int fd);
201
202 /* Sharing relro */
203 static void handle_relro_sharing(struct dso *p, const dl_extinfo *extinfo, ssize_t *relro_fd_offset);
204
205 /* asan path open */
206 int handle_asan_path_open(int fd, const char *name, ns_t *namespace, char *buf, size_t buf_size);
207
208 static void set_bss_vma_name(char *path_name, void *addr, size_t zeromap_size);
209
210 static void find_and_set_bss_name(struct dso *p);
211
212 /* lldb debug function */
213 static void sync_with_debugger();
214 static void notify_addition_to_debugger(struct dso *p);
215 static void notify_remove_to_debugger(struct dso *p);
216 static void add_dso_info_to_debug_map(struct dso *p);
217 static void remove_dso_info_from_debug_map(struct dso *p);
218
219 /* add namespace function */
220 static void get_sys_path(ns_configor *conf);
221 static void dlclose_ns(struct dso *p);
222 static bool get_app_path(char *path, size_t size)
223 {
224 int l = 0;
225 l = readlink("/proc/self/exe", path, size);
226 if (l < 0 || l >= size) {
227 LD_LOGD("get_app_path readlink failed!");
228 return false;
229 }
230 path[l] = 0;
231 LD_LOGD("get_app_path path:%{public}s.", path);
232 return true;
233 }
234
235 static void init_default_namespace(struct dso *app)
236 {
237 ns_t *default_ns = get_default_ns();
238 memset(default_ns, 0, sizeof *default_ns);
239 ns_set_name(default_ns, NS_DEFAULT_NAME);
240 if (env_path) ns_set_env_paths(default_ns, env_path);
241 ns_set_lib_paths(default_ns, sys_path);
242 ns_set_separated(default_ns, false);
243 app->namespace = default_ns;
244 ns_add_dso(default_ns, app);
245 LD_LOGD("init_default_namespace default_namespace:"
246 "nsname: default ,"
247 "lib_paths:%{public}s ,"
248 "env_path:%{public}s ,"
249 "separated: false.",
250 sys_path, env_path);
251 return;
252 }
253
254 UT_STATIC void set_ns_attrs(ns_t *ns, ns_configor *conf)
255 {
256 if(!ns || !conf) {
257 return;
258 }
259
260 char *lib_paths, *asan_lib_paths, *permitted_paths, *asan_permitted_paths, *allowed_libs;
261
262 ns_set_separated(ns, conf->get_separated(ns->ns_name));
263
264 lib_paths = conf->get_lib_paths(ns->ns_name);
265 if (lib_paths) ns_set_lib_paths(ns, lib_paths);
266
267 asan_lib_paths = conf->get_asan_lib_paths(ns->ns_name);
268 if (asan_lib_paths) ns_set_asan_lib_paths(ns, asan_lib_paths);
269
270 permitted_paths = conf->get_permitted_paths(ns->ns_name);
271 if (permitted_paths) ns_set_permitted_paths(ns, permitted_paths);
272
273 asan_permitted_paths = conf->get_asan_permitted_paths(ns->ns_name);
274 if (asan_permitted_paths) ns_set_asan_permitted_paths(ns, asan_permitted_paths);
275
276 allowed_libs = conf->get_allowed_libs(ns->ns_name);
277 if (allowed_libs) ns_set_allowed_libs(ns, allowed_libs);
278
279 LD_LOGD("set_ns_attrs :"
280 "ns_name: %{public}s ,"
281 "separated:%{public}d ,"
282 "lib_paths:%{public}s ,"
283 "asan_lib_paths:%{public}s ,"
284 "permitted_paths:%{public}s ,"
285 "asan_permitted_paths:%{public}s ,"
286 "allowed_libs: %{public}s .",
287 ns->ns_name, ns->separated, ns->lib_paths, ns->asan_lib_paths, permitted_paths,
288 asan_permitted_paths, allowed_libs);
289 }
290
291 UT_STATIC void set_ns_inherits(ns_t *ns, ns_configor *conf)
292 {
293 if(!ns || !conf) {
294 return;
295 }
296
297 strlist *inherits = conf->get_inherits(ns->ns_name);
298 if (inherits) {
299 for (size_t i=0; i<inherits->num; i++) {
300 ns_t *inherited_ns = find_ns_by_name(inherits->strs[i]);
301 if (inherited_ns) {
302 char *shared_libs = conf->get_inherit_shared_libs(ns->ns_name, inherited_ns->ns_name);
303 ns_add_inherit(ns, inherited_ns, shared_libs);
304 LD_LOGD("set_ns_inherits :"
305 "ns_name: %{public}s ,"
306 "separated:%{public}d ,"
307 "lib_paths:%{public}s ,"
308 "asan_lib_paths:%{public}s ,",
309 inherited_ns->ns_name, inherited_ns->separated, inherited_ns->lib_paths,
310 inherited_ns->asan_lib_paths);
311 }
312 }
313 strlist_free(inherits);
314 } else {
315 LD_LOGD("set_ns_inherits inherits is NULL!");
316 }
317 }
318
319 static void init_namespace(struct dso *app)
320 {
321 char app_path[PATH_MAX+1];
322 if (!get_app_path(app_path, sizeof app_path)) {
323 strcpy(app_path, app->name);
324 }
325 char *t = strrchr(app_path, '/');
326 if (t) {
327 *t = 0;
328 } else {
329 app_path[0] = '.';
330 app_path[1] = 0;
331 }
332
333 nslist *nsl = nslist_init();
334 ns_configor *conf = configor_init();
335 char file_path[sizeof "/etc/ld-musl-namespace-" + sizeof (LDSO_ARCH) + sizeof ".ini" + 1] = {0};
336 (void)snprintf(file_path, sizeof file_path, "/etc/ld-musl-namespace-%s.ini", LDSO_ARCH);
337 LD_LOGI("init_namespace file_path:%{public}s", file_path);
338 trace_marker_reset();
339 trace_marker_begin(HITRACE_TAG_MUSL, "parse linker config", file_path);
340 int ret = conf->parse(file_path, app_path);
341 if (ret < 0) {
342 LD_LOGE("init_namespace ini file parse failed!");
343         /* init_default_namespace is required even if ini file parsing fails */
344 if (!sys_path) get_sys_path(conf);
345 init_default_namespace(app);
346 configor_free();
347 trace_marker_end(HITRACE_TAG_MUSL);
348 return;
349 }
350
351 /* sys_path needs to be parsed through ini file */
352 if (!sys_path) get_sys_path(conf);
353 init_default_namespace(app);
354
355 /* Init default namespace */
356 ns_t *d_ns = get_default_ns();
357 set_ns_attrs(d_ns, conf);
358
359 /* Init other namespace */
360 if (!nsl) {
361 LD_LOGE("init nslist fail!");
362 configor_free();
363 trace_marker_end(HITRACE_TAG_MUSL);
364 return;
365 }
366 strlist *s_ns = conf->get_namespaces();
367 if (s_ns) {
368 for (size_t i=0; i<s_ns->num; i++) {
369 ns_t *ns = ns_alloc();
370 ns_set_name(ns, s_ns->strs[i]);
371 set_ns_attrs(ns, conf);
372 ns_add_dso(ns, app);
373 nslist_add_ns(ns);
374 }
375 strlist_free(s_ns);
376 }
377 /* Set inherited namespace */
378 set_ns_inherits(d_ns, conf);
379 for (size_t i = 0; i < nsl->num; i++) {
380 set_ns_inherits(nsl->nss[i], conf);
381 }
382 configor_free();
383 trace_marker_end(HITRACE_TAG_MUSL);
384 return;
385 }
386
387 /* Compute load address for a virtual address in a given dso. */
388 #if DL_FDPIC
389 void *laddr(const struct dso *p, size_t v)
390 {
391 size_t j=0;
392 if (!p->loadmap) return p->base + v;
393 for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);
394 return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
395 }
396 static void *laddr_pg(const struct dso *p, size_t v)
397 {
398 size_t j=0;
399 size_t pgsz = PAGE_SIZE;
400 if (!p->loadmap) return p->base + v;
401 for (j=0; ; j++) {
402 size_t a = p->loadmap->segs[j].p_vaddr;
403 size_t b = a + p->loadmap->segs[j].p_memsz;
404 a &= -pgsz;
405 b += pgsz-1;
406 b &= -pgsz;
407 if (v-a<b-a) break;
408 }
409 return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
410 }
411 static void (*fdbarrier(void *p))()
412 {
413 void (*fd)();
414 __asm__("" : "=r"(fd) : "0"(p));
415 return fd;
416 }
417 #define fpaddr(p, v) fdbarrier((&(struct funcdesc){ \
418 laddr(p, v), (p)->got }))
419 #else
420 #define laddr(p, v) (void *)((p)->base + (v))
421 #define laddr_pg(p, v) laddr(p, v)
422 #define fpaddr(p, v) ((void (*)())laddr(p, v))
423 #endif
424
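/* Decode the _DYNAMIC tag/value pairs into a[]: a[tag] receives the value for
 * each tag below cnt, and a[0] collects a presence bitmask for small tags. */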
425 static void decode_vec(size_t *v, size_t *a, size_t cnt)
426 {
427 size_t i;
428 for (i=0; i<cnt; i++) a[i] = 0;
429 for (; v[0]; v+=2) if (v[0]-1<cnt-1) {
430 if (v[0] < 8*sizeof(long)) {
431 a[0] |= 1UL<<v[0];
432 }
433 a[v[0]] = v[1];
434 }
435 }
436
437 static int search_vec(size_t *v, size_t *r, size_t key)
438 {
439 for (; v[0]!=key; v+=2)
440 if (!v[0]) return 0;
441 *r = v[1];
442 return 1;
443 }
444
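/* Walk the Verdef chain for the entry whose index matches vsym and check that
 * its vd_hash equals the vna_hash taken from the consumer's Vernaux entry. */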
445 UT_STATIC int check_vna_hash(Verdef *def, int16_t vsym, uint32_t vna_hash)
446 {
447 int matched = 0;
448
449 vsym &= 0x7fff;
450 Verdef *verdef = def;
451 for(;;) {
452 if ((verdef->vd_ndx & 0x7fff) == vsym) {
453 if (vna_hash == verdef->vd_hash) {
454 matched = 1;
455 }
456 break;
457 }
458 if (matched) {
459 break;
460 }
461 if (verdef->vd_next == 0) {
462 break;
463 }
464 verdef = (Verdef *)((char *)verdef + verdef->vd_next);
465 }
466 #if (LD_LOG_LEVEL & LD_LOG_DEBUG)
467 if (!matched) {
468 LD_LOGD("check_vna_hash no matched found. vsym=%{public}d vna_hash=%{public}x", vsym, vna_hash);
469 }
470 #endif
471 return matched;
472 }
473
474 UT_STATIC int check_verinfo(Verdef *def, int16_t *versym, uint32_t index, struct verinfo *verinfo, char *strings)
475 {
476     /* If there is no version info (versym or verdef is null), the symbol matches only when no specific version was requested. */
477 if (!versym || !def) {
478 if (strlen(verinfo->v) == 0) {
479 return 1;
480 } else {
481 LD_LOGD("check_verinfo versym or def is null and verinfo->v exist, s:%{public}s v:%{public}s.",
482 verinfo->s, verinfo->v);
483 return 0;
484 }
485 }
486
487 int16_t vsym = versym[index];
488
489 /* find the verneed symbol. */
490 if (verinfo->use_vna_hash) {
491         if (vsym != VER_NDX_LOCAL && vsym != VER_NDX_GLOBAL) {
492 return check_vna_hash(def, vsym, verinfo->vna_hash);
493 }
494 }
495
496     /* If no specific version was requested and vsym is non-negative, the library's default version of the symbol is acceptable. */
497 if (strlen(verinfo->v) == 0) {
498 if (vsym >= 0) {
499 return 1;
500 } else {
501 LD_LOGD("check_verinfo not default version. vsym:%{public}d s:%{public}s", vsym, verinfo->s);
502 return 0;
503 }
504 }
505
506 /* find the version of symbol. */
507 vsym &= 0x7fff;
508 for (;;) {
509 if (!(def->vd_flags & VER_FLG_BASE) && (def->vd_ndx & 0x7fff) == vsym) {
510 break;
511 }
512 if (def->vd_next == 0) {
513 return 0;
514 }
515 def = (Verdef *)((char *)def + def->vd_next);
516 }
517
518 Verdaux *aux = (Verdaux *)((char *)def + def->vd_aux);
519
520 int ret = !strcmp(verinfo->v, strings + aux->vda_name);
521 #if (LD_LOG_LEVEL & LD_LOG_DEBUG)
522 if (!ret) {
523 LD_LOGD("check_verinfo version not match. s=%{public}s v=%{public}s vsym=%{public}d vda_name=%{public}s",
524 verinfo->s, verinfo->v, vsym, strings + aux->vda_name);
525 }
526 #endif
527 return ret;
528 }
529
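/* Classic SysV ELF hash; sym_l additionally records the name length so that
 * lookups can compare names with a single memcmp. */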
530 static struct sym_info_pair sysv_hash(const char *s0)
531 {
532 struct sym_info_pair s_info_p;
533 const unsigned char *s = (void *)s0;
534 uint_fast32_t h = 0;
535 while (*s) {
536 h = 16*h + *s++;
537 h ^= h>>24 & 0xf0;
538 }
539 s_info_p.sym_h = h & 0xfffffff;
540 s_info_p.sym_l = (char *)s - s0;
541 return s_info_p;
542 }
543
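/* GNU (Bernstein-style) hash, h = h*33 + c starting from 5381; sym_l records
 * the name length for memcmp-based comparison during lookup. */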
544 struct sym_info_pair gnu_hash(const char *s0)
545 {
546 struct sym_info_pair s_info_p;
547 const unsigned char *s = (void *)s0;
548 uint_fast32_t h = 5381;
549 for (; *s; s++)
550 h += h*32 + *s;
551 s_info_p.sym_h = h;
552 s_info_p.sym_l = (char *)s - s0;
553 return s_info_p;
554 }
555
556 static Sym *sysv_lookup(struct verinfo *verinfo, struct sym_info_pair s_info_p, struct dso *dso)
557 {
558 size_t i;
559 uint32_t h = s_info_p.sym_h;
560 Sym *syms = dso->syms;
561 Elf_Symndx *hashtab = dso->hashtab;
562 char *strings = dso->strings;
563 for (i=hashtab[2+h%hashtab[0]]; i; i=hashtab[2+hashtab[0]+i]) {
564 if ((!dso->versym || (dso->versym[i] & 0x7fff) >= 0)
565 && (!memcmp(verinfo->s, strings+syms[i].st_name, s_info_p.sym_l))) {
566 if (!check_verinfo(dso->verdef, dso->versym, i, verinfo, dso->strings)) {
567 continue;
568 }
569
570 return syms+i;
571 }
572
573 }
574 LD_LOGD("sysv_lookup not find the symbol, "
575 "so:%{public}s s:%{public}s v:%{public}s use_vna_hash:%{public}d vna_hash:%{public}x",
576 dso->name, verinfo->s, verinfo->v, verinfo->use_vna_hash, verinfo->vna_hash);
577 return 0;
578 }
579
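/* GNU hash lookup: walk the hash-value chain of the selected bucket (the
 * chain ends at an entry with the low bit set), comparing full hashes before
 * names and finally the symbol version. */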
580 static Sym *gnu_lookup(struct sym_info_pair s_info_p, uint32_t *hashtab, struct dso *dso, struct verinfo *verinfo)
581 {
582 uint32_t h1 = s_info_p.sym_h;
583 uint32_t nbuckets = hashtab[0];
584 uint32_t *buckets = hashtab + 4 + hashtab[2]*(sizeof(size_t)/4);
585 uint32_t i = buckets[h1 % nbuckets];
586
587 if (!i) {
588 LD_LOGD("gnu_lookup symbol not found (bloom filter), so:%{public}s s:%{public}s", dso->name, verinfo->s);
589 return 0;
590 }
591
592 uint32_t *hashval = buckets + nbuckets + (i - hashtab[1]);
593
594 for (h1 |= 1; ; i++) {
595 uint32_t h2 = *hashval++;
596 if ((h1 == (h2|1)) && (!dso->versym || (dso->versym[i] & 0x7fff) >= 0)
597 && !memcmp(verinfo->s, dso->strings + dso->syms[i].st_name, s_info_p.sym_l)) {
598 if (!check_verinfo(dso->verdef, dso->versym, i, verinfo, dso->strings)) {
599 continue;
600 }
601
602 return dso->syms+i;
603 }
604
605 if (h2 & 1) break;
606 }
607
608 LD_LOGD("gnu_lookup symbol not found, "
609 "so:%{public}s s:%{public}s v:%{public}s use_vna_hash:%{public}d vna_hash:%{public}x",
610 dso->name, verinfo->s, verinfo->v, verinfo->use_vna_hash, verinfo->vna_hash);
611 return 0;
612 }
613
614 static bool check_sym_accessible(struct dso *dso, ns_t *ns)
615 {
616 if (!dso || !dso->namespace || !ns) {
617 LD_LOGD("check_sym_accessible invalid parameter!");
618 return false;
619 }
620 if (dso->namespace == ns) {
621 return true;
622 }
623 for (int i = 0; i < dso->parents_count; i++) {
624 if (dso->parents[i]->namespace == ns) {
625 return true;
626 }
627 }
628 LD_LOGD(
629 "check_sym_accessible dso name [%{public}s] ns_name [%{public}s] not accessible!", dso->name, ns->ns_name);
630 return false;
631 }
632
633 static inline bool is_dso_accessible(struct dso *dso, ns_t *ns)
634 {
635 if (dso->namespace == ns) {
636 return true;
637 }
638 for (int i = 0; i < dso->parents_count; i++) {
639 if (dso->parents[i]->namespace == ns) {
640 return true;
641 }
642 }
643 LD_LOGD(
644 "check_sym_accessible dso name [%{public}s] ns_name [%{public}s] not accessible!", dso->name, ns->ns_name);
645 return false;
646 }
647
648 static int find_dso_parent(struct dso *p, struct dso *target)
649 {
650 int index = -1;
651 for (int i = 0; i < p->parents_count; i++) {
652 if (p->parents[i] == target) {
653 index = i;
654 break;
655 }
656 }
657 return index;
658 }
659
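/* Record `parent` as a parent of `p`, growing p->parents in
 * PARENTS_BASE_CAPACITY-sized steps; duplicates are ignored. */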
660 static void add_dso_parent(struct dso *p, struct dso *parent)
661 {
662 int index = find_dso_parent(p, parent);
663 if (index != -1) {
664 return;
665 }
666 if (p->parents_count + 1 > p->parents_capacity) {
667 if (p->parents_capacity == 0) {
668 p->parents = (struct dso **)malloc(sizeof(struct dso *) * PARENTS_BASE_CAPACITY);
669 if (!p->parents) {
670 return;
671 }
672 p->parents_capacity = PARENTS_BASE_CAPACITY;
673 } else {
674 struct dso ** realloced = (struct dso **)realloc(
675 p->parents, sizeof(struct dso *) * (p->parents_capacity + PARENTS_BASE_CAPACITY));
676 if (!realloced) {
677 return;
678 }
679 p->parents = realloced;
680 p->parents_capacity += PARENTS_BASE_CAPACITY;
681 }
682 }
683 p->parents[p->parents_count] = parent;
684 p->parents_count++;
685 }
686
687 static void remove_dso_parent(struct dso *p, struct dso *parent)
688 {
689 int index = find_dso_parent(p, parent);
690 if (index == -1) {
691 return;
692 }
693 int i;
697 for (i = index; i < p->parents_count - 1; i++) {
698 p->parents[i] = p->parents[i + 1];
699 }
700 p->parents_count--;
701 }
702
703 static void add_reloc_can_search_dso(struct dso *p, struct dso *can_search_so)
704 {
705 if (p->reloc_can_search_dso_count + 1 > p->reloc_can_search_dso_capacity) {
706 if (p->reloc_can_search_dso_capacity == 0) {
707 p->reloc_can_search_dso_list =
708 (struct dso **)malloc(sizeof(struct dso *) * RELOC_CAN_SEARCH_DSO_BASE_CAPACITY);
709 if (!p->reloc_can_search_dso_list) {
710 return;
711 }
712 p->reloc_can_search_dso_capacity = RELOC_CAN_SEARCH_DSO_BASE_CAPACITY;
713 } else {
714 struct dso ** realloced = (struct dso **)realloc(
715 p->reloc_can_search_dso_list,
716 sizeof(struct dso *) * (p->reloc_can_search_dso_capacity + RELOC_CAN_SEARCH_DSO_BASE_CAPACITY));
717 if (!realloced) {
718 return;
719 }
720 p->reloc_can_search_dso_list = realloced;
721 p->reloc_can_search_dso_capacity += RELOC_CAN_SEARCH_DSO_BASE_CAPACITY;
722 }
723 }
724 p->reloc_can_search_dso_list[p->reloc_can_search_dso_count] = can_search_so;
725 p->reloc_can_search_dso_count++;
726 }
727
728 static void free_reloc_can_search_dso(struct dso *p)
729 {
730 if (p->reloc_can_search_dso_list) {
731 free(p->reloc_can_search_dso_list);
732 p->reloc_can_search_dso_list = NULL;
733 p->reloc_can_search_dso_count = 0;
734 p->reloc_can_search_dso_capacity = 0;
735 }
736 }
737
738 /* The list of sos that can be searched during relocation includes:
739  * - sos whose is_global flag is true, which are accessible by default.
740  *   Global sos include the exe, LD_PRELOAD sos and ldso.
741  * - sos whose is_reloc_head_so_dep flag is true; these are added only if accessible from the relocating dso's namespace.
742  *
743  * How is_reloc_head_so_dep is set:
744  * when dlopen()ing A, is_reloc_head_so_dep is set to true for
745  * all direct and indirect dependencies of A, including A itself. */
746 static void add_can_search_so_list_in_dso(struct dso *dso_relocating, struct dso *start_check_dso) {
747 struct dso *p = start_check_dso;
748 for (; p; p = p->syms_next) {
749 if (p->is_global) {
750 add_reloc_can_search_dso(dso_relocating, p);
751 continue;
752 }
753
754 if (p->is_reloc_head_so_dep) {
755 if (dso_relocating->namespace && check_sym_accessible(p, dso_relocating->namespace)) {
756 add_reloc_can_search_dso(dso_relocating, p);
757 }
758 }
759 }
760
761 return;
762 }
763
764 #define OK_TYPES (1<<STT_NOTYPE | 1<<STT_OBJECT | 1<<STT_FUNC | 1<<STT_COMMON | 1<<STT_TLS)
765 #define OK_BINDS (1<<STB_GLOBAL | 1<<STB_WEAK | 1<<STB_GNU_UNIQUE)
766
767 #ifndef ARCH_SYM_REJECT_UND
768 #define ARCH_SYM_REJECT_UND(s) 0
769 #endif
770
771 #if defined(__GNUC__)
772 __attribute__((always_inline))
773 #endif
774
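/* Look up a symbol in a single dso: check namespace accessibility, apply the
 * GNU-hash bloom filter (or fall back to the SysV hash table), then reject
 * undefined, valueless, or unsuitably typed/bound symbols. */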
775 struct symdef find_sym_impl(
776 struct dso *dso, struct verinfo *verinfo, struct sym_info_pair s_info_g, int need_def, ns_t *ns)
777 {
778 Sym *sym;
779 struct sym_info_pair s_info_s = {0, 0};
780 uint32_t *ght;
781 uint32_t gh = s_info_g.sym_h;
782 uint32_t gho = gh / (8 * sizeof(size_t));
783 size_t ghm = 1ul << gh % (8 * sizeof(size_t));
784 struct symdef def = {0};
785 if (ns && !check_sym_accessible(dso, ns))
786 return def;
787
788 if ((ght = dso->ghashtab)) {
789 const size_t *bloomwords = (const void *)(ght + 4);
790 size_t f = bloomwords[gho & (ght[2] - 1)];
791 if (!(f & ghm))
792 return def;
793
794 f >>= (gh >> ght[3]) % (8 * sizeof f);
795 if (!(f & 1))
796 return def;
797
798 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
799 } else {
800 if (!s_info_s.sym_h)
801 s_info_s = sysv_hash(verinfo->s);
802
803 sym = sysv_lookup(verinfo, s_info_s, dso);
804 }
805
806 if (!sym)
807 return def;
808
809 if (!sym->st_shndx)
810 if (need_def || (sym->st_info & 0xf) == STT_TLS || ARCH_SYM_REJECT_UND(sym))
811 return def;
812
813 if (!sym->st_value)
814 if ((sym->st_info & 0xf) != STT_TLS)
815 return def;
816
817 if (!(1 << (sym->st_info & 0xf) & OK_TYPES))
818 return def;
819
820 if (!(1 << (sym->st_info >> 4) & OK_BINDS))
821 return def;
822
823 def.sym = sym;
824 def.dso = dso;
825 return def;
826 }
827
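/* Search the chain of dsos starting at `dso` (its syms_next order, or its
 * deps array when use_deps is set) for verinfo->s, skipping dsos that are not
 * accessible from ns when a namespace is given. */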
828 static inline struct symdef find_sym2(struct dso *dso, struct verinfo *verinfo, int need_def, int use_deps, ns_t *ns)
829 {
830 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
831 struct sym_info_pair s_info_s = {0, 0};
832 uint32_t gh = s_info_g.sym_h, gho = gh / (8*sizeof(size_t)), *ght;
833 size_t ghm = 1ul << gh % (8*sizeof(size_t));
834 struct symdef def = {0};
835 struct dso **deps = use_deps ? dso->deps : 0;
836 for (; dso; dso=use_deps ? *deps++ : dso->syms_next) {
837 Sym *sym;
838 if (ns && !check_sym_accessible(dso, ns)) {
839 continue;
840 }
841 if ((ght = dso->ghashtab)) {
842 GNU_HASH_FILTER(ght, ghm, gho)
843 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
844 } else {
845 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
846 sym = sysv_lookup(verinfo, s_info_s, dso);
847 }
848
849 if (!sym) continue;
850 if (!sym->st_shndx)
851 if (need_def || (sym->st_info&0xf) == STT_TLS
852 || ARCH_SYM_REJECT_UND(sym))
853 continue;
854 if (!sym->st_value)
855 if ((sym->st_info&0xf) != STT_TLS)
856 continue;
857 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
858 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
859 def.sym = sym;
860 def.dso = dso;
861 break;
862 }
863 return def;
864 }
865
866 static inline struct symdef find_sym_by_deps(struct dso *dso, struct verinfo *verinfo, int need_def, ns_t *ns)
867 {
868 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
869 struct sym_info_pair s_info_s = {0, 0};
870 uint32_t h = 0, gh = s_info_g.sym_h, gho = gh / (8*sizeof(size_t)), *ght;
871 size_t ghm = 1ul << gh % (8*sizeof(size_t));
872 struct symdef def = {0};
873 struct dso **deps = dso->deps;
874 for (; dso; dso=*deps++) {
875 Sym *sym;
876 if (!is_dso_accessible(dso, ns)) {
877 continue;
878 }
879 if ((ght = dso->ghashtab)) {
880 GNU_HASH_FILTER(ght, ghm, gho)
881 sym = gnu_lookup(s_info_g, ght, dso, verinfo);
882 } else {
883 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
884 sym = sysv_lookup(verinfo, s_info_s, dso);
885 }
886
887 if (!sym) continue;
888 if (!sym->st_shndx)
889 if (need_def || (sym->st_info&0xf) == STT_TLS
890 || ARCH_SYM_REJECT_UND(sym))
891 continue;
892 if (!sym->st_value)
893 if ((sym->st_info&0xf) != STT_TLS)
894 continue;
895 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
896 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
897 def.sym = sym;
898 def.dso = dso;
899 break;
900 }
901 return def;
902 }
903
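/* Search the dsos recorded in dso_relocating->reloc_can_search_dso_list
 * (built by add_can_search_so_list_in_dso); for REL_COPY relocations the
 * head dso itself is skipped. */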
904 static inline struct symdef find_sym_by_saved_so_list(
905 int sym_type, struct dso *dso, struct verinfo *verinfo, int need_def, struct dso *dso_relocating)
906 {
907 struct sym_info_pair s_info_g = gnu_hash(verinfo->s);
908 struct sym_info_pair s_info_s = {0, 0};
909 uint32_t gh = s_info_g.sym_h, gho = gh / (8 * sizeof(size_t)), *ght;
910 size_t ghm = 1ul << gh % (8 * sizeof(size_t));
911 struct symdef def = {0};
912 // skip head dso.
913 int start_search_index = sym_type==REL_COPY ? 1 : 0;
914 struct dso *dso_searching = 0;
915 for (int i = start_search_index; i < dso_relocating->reloc_can_search_dso_count; i++) {
916 dso_searching = dso_relocating->reloc_can_search_dso_list[i];
917 Sym *sym;
918 if ((ght = dso_searching->ghashtab)) {
919 GNU_HASH_FILTER(ght, ghm, gho)
920 sym = gnu_lookup(s_info_g, ght, dso_searching, verinfo);
921 } else {
922 if (!s_info_s.sym_h) s_info_s = sysv_hash(verinfo->s);
923 sym = sysv_lookup(verinfo, s_info_s, dso_searching);
924 }
925 if (!sym) continue;
926 if (!sym->st_shndx)
927 if (need_def || (sym->st_info&0xf) == STT_TLS
928 || ARCH_SYM_REJECT_UND(sym))
929 continue;
930 if (!sym->st_value)
931 if ((sym->st_info&0xf) != STT_TLS)
932 continue;
933 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
934 if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
935 def.sym = sym;
936 def.dso = dso_searching;
937 break;
938 }
939 return def;
940 }
941
942 static struct symdef find_sym(struct dso *dso, const char *s, int need_def)
943 {
944 struct verinfo verinfo = { .s = s, .v = "", .use_vna_hash = false };
945 return find_sym2(dso, &verinfo, need_def, 0, NULL);
946 }
947
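/* For a versioned symbol reference, walk the Verneed/Vernaux chains to find
 * the vna_hash matching the symbol's version index. Returns false when the
 * dso has no version info or the symbol's version is local/global. */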
948 static bool get_vna_hash(struct dso *dso, int sym_index, uint32_t *vna_hash)
949 {
950 if (!dso->versym || !dso->verneed) {
951 return false;
952 }
953
954 uint16_t vsym = dso->versym[sym_index];
955 if (vsym == VER_NDX_LOCAL || vsym == VER_NDX_GLOBAL) {
956 return false;
957 }
958
959 bool result = false;
960 Verneed *verneed = dso->verneed;
961 Vernaux *vernaux;
962 vsym &= 0x7fff;
963
964 for(;;) {
965 vernaux = (Vernaux *)((char *)verneed + verneed->vn_aux);
966
967 for (size_t cnt = 0; cnt < verneed->vn_cnt; cnt++) {
968 if ((vernaux->vna_other & 0x7fff) == vsym) {
969 result = true;
970 *vna_hash = vernaux->vna_hash;
971 break;
972 }
973
974 vernaux = (Vernaux *)((char *)vernaux + vernaux->vna_next);
975 }
976
977 if (result) {
978 break;
979 }
980
981 if (verneed->vn_next == 0) {
982 break;
983 }
984
985 verneed = (Verneed *)((char *)verneed + verneed->vn_next);
986 }
987 return result;
988 }
989
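/* Resolve the version name string for sym_index from the dso's Verdef
 * entries (skipping the VER_FLG_BASE entry) and store it in vinfo->v. */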
990 static void get_verinfo(struct dso *dso, int sym_index, struct verinfo *vinfo)
991 {
992 char *strings = dso->strings;
993 // try to get version number from .gnu.version
994 int16_t vsym = dso->versym[sym_index];
995 Verdef *verdef = dso->verdef;
996 vsym &= 0x7fff;
997 if (!verdef) {
998 return;
999 }
1000 int version_found = 0;
1001 for (;;) {
1002 if (!verdef) {
1003 break;
1004 }
1005 if (!(verdef->vd_flags & VER_FLG_BASE) && (verdef->vd_ndx & 0x7fff) == vsym) {
1006 version_found = 1;
1007 break;
1008 }
1009 if (verdef->vd_next == 0) {
1010 break;
1011 }
1012 verdef = (Verdef *)((char *)verdef + verdef->vd_next);
1013 }
1014 if (version_found) {
1015 Verdaux *aux = (Verdaux *)((char *)verdef + verdef->vd_aux);
1016 if (aux && aux->vda_name && strings && (dso->strings + aux->vda_name)) {
1017 vinfo->v = dso->strings + aux->vda_name;
1018 }
1019 }
1020 }
1021
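/* Apply one relocation table (REL or RELA depending on stride) for dso:
 * resolve each referenced symbol, honoring symbol versioning and a one-entry
 * cache (cache_sym_index), then perform the type-specific fixup. */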
1022 static void do_relocs(struct dso *dso, size_t *rel, size_t rel_size, size_t stride)
1023 {
1024 unsigned char *base = dso->base;
1025 Sym *syms = dso->syms;
1026 char *strings = dso->strings;
1027 Sym *sym;
1028 const char *name;
1029 void *ctx;
1030 int type;
1031 int sym_index;
1032 struct symdef def;
1033 size_t *reloc_addr;
1034 size_t sym_val;
1035 size_t tls_val;
1036 size_t addend;
1037 int skip_relative = 0, reuse_addends = 0, save_slot = 0;
1038
1039 if (dso == &ldso) {
1040 /* Only ldso's REL table needs addend saving/reuse. */
1041 if (rel == apply_addends_to)
1042 reuse_addends = 1;
1043 skip_relative = 1;
1044 }
1045
1046 for (; rel_size; rel+=stride, rel_size-=stride*sizeof(size_t)) {
1047 if (skip_relative && IS_RELATIVE(rel[1], dso->syms)) continue;
1048 type = R_TYPE(rel[1]);
1049 if (type == REL_NONE) continue;
1050 reloc_addr = laddr(dso, rel[0]);
1051
1052 if (stride > 2) {
1053 addend = rel[2];
1054 } else if (type==REL_GOT || type==REL_PLT|| type==REL_COPY) {
1055 addend = 0;
1056 } else if (reuse_addends) {
1057 /* Save original addend in stage 2 where the dso
1058 * chain consists of just ldso; otherwise read back
1059 * saved addend since the inline one was clobbered. */
1060 if (head==&ldso)
1061 saved_addends[save_slot] = *reloc_addr;
1062 addend = saved_addends[save_slot++];
1063 } else {
1064 addend = *reloc_addr;
1065 }
1066
1067 sym_index = R_SYM(rel[1]);
1068 if (sym_index) {
1069 sym = syms + sym_index;
1070 name = strings + sym->st_name;
1071 ctx = type==REL_COPY ? head->syms_next : head;
1072 struct verinfo vinfo = { .s = name, .v = ""};
1073
1074 vinfo.use_vna_hash = get_vna_hash(dso, sym_index, &vinfo.vna_hash);
1075 if (!vinfo.use_vna_hash && dso->versym && (dso->versym[sym_index] & 0x7fff) >= 0) {
1076 get_verinfo(dso, sym_index, &vinfo);
1077 }
1078 if (dso->cache_sym_index == sym_index) {
1079 def = (struct symdef){ .dso = dso->cache_dso, .sym = dso->cache_sym };
1080 } else {
1081 def = (sym->st_info>>4) == STB_LOCAL
1082 ? (struct symdef){ .dso = dso, .sym = sym }
1083 : dso != &ldso ? find_sym_by_saved_so_list(type, ctx, &vinfo, type==REL_PLT, dso)
1084 : find_sym2(ctx, &vinfo, type==REL_PLT, 0, dso->namespace);
1085 dso->cache_sym_index = sym_index;
1086 dso->cache_dso = def.dso;
1087 dso->cache_sym = def.sym;
1088 }
1089
1090 if (!def.sym && (sym->st_shndx != SHN_UNDEF
1091 || sym->st_info>>4 != STB_WEAK)) {
1092 if (dso->lazy && (type==REL_PLT || type==REL_GOT)) {
1093 dso->lazy[3*dso->lazy_cnt+0] = rel[0];
1094 dso->lazy[3*dso->lazy_cnt+1] = rel[1];
1095 dso->lazy[3*dso->lazy_cnt+2] = addend;
1096 dso->lazy_cnt++;
1097 continue;
1098 }
1099 LD_LOGE("relocating failed: symbol not found. "
1100 "dso=%{public}s s=%{public}s use_vna_hash=%{public}d van_hash=%{public}x",
1101 dso->name, name, vinfo.use_vna_hash, vinfo.vna_hash);
1102 error("Error relocating %s: %s: symbol not found",
1103 dso->name, name);
1104 if (runtime) longjmp(*rtld_fail, 1);
1105 continue;
1106 }
1107 } else {
1108 sym = 0;
1109 def.sym = 0;
1110 def.dso = dso;
1111 }
1112
1113 sym_val = def.sym ? (size_t)laddr(def.dso, def.sym->st_value) : 0;
1114 tls_val = def.sym ? def.sym->st_value : 0;
1115
1116 if ((type == REL_TPOFF || type == REL_TPOFF_NEG)
1117 && def.dso->tls_id > static_tls_cnt) {
1118 error("Error relocating %s: %s: initial-exec TLS "
1119 "resolves to dynamic definition in %s",
1120 dso->name, name, def.dso->name);
1121 longjmp(*rtld_fail, 1);
1122 }
1123
1124 switch(type) {
1125 case REL_OFFSET:
1126 addend -= (size_t)reloc_addr;
1127 case REL_SYMBOLIC:
1128 case REL_GOT:
1129 case REL_PLT:
1130 *reloc_addr = sym_val + addend;
1131 break;
1132 case REL_USYMBOLIC:
1133 memcpy(reloc_addr, &(size_t){sym_val + addend}, sizeof(size_t));
1134 break;
1135 case REL_RELATIVE:
1136 *reloc_addr = (size_t)base + addend;
1137 break;
1138 case REL_SYM_OR_REL:
1139 if (sym) *reloc_addr = sym_val + addend;
1140 else *reloc_addr = (size_t)base + addend;
1141 break;
1142 case REL_COPY:
1143 memcpy(reloc_addr, (void *)sym_val, sym->st_size);
1144 break;
1145 case REL_OFFSET32:
1146 *(uint32_t *)reloc_addr = sym_val + addend
1147 - (size_t)reloc_addr;
1148 break;
1149 case REL_FUNCDESC:
1150 *reloc_addr = def.sym ? (size_t)(def.dso->funcdescs
1151 + (def.sym - def.dso->syms)) : 0;
1152 break;
1153 case REL_FUNCDESC_VAL:
1154 if ((sym->st_info&0xf) == STT_SECTION) *reloc_addr += sym_val;
1155 else *reloc_addr = sym_val;
1156 reloc_addr[1] = def.sym ? (size_t)def.dso->got : 0;
1157 break;
1158 case REL_DTPMOD:
1159 *reloc_addr = def.dso->tls_id;
1160 break;
1161 case REL_DTPOFF:
1162 *reloc_addr = tls_val + addend - DTP_OFFSET;
1163 break;
1164 #ifdef TLS_ABOVE_TP
1165 case REL_TPOFF:
1166 *reloc_addr = tls_val + def.dso->tls.offset + TPOFF_K + addend;
1167 break;
1168 #else
1169 case REL_TPOFF:
1170 *reloc_addr = tls_val - def.dso->tls.offset + addend;
1171 break;
1172 case REL_TPOFF_NEG:
1173 *reloc_addr = def.dso->tls.offset - tls_val + addend;
1174 break;
1175 #endif
1176 case REL_TLSDESC:
1177 if (stride<3) addend = reloc_addr[1];
1178 if (def.dso->tls_id > static_tls_cnt) {
1179 struct td_index *new = malloc(sizeof *new);
1180 if (!new) {
1181 error(
1182 "Error relocating %s: cannot allocate TLSDESC for %s",
1183 dso->name, sym ? name : "(local)" );
1184 longjmp(*rtld_fail, 1);
1185 }
1186 new->next = dso->td_index;
1187 dso->td_index = new;
1188 new->args[0] = def.dso->tls_id;
1189 new->args[1] = tls_val + addend - DTP_OFFSET;
1190 reloc_addr[0] = (size_t)__tlsdesc_dynamic;
1191 reloc_addr[1] = (size_t)new;
1192 } else {
1193 reloc_addr[0] = (size_t)__tlsdesc_static;
1194 #ifdef TLS_ABOVE_TP
1195 reloc_addr[1] = tls_val + def.dso->tls.offset
1196 + TPOFF_K + addend;
1197 #else
1198 reloc_addr[1] = tls_val - def.dso->tls.offset
1199 + addend;
1200 #endif
1201 }
1202 #ifdef TLSDESC_BACKWARDS
1203 /* Some archs (32-bit ARM at least) invert the order of
1204 * the descriptor members. Fix them up here. */
1205 size_t tmp = reloc_addr[0];
1206 reloc_addr[0] = reloc_addr[1];
1207 reloc_addr[1] = tmp;
1208 #endif
1209 break;
1210 default:
1211 error("Error relocating %s: unsupported relocation type %d",
1212 dso->name, type);
1213 if (runtime) longjmp(*rtld_fail, 1);
1214 continue;
1215 }
1216 }
1217 }
1218
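/* Re-run relocations that were deferred for lazily bound dsos; anything that
 * still cannot be resolved is queued on lazy_head again. */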
1219 static void redo_lazy_relocs()
1220 {
1221 struct dso *p = lazy_head, *next;
1222 lazy_head = 0;
1223 for (; p; p=next) {
1224 next = p->lazy_next;
1225 size_t size = p->lazy_cnt*3*sizeof(size_t);
1226 p->lazy_cnt = 0;
1227 do_relocs(p, p->lazy, size, 3);
1228 if (p->lazy_cnt) {
1229 p->lazy_next = lazy_head;
1230 lazy_head = p;
1231 } else {
1232 free(p->lazy);
1233 p->lazy = 0;
1234 p->lazy_next = 0;
1235 }
1236 }
1237 }
1238
1239 /* A huge hack: to make up for the wastefulness of shared libraries
1240 * needing at least a page of dirty memory even if they have no global
1241 * data, we reclaim the gaps at the beginning and end of writable maps
1242 * and "donate" them to the heap. */
1243
1244 static void reclaim(struct dso *dso, size_t start, size_t end)
1245 {
1246 if (start >= dso->relro_start && start < dso->relro_end) start = dso->relro_end;
1247 if (end >= dso->relro_start && end < dso->relro_end) end = dso->relro_start;
1248 if (start >= end) return;
1249 char *base = laddr_pg(dso, start);
1250 __malloc_donate(base, base+(end-start));
1251 }
1252
1253 static void reclaim_gaps(struct dso *dso)
1254 {
1255 Phdr *ph = dso->phdr;
1256 size_t phcnt = dso->phnum;
1257
1258 for (; phcnt--; ph=(void *)((char *)ph+dso->phentsize)) {
1259 if (ph->p_type!=PT_LOAD) continue;
1260 if ((ph->p_flags&(PF_R|PF_W))!=(PF_R|PF_W)) continue;
1261 reclaim(dso, ph->p_vaddr & -PAGE_SIZE, ph->p_vaddr);
1262 reclaim(dso, ph->p_vaddr+ph->p_memsz,
1263 ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE);
1264 }
1265 }
1266
1267 static ssize_t read_loop(int fd, void *p, size_t n)
1268 {
1269 for (size_t i=0; i<n; ) {
1270 ssize_t l = read(fd, (char *)p+i, n-i);
1271 if (l<0) {
1272 if (errno==EINTR) continue;
1273 else return -1;
1274 }
1275 if (l==0) return i;
1276 i += l;
1277 }
1278 return n;
1279 }
1280
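/* mmap with MAP_FIXED; on NOMMU kernels where MAP_FIXED fails with EINVAL,
 * fall back to filling the existing mapping via memset()/read(). */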
1281 UT_STATIC void *mmap_fixed(void *p, size_t n, int prot, int flags, int fd, off_t off)
1282 {
1283 static int no_map_fixed;
1284 char *q;
1285 if (!n) return p;
1286 if (!no_map_fixed) {
1287 q = mmap(p, n, prot, flags|MAP_FIXED, fd, off);
1288 if (!DL_NOMMU_SUPPORT || q != MAP_FAILED || errno != EINVAL)
1289 return q;
1290 no_map_fixed = 1;
1291 }
1292 /* Fallbacks for MAP_FIXED failure on NOMMU kernels. */
1293 if (flags & MAP_ANONYMOUS) {
1294 memset(p, 0, n);
1295 return p;
1296 }
1297 ssize_t r;
1298 if (lseek(fd, off, SEEK_SET) < 0) return MAP_FAILED;
1299 for (q=p; n; q+=r, off+=r, n-=r) {
1300 r = read(fd, q, n);
1301 if (r < 0 && errno != EINTR) return MAP_FAILED;
1302 if (!r) {
1303 memset(q, 0, n);
1304 break;
1305 }
1306 }
1307 return p;
1308 }
1309
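/* Tear down a dso's mappings (both FDPIC loadmap segments and flat maps).
 * When dlclose debugging is enabled the pages are only mprotect'ed to
 * PROT_NONE instead of being unmapped. */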
1310 UT_STATIC void unmap_library(struct dso *dso)
1311 {
1312 if (dso->loadmap) {
1313 size_t i;
1314 for (i=0; i<dso->loadmap->nsegs; i++) {
1315 if (!dso->loadmap->segs[i].p_memsz)
1316 continue;
1317 if (!is_dlclose_debug_enable()) {
1318 munmap((void *)dso->loadmap->segs[i].addr,
1319 dso->loadmap->segs[i].p_memsz);
1320 } else {
1321 (void)mprotect((void *)dso->loadmap->segs[i].addr,
1322 dso->loadmap->segs[i].p_memsz, PROT_NONE);
1323 }
1324 }
1325 free(dso->loadmap);
1326 } else if (dso->map && dso->map_len) {
1327 if (!is_dlclose_debug_enable()) {
1328 munmap(dso->map, dso->map_len);
1329 } else {
1330 mprotect(dso->map, dso->map_len, PROT_NONE);
1331 }
1332 }
1333 }
1334
1335 UT_STATIC bool get_random(void *buf, size_t buflen)
1336 {
1337 int ret;
1338 int fd = open("/dev/urandom", O_RDONLY);
1339 if (fd < 0) {
1340 return false;
1341 }
1342
1343 ret = read(fd, buf, buflen);
1344 if (ret < 0) {
1345 close(fd);
1346 return false;
1347 }
1348
1349 close(fd);
1350 return true;
1351 }
1352
1353 UT_STATIC void fill_random_data(void *buf, size_t buflen)
1354 {
1355 uint64_t x;
1356 int i;
1357 int pos = 0;
1358 struct timespec ts;
1359 /* Try to use urandom to get the random number first */
1360 if (!get_random(buf, buflen)) {
1361 /* Can't get random number from /dev/urandom, generate from addr based on ASLR and time */
1362 for (i = 1; i <= (buflen / sizeof(x)); i++) {
1363 (void)clock_gettime(CLOCK_REALTIME, &ts);
1364 x = (((uint64_t)get_random) << 32) ^ (uint64_t)fill_random_data ^ ts.tv_nsec;
1365 memcpy((char *)buf + pos, &x, sizeof(x));
1366 pos += sizeof(x);
1367 }
1368 }
1369 return;
1370 }
1371
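/* Transparent huge pages are reported as supported only if
 * /sys/kernel/mm/transparent_hugepage/enabled is readable and is not set to
 * "[never]". */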
1372 static bool get_transparent_hugepages_supported(void)
1373 {
1374 int fd = -1;
1375 ssize_t read_size = 0;
1376 bool enable = false;
1377 char buf[HUGEPAGES_SUPPORTED_STR_SIZE] = {'0'};
1378
1379 fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
1380 if (fd < 0)
1381 goto done;
1382
1383 read_size = read(fd, buf, HUGEPAGES_SUPPORTED_STR_SIZE - 1);
1384 if (read_size < 0)
1385 goto close_fd;
1386
1387 buf[HUGEPAGES_SUPPORTED_STR_SIZE - 1] = '\0';
1388 if (strstr(buf, "[never]") == NULL)
1389 enable = true;
1390
1391 close_fd:
1392 close(fd);
1393 done:
1394 return enable;
1395 }
1396
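/* On LP64, return the largest power-of-two p_align among the PT_LOAD program
 * headers (at least PAGE_SIZE); on 32-bit targets always PAGE_SIZE. */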
1397 static size_t phdr_table_get_maxinum_alignment(Phdr *phdr_table, size_t phdr_count)
1398 {
1399 #if defined(__LP64__)
1400 size_t maxinum_alignment = PAGE_SIZE;
1401 size_t i = 0;
1402
1403 for (i = 0; i < phdr_count; ++i) {
1404 const Phdr *phdr = &phdr_table[i];
1405
1406 /* p_align must be 0, 1, or a positive, integral power of two */
1407 if ((phdr->p_type != PT_LOAD) || ((phdr->p_align & (phdr->p_align - 1)) != 0))
1408 continue;
1409
1410 if (phdr->p_align > maxinum_alignment)
1411 maxinum_alignment = phdr->p_align;
1412 }
1413
1414 return maxinum_alignment;
1415 #else
1416 return PAGE_SIZE;
1417 #endif
1418 }
1419
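/* Map an ELF object: parse its program headers, reserve a suitably aligned
 * address range (honoring reserved_params and huge-page alignment), map each
 * PT_LOAD segment, zero the bss tail, and record PT_DYNAMIC, PT_TLS,
 * PT_GNU_RELRO and PT_OHOS_RANDOMDATA information in the dso. */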
1420 UT_STATIC void *map_library(int fd, struct dso *dso, struct reserved_address_params *reserved_params)
1421 {
1422 Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
1423 void *allocated_buf=0;
1424 size_t phsize;
1425 size_t addr_min=SIZE_MAX, addr_max=0, map_len;
1426 size_t this_min, this_max;
1427 size_t nsegs = 0;
1428 off_t off_start;
1429 Ehdr *eh;
1430 Phdr *ph, *ph0;
1431 unsigned prot;
1432 unsigned char *map=MAP_FAILED, *base;
1433 size_t dyn=0;
1434 size_t tls_image=0;
1435 size_t i;
1436 int map_flags = MAP_PRIVATE;
1437 size_t start_addr;
1438 size_t start_alignment = PAGE_SIZE;
1439 bool hugepage_enabled = false;
1440
1441 ssize_t l = read(fd, buf, sizeof buf);
1442 eh = buf;
1443 if (l<0) return 0;
1444 if (l<sizeof *eh || (eh->e_type != ET_DYN && eh->e_type != ET_EXEC))
1445 goto noexec;
1446 phsize = eh->e_phentsize * eh->e_phnum;
1447 if (phsize > sizeof buf - sizeof *eh) {
1448 allocated_buf = malloc(phsize);
1449 if (!allocated_buf) return 0;
1450 l = pread(fd, allocated_buf, phsize, eh->e_phoff);
1451 if (l < 0) goto error;
1452 if (l != phsize) goto noexec;
1453 ph = ph0 = allocated_buf;
1454 } else if (eh->e_phoff + phsize > l) {
1455 l = pread(fd, buf+1, phsize, eh->e_phoff);
1456 if (l < 0) goto error;
1457 if (l != phsize) goto noexec;
1458 ph = ph0 = (void *)(buf + 1);
1459 } else {
1460 ph = ph0 = (void *)((char *)buf + eh->e_phoff);
1461 }
1462 for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
1463 if (ph->p_type == PT_DYNAMIC) {
1464 dyn = ph->p_vaddr;
1465 } else if (ph->p_type == PT_TLS) {
1466 tls_image = ph->p_vaddr;
1467 dso->tls.align = ph->p_align;
1468 dso->tls.len = ph->p_filesz;
1469 dso->tls.size = ph->p_memsz;
1470 } else if (ph->p_type == PT_GNU_RELRO) {
1471 dso->relro_start = ph->p_vaddr & -PAGE_SIZE;
1472 dso->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
1473 } else if (ph->p_type == PT_GNU_STACK) {
1474 if (!runtime && ph->p_memsz > __default_stacksize) {
1475 __default_stacksize =
1476 ph->p_memsz < DEFAULT_STACK_MAX ?
1477 ph->p_memsz : DEFAULT_STACK_MAX;
1478 }
1479 }
1480 if (ph->p_type != PT_LOAD) continue;
1481 nsegs++;
1482 if (ph->p_vaddr < addr_min) {
1483 addr_min = ph->p_vaddr;
1484 off_start = ph->p_offset;
1485 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1486 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1487 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1488 }
1489 if (ph->p_vaddr+ph->p_memsz > addr_max) {
1490 addr_max = ph->p_vaddr+ph->p_memsz;
1491 }
1492 }
1493 if (!dyn) goto noexec;
1494 if (DL_FDPIC && !(eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
1495 dso->loadmap = calloc(1, sizeof *dso->loadmap
1496 + nsegs * sizeof *dso->loadmap->segs);
1497 if (!dso->loadmap) goto error;
1498 dso->loadmap->nsegs = nsegs;
1499 for (ph=ph0, i=0; i<nsegs; ph=(void *)((char *)ph+eh->e_phentsize)) {
1500 if (ph->p_type != PT_LOAD) continue;
1501 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1502 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1503 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1504 map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE-1),
1505 prot, MAP_PRIVATE,
1506 fd, ph->p_offset & -PAGE_SIZE);
1507 if (map == MAP_FAILED) {
1508 unmap_library(dso);
1509 goto error;
1510 }
1511 dso->loadmap->segs[i].addr = (size_t)map +
1512 (ph->p_vaddr & PAGE_SIZE-1);
1513 dso->loadmap->segs[i].p_vaddr = ph->p_vaddr;
1514 dso->loadmap->segs[i].p_memsz = ph->p_memsz;
1515 i++;
1516 if (prot & PROT_WRITE) {
1517 size_t brk = (ph->p_vaddr & PAGE_SIZE-1)
1518 + ph->p_filesz;
1519 size_t pgbrk = brk + PAGE_SIZE-1 & -PAGE_SIZE;
1520 size_t pgend = brk + ph->p_memsz - ph->p_filesz
1521 + PAGE_SIZE-1 & -PAGE_SIZE;
1522 if (pgend > pgbrk && mmap_fixed(map+pgbrk,
1523 pgend-pgbrk, prot,
1524 MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
1525 -1, off_start) == MAP_FAILED)
1526 goto error;
1527 memset(map + brk, 0, pgbrk-brk);
1528 }
1529 }
1530 map = (void *)dso->loadmap->segs[0].addr;
1531 map_len = 0;
1532 goto done_mapping;
1533 }
1534 addr_max += PAGE_SIZE-1;
1535 addr_max &= -PAGE_SIZE;
1536 addr_min &= -PAGE_SIZE;
1537 off_start &= -PAGE_SIZE;
1538 map_len = addr_max - addr_min + off_start;
1539 start_addr = addr_min;
1540
1541 hugepage_enabled = get_transparent_hugepages_supported();
1542 if (hugepage_enabled) {
1543 size_t maxinum_alignment = phdr_table_get_maxinum_alignment(ph0, eh->e_phnum);
1544
1545 start_alignment = maxinum_alignment == KPMD_SIZE ? KPMD_SIZE : PAGE_SIZE;
1546 }
1547
1548 if (reserved_params) {
1549 if (map_len > reserved_params->reserved_size) {
1550 if (reserved_params->must_use_reserved) {
1551 goto error;
1552 }
1553 } else {
1554 start_addr = ((size_t)reserved_params->start_addr - 1 + PAGE_SIZE) & -PAGE_SIZE;
1555 map_flags |= MAP_FIXED;
1556 }
1557 }
1558
1559     /* We will pick a mapping_align-aligned address as the start of the dso,
1560      * so reserve a larger tmp_map_len (map_len rounded up to mapping_align,
1561      * plus almost one extra mapping_align) so there is enough room to slide
1562      * the mapping to the aligned location. */
1562 size_t mapping_align = start_alignment > LIBRARY_ALIGNMENT ? start_alignment : LIBRARY_ALIGNMENT;
1563 size_t tmp_map_len = ALIGN(map_len, mapping_align) + mapping_align - PAGE_SIZE;
1564
1565     /* If reserved_params exists, use start_addr as the preferred address for the mmap operation. */
1566 if (reserved_params) {
1567 map = DL_NOMMU_SUPPORT
1568 ? mmap((void *)start_addr, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
1569 : mmap((void *)start_addr, map_len, prot, map_flags, fd, off_start);
1570 if (map == MAP_FAILED) {
1571 goto error;
1572 }
1573 if (reserved_params && map_len < reserved_params->reserved_size) {
1574 reserved_params->reserved_size -= (map_len + (start_addr - (size_t)reserved_params->start_addr));
1575 reserved_params->start_addr = (void *)((uint8_t *)map + map_len);
1576 }
1577         /* If reserved_params does not exist, use real_map as the preferred address for the mmap operation. */
1578 } else {
1579 /* use tmp_map_len to mmap enough space for the dso with anonymous mapping */
1580 unsigned char *temp_map = mmap((void *)NULL, tmp_map_len, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1581 if (temp_map == MAP_FAILED) {
1582 goto error;
1583 }
1584
1585 /* find the mapping_align aligned address */
1586 unsigned char *real_map = (unsigned char*)ALIGN((uintptr_t)temp_map, mapping_align);
1587
1588         /* munmap the space we mapped above so that we can map the correct space again */
1589 munmap(temp_map, tmp_map_len);
1590
1591 map = DL_NOMMU_SUPPORT
1592 ? mmap(real_map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
1593 /* use map_len to mmap correct space for the dso with file mapping */
1594 : mmap(real_map, map_len, prot, map_flags, fd, off_start);
1595 if (map == MAP_FAILED) {
1596 goto error;
1597 }
1598 }
1599 dso->map = map;
1600 dso->map_len = map_len;
1601 /* If the loaded file is not relocatable and the requested address is
1602 * not available, then the load operation must fail. */
1603 if (eh->e_type != ET_DYN && addr_min && map!=(void *)addr_min) {
1604 errno = EBUSY;
1605 goto error;
1606 }
1607 base = map - addr_min;
1608 dso->phdr = 0;
1609 dso->phnum = 0;
1610 for (ph=ph0, i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
1611 if (ph->p_type == PT_OHOS_RANDOMDATA) {
1612 fill_random_data((void *)(ph->p_vaddr + base), ph->p_memsz);
1613 continue;
1614 }
1615 if (ph->p_type != PT_LOAD) continue;
1616 /* Check if the programs headers are in this load segment, and
1617 * if so, record the address for use by dl_iterate_phdr. */
1618 if (!dso->phdr && eh->e_phoff >= ph->p_offset
1619 && eh->e_phoff+phsize <= ph->p_offset+ph->p_filesz) {
1620 dso->phdr = (void *)(base + ph->p_vaddr
1621 + (eh->e_phoff-ph->p_offset));
1622 dso->phnum = eh->e_phnum;
1623 dso->phentsize = eh->e_phentsize;
1624 }
1625 this_min = ph->p_vaddr & -PAGE_SIZE;
1626 this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
1627 off_start = ph->p_offset & -PAGE_SIZE;
1628 prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
1629 ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
1630 ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
1631 /* Reuse the existing mapping for the lowest-address LOAD */
1632 if ((ph->p_vaddr & -PAGE_SIZE) != addr_min || DL_NOMMU_SUPPORT)
1633 if (mmap_fixed(base+this_min, this_max-this_min, prot, MAP_PRIVATE|MAP_FIXED, fd, off_start) == MAP_FAILED)
1634 goto error;
1635 if ((ph->p_flags & PF_X) && (ph->p_align == KPMD_SIZE) && hugepage_enabled)
1636 madvise(base + this_min, this_max - this_min, MADV_HUGEPAGE);
1637 if (ph->p_memsz > ph->p_filesz && (ph->p_flags&PF_W)) {
1638 size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
1639 size_t pgbrk = brk+PAGE_SIZE-1 & -PAGE_SIZE;
1640 size_t zeromap_size = (size_t)base+this_max-pgbrk;
1641 memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE-1);
1642 if (pgbrk-(size_t)base < this_max && mmap_fixed((void *)pgbrk, zeromap_size, prot, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
1643 goto error;
1644 set_bss_vma_name(dso->name, (void *)pgbrk, zeromap_size);
1645 }
1646 }
1647 for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
1648 if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
1649 if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC)
1650 && errno != ENOSYS)
1651 goto error;
1652 break;
1653 }
1654 done_mapping:
1655 dso->base = base;
1656 dso->dynv = laddr(dso, dyn);
1657 if (dso->tls.size) dso->tls.image = laddr(dso, tls_image);
1658 free(allocated_buf);
1659 return map;
1660 noexec:
1661 errno = ENOEXEC;
1662 error:
1663 if (map!=MAP_FAILED) unmap_library(dso);
1664 free(allocated_buf);
1665 return 0;
1666 }
1667
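/* Try to open `name` under each ':'- or '\n'-separated prefix in s. Returns
 * an fd on success, -1 if the list is exhausted, or -2 on errors that should
 * inhibit any further path search. */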
1668 static int path_open(const char *name, const char *s, char *buf, size_t buf_size)
1669 {
1670 size_t l;
1671 int fd;
1672 for (;;) {
1673 s += strspn(s, ":\n");
1674 l = strcspn(s, ":\n");
1675 if (l-1 >= INT_MAX) return -1;
1676 if (snprintf(buf, buf_size, "%.*s/%s", (int)l, s, name) < buf_size) {
1677 if ((fd = open(buf, O_RDONLY|O_CLOEXEC))>=0) return fd;
1678 switch (errno) {
1679 case ENOENT:
1680 case ENOTDIR:
1681 case EACCES:
1682 case ENAMETOOLONG:
1683 break;
1684 default:
1685 /* Any negative value but -1 will inhibit
1686              * further path search. */
1687 return -2;
1688 }
1689 }
1690 s += l;
1691 }
1692 }
1693
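/* Expand $ORIGIN / ${ORIGIN} in DT_RPATH/DT_RUNPATH into p->rpath. Expansion
 * is skipped for a suid/sgid/AT_SECURE main program, whose pathname is under
 * the control of the caller of execve. */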
1694 UT_STATIC int fixup_rpath(struct dso *p, char *buf, size_t buf_size)
1695 {
1696 size_t n, l;
1697 const char *s, *t, *origin;
1698 char *d;
1699 if (p->rpath || !p->rpath_orig) return 0;
1700 if (!strchr(p->rpath_orig, '$')) {
1701 p->rpath = p->rpath_orig;
1702 return 0;
1703 }
1704 n = 0;
1705 s = p->rpath_orig;
1706 while ((t=strchr(s, '$'))) {
1707 if (strncmp(t, "$ORIGIN", 7) && strncmp(t, "${ORIGIN}", 9))
1708 return 0;
1709 s = t+1;
1710 n++;
1711 }
1712 if (n > SSIZE_MAX/PATH_MAX) return 0;
1713
1714 if (p->kernel_mapped) {
1715 /* $ORIGIN searches cannot be performed for the main program
1716 * when it is suid/sgid/AT_SECURE. This is because the
1717 * pathname is under the control of the caller of execve.
1718 * For libraries, however, $ORIGIN can be processed safely
1719 * since the library's pathname came from a trusted source
1720 * (either system paths or a call to dlopen). */
1721 if (libc.secure)
1722 return 0;
1723 l = readlink("/proc/self/exe", buf, buf_size);
1724 if (l == -1) switch (errno) {
1725 case ENOENT:
1726 case ENOTDIR:
1727 case EACCES:
1728 break;
1729 default:
1730 return -1;
1731 }
1732 if (l >= buf_size)
1733 return 0;
1734 buf[l] = 0;
1735 origin = buf;
1736 } else {
1737 origin = p->name;
1738 }
1739 t = strrchr(origin, '/');
1740 if (t) {
1741 l = t-origin;
1742 } else {
1743 /* Normally p->name will always be an absolute or relative
1744 * pathname containing at least one '/' character, but in the
1745 * case where ldso was invoked as a command to execute a
1746 * program in the working directory, app.name may not. Fix. */
1747 origin = ".";
1748 l = 1;
1749 }
1750 /* Disallow non-absolute origins for suid/sgid/AT_SECURE. */
1751 if (libc.secure && *origin != '/')
1752 return 0;
1753 p->rpath = malloc(strlen(p->rpath_orig) + n*l + 1);
1754 if (!p->rpath) return -1;
1755
1756 d = p->rpath;
1757 s = p->rpath_orig;
1758 while ((t=strchr(s, '$'))) {
1759 memcpy(d, s, t-s);
1760 d += t-s;
1761 memcpy(d, origin, l);
1762 d += l;
1763 /* It was determined previously that the '$' is followed
1764 * either by "ORIGIN" or "{ORIGIN}". */
1765 s = t + 7 + 2*(t[1]=='{');
1766 }
1767 strcpy(d, s);
1768 return 0;
1769 }
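/* Worked example (illustrative, hypothetical paths): if a library loaded
 * from "/system/app/demo/libdemo.so" carries the rpath "$ORIGIN/../lib",
 * fixup_rpath() above substitutes the directory portion of the origin and
 * stores "/system/app/demo/../lib" in p->rpath; "${ORIGIN}" expands the
 * same way. */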
1770
1771 static void decode_dyn(struct dso *p)
1772 {
1773 size_t dyn[DYN_CNT];
1774 size_t flags1 = 0;
1775 decode_vec(p->dynv, dyn, DYN_CNT);
1776 search_vec(p->dynv, &flags1, DT_FLAGS_1);
1777 if (flags1 & DF_1_GLOBAL) {
1778 LD_LOGI("Add DF_1_GLOBAL for %{public}s", p->name);
1779 p->is_global = true;
1780 }
1781 if (flags1 & DF_1_NODELETE) {
1782 p->flags |= DSO_FLAGS_NODELETE;
1783 }
1784 p->syms = laddr(p, dyn[DT_SYMTAB]);
1785 p->strings = laddr(p, dyn[DT_STRTAB]);
1786 if (dyn[0]&(1<<DT_HASH))
1787 p->hashtab = laddr(p, dyn[DT_HASH]);
1788 if (dyn[0]&(1<<DT_RPATH))
1789 p->rpath_orig = p->strings + dyn[DT_RPATH];
1790 if (dyn[0]&(1<<DT_RUNPATH))
1791 p->rpath_orig = p->strings + dyn[DT_RUNPATH];
1792 if (dyn[0]&(1<<DT_PLTGOT))
1793 p->got = laddr(p, dyn[DT_PLTGOT]);
1794 if (search_vec(p->dynv, dyn, DT_GNU_HASH))
1795 p->ghashtab = laddr(p, *dyn);
1796 if (search_vec(p->dynv, dyn, DT_VERSYM))
1797 p->versym = laddr(p, *dyn);
1798 if (search_vec(p->dynv, dyn, DT_VERDEF))
1799 p->verdef = laddr(p, *dyn);
1800 if (search_vec(p->dynv, dyn, DT_VERNEED))
1801 p->verneed = laddr(p, *dyn);
1802 }
1803
1804 UT_STATIC size_t count_syms(struct dso *p)
1805 {
1806 if (p->hashtab) return p->hashtab[1];
1807
1808 size_t nsym, i;
1809 uint32_t *buckets = p->ghashtab + 4 + (p->ghashtab[2]*sizeof(size_t)/4);
1810 uint32_t *hashval;
1811 for (i = nsym = 0; i < p->ghashtab[0]; i++) {
1812 if (buckets[i] > nsym)
1813 nsym = buckets[i];
1814 }
1815 if (nsym) {
1816 hashval = buckets + p->ghashtab[0] + (nsym - p->ghashtab[1]);
1817 do nsym++;
1818 while (!(*hashval++ & 1));
1819 }
1820 return nsym;
1821 }
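/* For illustration: DT_GNU_HASH does not store a symbol count, so
 * count_syms() above starts from the highest bucket value and walks that
 * symbol's hash chain until it hits the entry whose low bit is set (the
 * end-of-chain marker). With a hypothetical largest bucket of 27 and chain
 * values at indices 27 and 28 even but odd at 29, the loop stops at 29 and
 * reports 30 symbols. */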
1822
1823 static void *dl_mmap(size_t n)
1824 {
1825 void *p;
1826 int prot = PROT_READ|PROT_WRITE, flags = MAP_ANONYMOUS|MAP_PRIVATE;
1827 #ifdef SYS_mmap2
1828 p = (void *)__syscall(SYS_mmap2, 0, n, prot, flags, -1, 0);
1829 #else
1830 p = (void *)__syscall(SYS_mmap, 0, n, prot, flags, -1, 0);
1831 #endif
1832 return (unsigned long)p > -4096UL ? 0 : p;
1833 }
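/* For illustration: the raw mmap/mmap2 syscall returns -errno on failure,
 * so any value above -4096UL (the last page of the address space) is an
 * error code and dl_mmap() above converts it to a null pointer. */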
1834
1835 static void makefuncdescs(struct dso *p)
1836 {
1837 static int self_done;
1838 size_t nsym = count_syms(p);
1839 size_t i, size = nsym * sizeof(*p->funcdescs);
1840
1841 if (!self_done) {
1842 p->funcdescs = dl_mmap(size);
1843 self_done = 1;
1844 } else {
1845 p->funcdescs = malloc(size);
1846 }
1847 if (!p->funcdescs) {
1848 if (!runtime) a_crash();
1849 error("Error allocating function descriptors for %s", p->name);
1850 longjmp(*rtld_fail, 1);
1851 }
1852 for (i=0; i<nsym; i++) {
1853 if ((p->syms[i].st_info&0xf)==STT_FUNC && p->syms[i].st_shndx) {
1854 p->funcdescs[i].addr = laddr(p, p->syms[i].st_value);
1855 p->funcdescs[i].got = p->got;
1856 } else {
1857 p->funcdescs[i].addr = 0;
1858 p->funcdescs[i].got = 0;
1859 }
1860 }
1861 }
1862
1863 static void get_sys_path(ns_configor *conf)
1864 {
1865 LD_LOGD("get_sys_path g_is_asan:%{public}d", g_is_asan);
1866 /* Use the ini file's system paths when ASan is not enabled */
1867 if (!g_is_asan) {
1868 sys_path = conf->get_sys_paths();
1869 } else {
1870 /* Use the ini file's ASan system paths when ASan is enabled.
1871 * Merge the two strings when both sys_paths and asan_sys_paths are valid. */
1872 sys_path = conf->get_asan_sys_paths();
1873 char *sys_path_default = conf->get_sys_paths();
1874 if (!sys_path) {
1875 sys_path = sys_path_default;
1876 } else if (sys_path_default) {
1877 size_t newlen = strlen(sys_path) + strlen(sys_path_default) + 2;
1878 char *new_syspath = malloc(newlen);
1879 memset(new_syspath, 0, newlen);
1880 strcpy(new_syspath, sys_path);
1881 strcat(new_syspath, ":");
1882 strcat(new_syspath, sys_path_default);
1883 sys_path = new_syspath;
1884 }
1885 }
1886 if (!sys_path) sys_path = "/lib:/usr/local/lib:/usr/lib:/lib64";
1887 LD_LOGD("get_sys_path sys_path:%{public}s", sys_path);
1888 }
1889
1890 static struct dso *search_dso_by_name(const char *name, const ns_t *ns) {
1891 LD_LOGD("search_dso_by_name name:%{public}s, ns_name:%{public}s", name, ns ? ns->ns_name: "NULL");
1892 for (size_t i = 0; i < ns->ns_dsos->num; i++){
1893 struct dso *p = ns->ns_dsos->dsos[i];
1894 if (p->shortname && !strcmp(p->shortname, name)) {
1895 LD_LOGD("search_dso_by_name found name:%{public}s, ns_name:%{public}s", name, ns ? ns->ns_name: "NULL");
1896 return p;
1897 }
1898 }
1899 return NULL;
1900 }
1901
1902 static struct dso *search_dso_by_fstat(const struct stat *st, const ns_t *ns, uint64_t file_offset) {
1903 LD_LOGD("search_dso_by_fstat ns_name:%{public}s", ns ? ns->ns_name : "NULL");
1904 for (size_t i = 0; i < ns->ns_dsos->num; i++){
1905 struct dso *p = ns->ns_dsos->dsos[i];
1906 if (p->dev == st->st_dev && p->ino == st->st_ino && p->file_offset == file_offset) {
1907 LD_LOGD("search_dso_by_fstat found dev:%{public}lu, ino:%{public}lu, ns_name:%{public}s",
1908 st->st_dev, st->st_ino, ns ? ns->ns_name : "NULL");
1909 return p;
1910 }
1911 }
1912 return NULL;
1913 }
1914
1915 static inline int app_has_same_name_so(const char *so_name, const ns_t *ns)
1916 {
1917 int fd = -1;
1918 /* Only check system apps. */
1919 if (((ns->flag & LOCAL_NS_PREFERED) != 0) && ns->lib_paths) {
1920 char tmp_buf[PATH_MAX+1];
1921 fd = path_open(so_name, ns->lib_paths, tmp_buf, sizeof tmp_buf);
1922 }
1923 return fd;
1924 }
1925
1926 /* Find loaded so by name */
1927 static struct dso *find_library_by_name(const char *name, const ns_t *ns, bool check_inherited)
1928 {
1929 LD_LOGD("find_library_by_name name:%{public}s, ns_name:%{public}s, check_inherited:%{public}d",
1930 name,
1931 ns ? ns->ns_name : "NULL",
1932 !!check_inherited);
1933 struct dso *p = search_dso_by_name(name, ns);
1934 if (p) return p;
1935 if (check_inherited && ns->ns_inherits) {
1936 for (size_t i = 0; i < ns->ns_inherits->num; i++){
1937 ns_inherit * inherit = ns->ns_inherits->inherits[i];
1938 p = search_dso_by_name(name, inherit->inherited_ns);
1939 if (p && is_sharable(inherit, name)) {
1940 if (app_has_same_name_so(name, ns) != -1) {
1941 return NULL;
1942 }
1943 return p;
1944 }
1945 }
1946 }
1947 return NULL;
1948 }
1949 /* Find loaded so by file stat */
1950 UT_STATIC struct dso *find_library_by_fstat(const struct stat *st, const ns_t *ns, bool check_inherited, uint64_t file_offset) {
1951 LD_LOGD("find_library_by_fstat ns_name:%{public}s, check_inherited :%{public}d",
1952 ns ? ns->ns_name : "NULL",
1953 !!check_inherited);
1954 struct dso *p = search_dso_by_fstat(st, ns, file_offset);
1955 if (p) return p;
1956 if (check_inherited && ns->ns_inherits) {
1957 for (size_t i = 0; i < ns->ns_inherits->num; i++){
1958 ns_inherit *inherit = ns->ns_inherits->inherits[i];
1959 p = search_dso_by_fstat(st, inherit->inherited_ns, file_offset);
1960 if (p && is_sharable(inherit, p->shortname)) return p;
1961 }
1962 }
1963 return NULL;
1964 }
1965
1966 #ifndef LOAD_ORDER_RANDOMIZATION
1967 /* add namespace function */
1968 struct dso *load_library(
1969 const char *name, struct dso *needed_by, ns_t *namespace, bool check_inherited, struct reserved_address_params *reserved_params)
1970 {
1971 char buf[PATH_MAX+1];
1972 const char *pathname;
1973 unsigned char *map;
1974 struct dso *p, temp_dso = {0};
1975 int fd;
1976 struct stat st;
1977 size_t alloc_size;
1978 int n_th = 0;
1979 int is_self = 0;
1980
1981 if (!*name) {
1982 errno = EINVAL;
1983 return 0;
1984 }
1985
1986 /* Catch and block attempts to reload the implementation itself */
1987 if (name[0]=='l' && name[1]=='i' && name[2]=='b') {
1988 static const char reserved[] =
1989 "c.pthread.rt.m.dl.util.xnet.";
1990 const char *rp, *next;
1991 for (rp=reserved; *rp; rp=next) {
1992 next = strchr(rp, '.') + 1;
1993 if (strncmp(name+3, rp, next-rp) == 0)
1994 break;
1995 }
1996 if (*rp) {
1997 if (ldd_mode) {
1998 /* Track which names have been resolved
1999 * and only report each one once. */
2000 static unsigned reported;
2001 unsigned mask = 1U<<(rp-reserved);
2002 if (!(reported & mask)) {
2003 reported |= mask;
2004 dprintf(1, "\t%s => %s (%p)\n",
2005 name, ldso.name,
2006 ldso.base);
2007 }
2008 }
2009 is_self = 1;
2010 }
2011 }
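/* For illustration: a request such as "libm.so" walks the reserved string
 * "c.pthread.rt.m.dl.util.xnet." component by component, matches the "m."
 * entry, and is therefore redirected to the linker/libc itself (is_self)
 * instead of being mapped a second time. */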
2012 if (!strcmp(name, ldso.name)) is_self = 1;
2013 if (is_self) {
2014 if (!ldso.prev) {
2015 tail->next = &ldso;
2016 ldso.prev = tail;
2017 tail = &ldso;
2018 ldso.namespace = namespace;
2019 ns_add_dso(namespace, &ldso);
2020 }
2021 return &ldso;
2022 }
2023 if (strchr(name, '/')) {
2024 pathname = name;
2025
2026 if (!is_accessible(namespace, pathname, g_is_asan, check_inherited)) {
2027 fd = -1;
2028 LD_LOGD("load_library is_accessible return false,fd = -1");
2029 } else {
2030 fd = open(name, O_RDONLY|O_CLOEXEC);
2031 LD_LOGD("load_library is_accessible return true, open file fd:%{public}d .", fd);
2032 }
2033 } else {
2034 /* Search for the name to see if it's already loaded */
2035 /* Search in namespace */
2036 p = find_library_by_name(name, namespace, check_inherited);
2037 if (p) {
2038 LD_LOGD("load_library find_library_by_name found p, return it!");
2039 return p;
2040 }
2041 if (strlen(name) > NAME_MAX) {
2042 LD_LOGE("load_library name exceeding the maximum length, return 0!");
2043 return 0;
2044 }
2045 fd = -1;
2046 if (namespace->env_paths) fd = path_open(name, namespace->env_paths, buf, sizeof buf);
2047 for (p = needed_by; fd == -1 && p; p = p->needed_by) {
2048 if (fixup_rpath(p, buf, sizeof buf) < 0) {
2049 LD_LOGD("load_library Inhibit further search,fd = -2.");
2050 fd = -2; /* Inhibit further search. */
2051 }
2052 if (p->rpath) {
2053 fd = path_open(name, p->rpath, buf, sizeof buf);
2054 LD_LOGD("load_library p->rpath path_open fd:%{public}d.", fd);
2055 }
2056
2057 }
2058 if (g_is_asan) {
2059 fd = handle_asan_path_open(fd, name, namespace, buf, sizeof buf);
2060 LD_LOGD("load_library handle_asan_path_open fd:%{public}d.", fd);
2061 } else {
2062 if (fd == -1 && namespace->lib_paths) {
2063 fd = path_open(name, namespace->lib_paths, buf, sizeof buf);
2064 LD_LOGD("load_library no asan lib_paths path_open fd:%{public}d.", fd);
2065 }
2066 }
2067 pathname = buf;
2068 LD_LOGD("load_library lib_paths pathname:%{public}s.", pathname);
2069 }
2070 if (fd < 0) {
2071 if (!check_inherited || !namespace->ns_inherits) return 0;
2072 /* Load lib in inherited namespace. Do not check inherited again.*/
2073 for (size_t i = 0; i < namespace->ns_inherits->num; i++) {
2074 ns_inherit *inherit = namespace->ns_inherits->inherits[i];
2075 if (strchr(name, '/')==0 && !is_sharable(inherit, name)) continue;
2076 p = load_library(name, needed_by, inherit->inherited_ns, false, reserved_params);
2077 if (p) {
2078 LD_LOGD("load_library search in inherited, found p ,inherited_ns name:%{public}s",
2079 inherit->inherited_ns->ns_name);
2080 return p;
2081 }
2082 }
2083 return 0;
2084 }
2085 if (fstat(fd, &st) < 0) {
2086 close(fd);
2087 LD_LOGE("load_library fstat < 0,return 0!");
2088 return 0;
2089 }
2090 /* Search in namespace */
2091 p = find_library_by_fstat(&st, namespace, check_inherited, 0);
2092 if (p) {
2093 /* If this library was previously loaded with a
2094 * pathname but a search found the same inode,
2095 * setup its shortname so it can be found by name. */
2096 if (!p->shortname && pathname != name)
2097 p->shortname = strrchr(p->name, '/')+1;
2098 close(fd);
2099 LD_LOGD("load_library find_library_by_fstat, found p and return it!");
2100 return p;
2101 }
2102 map = noload ? 0 : map_library(fd, &temp_dso, reserved_params);
2103 close(fd);
2104 if (!map) return 0;
2105
2106 /* Avoid the danger of getting two versions of libc mapped into the
2107 * same process when an absolute pathname was used. The symbols
2108 * checked are chosen to catch both musl and glibc, and to avoid
2109 * false positives from interposition-hack libraries. */
2110 decode_dyn(&temp_dso);
2111 if (find_sym(&temp_dso, "__libc_start_main", 1).sym &&
2112 find_sym(&temp_dso, "stdin", 1).sym) {
2113 unmap_library(&temp_dso);
2114 return load_library("libc.so", needed_by, namespace, true, reserved_params);
2115 }
2116 /* Past this point, if we haven't reached runtime yet, ldso has
2117 * committed either to use the mapped library or to abort execution.
2118 * Unmapping is not possible, so we can safely reclaim gaps. */
2119 if (!runtime) reclaim_gaps(&temp_dso);
2120
2121 /* Allocate storage for the new DSO. When there is TLS, this
2122 * storage must include a reservation for all pre-existing
2123 * threads to obtain copies of both the new TLS, and an
2124 * extended DTV capable of storing an additional slot for
2125 * the newly-loaded DSO. */
2126 alloc_size = sizeof *p + strlen(pathname) + 1;
2127 if (runtime && temp_dso.tls.image) {
2128 size_t per_th = temp_dso.tls.size + temp_dso.tls.align
2129 + sizeof(void *) * (tls_cnt+3);
2130 n_th = libc.threads_minus_1 + 1;
2131 if (n_th > SSIZE_MAX / per_th) alloc_size = SIZE_MAX;
2132 else alloc_size += n_th * per_th;
2133 }
2134 p = calloc(1, alloc_size);
2135 if (!p) {
2136 unmap_library(&temp_dso);
2137 return 0;
2138 }
2139 memcpy(p, &temp_dso, sizeof temp_dso);
2140 p->dev = st.st_dev;
2141 p->ino = st.st_ino;
2142 p->needed_by = needed_by;
2143 p->name = p->buf;
2144 p->runtime_loaded = runtime;
2145 strcpy(p->name, pathname);
2146 /* Add a shortname only if name arg was not an explicit pathname. */
2147 if (pathname != name) p->shortname = strrchr(p->name, '/')+1;
2148 if (p->tls.image) {
2149 p->tls_id = ++tls_cnt;
2150 tls_align = MAXP2(tls_align, p->tls.align);
2151 #ifdef TLS_ABOVE_TP
2152 p->tls.offset = tls_offset + ( (p->tls.align-1) &
2153 (-tls_offset + (uintptr_t)p->tls.image) );
2154 tls_offset = p->tls.offset + p->tls.size;
2155 #else
2156 tls_offset += p->tls.size + p->tls.align - 1;
2157 tls_offset -= (tls_offset + (uintptr_t)p->tls.image)
2158 & (p->tls.align-1);
2159 p->tls.offset = tls_offset;
2160 #endif
2161 p->new_dtv = (void *)(-sizeof(size_t) &
2162 (uintptr_t)(p->name+strlen(p->name)+sizeof(size_t)));
2163 p->new_tls = (void *)(p->new_dtv + n_th*(tls_cnt+1));
2164 if (tls_tail) tls_tail->next = &p->tls;
2165 else libc.tls_head = &p->tls;
2166 tls_tail = &p->tls;
2167 }
2168
2169 tail->next = p;
2170 p->prev = tail;
2171 tail = p;
2172
2173 /* Add dso to namespace */
2174 p->namespace = namespace;
2175 ns_add_dso(namespace, p);
2176 if (runtime)
2177 p->by_dlopen = 1;
2178
2179 if (DL_FDPIC) makefuncdescs(p);
2180
2181 if (ldd_mode) dprintf(1, "\t%s => %s (%p)\n", name, pathname, p->base);
2182
2183 return p;
2184 }
2185
2186 static void load_direct_deps(struct dso *p, ns_t *namespace, struct reserved_address_params *reserved_params)
2187 {
2188 size_t i, cnt=0;
2189
2190 if (p->deps) return;
2191 /* For head, all preloads are direct pseudo-dependencies.
2192 * Count and include them now to avoid realloc later. */
2193 if (p==head) for (struct dso *q=p->next; q; q=q->next)
2194 cnt++;
2195 for (i=0; p->dynv[i]; i+=2)
2196 if (p->dynv[i] == DT_NEEDED) cnt++;
2197 /* Use builtin buffer for apps with no external deps, to
2198 * preserve property of no runtime failure paths. */
2199 p->deps = (p==head && cnt<2) ? builtin_deps :
2200 calloc(cnt+1, sizeof *p->deps);
2201 if (!p->deps) {
2202 error("Error loading dependencies for %s", p->name);
2203 if (runtime) longjmp(*rtld_fail, 1);
2204 }
2205 cnt=0;
2206 if (p==head) for (struct dso *q=p->next; q; q=q->next)
2207 p->deps[cnt++] = q;
2208 for (i=0; p->dynv[i]; i+=2) {
2209 if (p->dynv[i] != DT_NEEDED) continue;
2210 struct dso *dep = load_library(p->strings + p->dynv[i+1], p, namespace, true, reserved_params);
2211 LD_LOGD("loading shared library %{public}s: (needed by %{public}s)", p->strings + p->dynv[i+1], p->name);
2212 if (!dep) {
2213 error("Error loading shared library %s: %m (needed by %s)",
2214 p->strings + p->dynv[i+1], p->name);
2215 if (runtime) longjmp(*rtld_fail, 1);
2216 continue;
2217 }
2218 p->deps[cnt++] = dep;
2219 }
2220 p->deps[cnt] = 0;
2221 p->ndeps_direct = cnt;
2222 for (i = 0; i < p->ndeps_direct; i++) {
2223 add_dso_parent(p->deps[i], p);
2224 }
2225 }
2226
2227 static void load_deps(struct dso *p, struct reserved_address_params *reserved_params)
2228 {
2229 if (p->deps) return;
2230 for (; p; p=p->next)
2231 load_direct_deps(p, p->namespace, reserved_params);
2232 }
2233 #endif
2234
2235 static void extend_bfs_deps(struct dso *p)
2236 {
2237 size_t i, j, cnt, ndeps_all;
2238 struct dso **tmp;
2239
2240 /* Can't use realloc if the original p->deps was allocated at
2241 * program entry and malloc has been replaced, or if it's
2242 * the builtin non-allocated trivial main program deps array. */
2243 int no_realloc = (__malloc_replaced && !p->runtime_loaded)
2244 || p->deps == builtin_deps;
2245
2246 if (p->bfs_built) return;
2247 ndeps_all = p->ndeps_direct;
2248
2249 /* Mark existing (direct) deps so they won't be duplicated. */
2250 for (i=0; p->deps[i]; i++)
2251 p->deps[i]->mark = 1;
2252
2253 /* For each dependency already in the list, copy its list of direct
2254 * dependencies to the list, excluding any items already in the
2255 * list. Note that the list this loop iterates over will grow during
2256 * the loop, but since duplicates are excluded, growth is bounded. */
2257 for (i=0; p->deps[i]; i++) {
2258 struct dso *dep = p->deps[i];
2259 for (j=cnt=0; j<dep->ndeps_direct; j++)
2260 if (!dep->deps[j]->mark) cnt++;
2261 tmp = no_realloc ?
2262 malloc(sizeof(*tmp) * (ndeps_all+cnt+1)) :
2263 realloc(p->deps, sizeof(*tmp) * (ndeps_all+cnt+1));
2264 if (!tmp) {
2265 error("Error recording dependencies for %s", p->name);
2266 if (runtime) longjmp(*rtld_fail, 1);
2267 continue;
2268 }
2269 if (no_realloc) {
2270 memcpy(tmp, p->deps, sizeof(*tmp) * (ndeps_all+1));
2271 no_realloc = 0;
2272 }
2273 p->deps = tmp;
2274 for (j=0; j<dep->ndeps_direct; j++) {
2275 if (dep->deps[j]->mark) continue;
2276 dep->deps[j]->mark = 1;
2277 p->deps[ndeps_all++] = dep->deps[j];
2278 }
2279 p->deps[ndeps_all] = 0;
2280 }
2281 p->bfs_built = 1;
2282 for (p=head; p; p=p->next)
2283 p->mark = 0;
2284 }
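/* For illustration (hypothetical names): with direct deps A -> {B, C},
 * B -> {C, D} and C -> {D}, the loop above grows A's list in place from
 * {B, C} to {B, C, D}: B contributes D, C is already marked so it adds
 * nothing, and all marks are cleared again before returning. */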
2285
2286 #ifndef LOAD_ORDER_RANDOMIZATION
2287 static void load_preload(char *s, ns_t *ns)
2288 {
2289 int tmp;
2290 char *z;
2291 for (z=s; *z; s=z) {
2292 for ( ; *s && (isspace(*s) || *s==':'); s++);
2293 for (z=s; *z && !isspace(*z) && *z!=':'; z++);
2294 tmp = *z;
2295 *z = 0;
2296 load_library(s, 0, ns, true, NULL);
2297 *z = tmp;
2298 }
2299 }
2300 #endif
2301
2302 static void add_syms(struct dso *p)
2303 {
2304 if (!p->syms_next && syms_tail != p) {
2305 syms_tail->syms_next = p;
2306 syms_tail = p;
2307 }
2308 }
2309
2310 static void revert_syms(struct dso *old_tail)
2311 {
2312 struct dso *p, *next;
2313 /* Chop off the tail of the list of dsos that participate in
2314 * the global symbol table, reverting them to RTLD_LOCAL. */
2315 for (p=old_tail; p; p=next) {
2316 next = p->syms_next;
2317 p->syms_next = 0;
2318 }
2319 syms_tail = old_tail;
2320 }
2321
2322 static void do_mips_relocs(struct dso *p, size_t *got)
2323 {
2324 size_t i, j, rel[2];
2325 unsigned char *base = p->base;
2326 i=0; search_vec(p->dynv, &i, DT_MIPS_LOCAL_GOTNO);
2327 if (p==&ldso) {
2328 got += i;
2329 } else {
2330 while (i--) *got++ += (size_t)base;
2331 }
2332 j=0; search_vec(p->dynv, &j, DT_MIPS_GOTSYM);
2333 i=0; search_vec(p->dynv, &i, DT_MIPS_SYMTABNO);
2334 Sym *sym = p->syms + j;
2335 rel[0] = (unsigned char *)got - base;
2336 for (i-=j; i; i--, sym++, rel[0]+=sizeof(size_t)) {
2337 rel[1] = R_INFO(sym-p->syms, R_MIPS_JUMP_SLOT);
2338 do_relocs(p, rel, sizeof rel, 2);
2339 }
2340 }
2341
2342 static uint8_t* sleb128_decoder(uint8_t* current, uint8_t* end, size_t* value)
2343 {
2344 size_t result = 0;
2345 static const size_t size = CHAR_BIT * sizeof(result);
2346
2347 size_t shift = 0;
2348 uint8_t byte;
2349
2350 do {
2351 if (current >= end) {
2352 a_crash();
2353 }
2354
2355 byte = *current++;
2356 result |= ((size_t)(byte & 127) << shift);
2357 shift += 7;
2358 } while (byte & 128);
2359
2360 if (shift < size && (byte & 64)) {
2361 result |= -((size_t)(1) << shift);
2362 }
2363
2364 *value = result;
2365
2366 return current;
2367 }
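/* Worked examples (illustrative) for the SLEB128 decoder above, which
 * accumulates 7 payload bits per byte and sign-extends when bit 6 of the
 * last byte is set:
 *   { 0x7f }             -> payload 127, sign bit set    -> value -1
 *   { 0x80, 0x01 }       -> 0 | (1 << 7), sign bit clear -> value 128
 *   { 0x9b, 0xf1, 0x59 } -> 27 | (113 << 7) | (89 << 14), sign-extended
 *                           at shift 21                  -> value -624485
 * Reading past 'end' is answered with a_crash() rather than a bad value. */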
2368
2369 static void do_android_relocs(struct dso *p, size_t dt_name, size_t dt_size)
2370 {
2371 size_t android_rel_addr = 0, android_rel_size = 0;
2372 uint8_t *android_rel_curr, *android_rel_end;
2373
2374 search_vec(p->dynv, &android_rel_addr, dt_name);
2375 search_vec(p->dynv, &android_rel_size, dt_size);
2376
2377 if (!android_rel_addr || (android_rel_size < 4)) {
2378 return;
2379 }
2380
2381 android_rel_curr = laddr(p, android_rel_addr);
2382 if (memcmp(android_rel_curr, "APS2", ANDROID_REL_SIGN_SIZE)) {
2383 return;
2384 }
2385
2386 android_rel_curr += ANDROID_REL_SIGN_SIZE;
2387 android_rel_size -= ANDROID_REL_SIGN_SIZE;
2388
2389 android_rel_end = android_rel_curr + android_rel_size;
2390
2391 size_t relocs_num;
2392 size_t rel[3] = {0};
2393
2394 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &relocs_num);
2395 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[0]);
2396
2397 for (size_t i = 0; i < relocs_num;) {
2398
2399 size_t group_size, group_flags;
2400
2401 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_size);
2402 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_flags);
2403
2404 size_t group_r_offset_delta = 0;
2405
2406 if (group_flags & RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG) {
2407 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &group_r_offset_delta);
2408 }
2409
2410 if (group_flags & RELOCATION_GROUPED_BY_INFO_FLAG) {
2411 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[1]);
2412 }
2413
2414 const size_t addend_flags = group_flags & (RELOCATION_GROUP_HAS_ADDEND_FLAG | RELOCATION_GROUPED_BY_ADDEND_FLAG);
2415
2416 if (addend_flags == RELOCATION_GROUP_HAS_ADDEND_FLAG) {
2417 } else if (addend_flags == (RELOCATION_GROUP_HAS_ADDEND_FLAG | RELOCATION_GROUPED_BY_ADDEND_FLAG)) {
2418 size_t addend;
2419 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &addend);
2420 rel[2] += addend;
2421 } else {
2422 rel[2] = 0;
2423 }
2424
2425 for (size_t j = 0; j < group_size; j++) {
2426 if (group_flags & RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG) {
2427 rel[0] += group_r_offset_delta;
2428 } else {
2429 size_t offset_delta;
2430 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &offset_delta);
2431
2432 rel[0] += offset_delta;
2433 }
2434
2435 if ((group_flags & RELOCATION_GROUPED_BY_INFO_FLAG) == 0) {
2436 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &rel[1]);
2437 }
2438
2439 if (addend_flags == RELOCATION_GROUP_HAS_ADDEND_FLAG) {
2440 size_t addend;
2441 android_rel_curr = sleb128_decoder(android_rel_curr, android_rel_end, &addend);
2442 rel[2] += addend;
2443 }
2444
2445 if (dt_name == DT_ANDROID_REL) {
2446 do_relocs(p, rel, sizeof(size_t)*2, 2);
2447 } else {
2448 do_relocs(p, rel, sizeof(size_t)*3, 3);
2449 }
2450 }
2451
2452 i += group_size;
2453 }
2454 }
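/* Format sketch (as decoded above): an "APS2" section is the 4-byte
 * signature followed by SLEB128 values: the total relocation count, an
 * initial r_offset, then one header per group (group size, group flags,
 * plus an optional shared offset delta, shared r_info and/or shared
 * addend), followed by per-relocation values for whatever the flags left
 * ungrouped. A group whose offset delta, r_info and addend are all grouped
 * or absent needs no per-entry bytes beyond its header. */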
2455
2456 static void do_relr_relocs(struct dso *dso, size_t *relr, size_t relr_size)
2457 {
2458 unsigned char *base = dso->base;
2459 size_t *reloc_addr;
2460 for (; relr_size; relr++, relr_size-=sizeof(size_t))
2461 if ((relr[0]&1) == 0) {
2462 reloc_addr = laddr(dso, relr[0]);
2463 *reloc_addr++ += (size_t)base;
2464 } else {
2465 int i = 0;
2466 for (size_t bitmap=relr[0]; (bitmap>>=1); i++)
2467 if (bitmap&1)
2468 reloc_addr[i] += (size_t)base;
2469 reloc_addr += 8*sizeof(size_t)-1;
2470 }
2471 }
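/* Worked example (illustrative offsets) for the RELR decoder above: an
 * even entry such as 0x4000 relocates the word at that offset and leaves
 * reloc_addr pointing at 0x4008. A following odd entry is a bitmap whose
 * low bit is only the marker; with bitmap 0x5 (binary 101), bit 1 is clear
 * and bit 2 is set, so only the word at 0x4010 gets the load base added,
 * after which reloc_addr skips ahead by 63 words on 64-bit targets. */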
2472
2473 static void reloc_all(struct dso *p, const dl_extinfo *extinfo)
2474 {
2475 ssize_t relro_fd_offset = 0;
2476 size_t dyn[DYN_CNT];
2477 for (; p; p=p->next) {
2478 if (p->relocated) continue;
2479 if (p != &ldso) {
2480 add_can_search_so_list_in_dso(p, head);
2481 }
2482 decode_vec(p->dynv, dyn, DYN_CNT);
2483 if (NEED_MIPS_GOT_RELOCS)
2484 do_mips_relocs(p, laddr(p, dyn[DT_PLTGOT]));
2485 do_relocs(p, laddr(p, dyn[DT_JMPREL]), dyn[DT_PLTRELSZ],
2486 2+(dyn[DT_PLTREL]==DT_RELA));
2487 do_relocs(p, laddr(p, dyn[DT_REL]), dyn[DT_RELSZ], 2);
2488 do_relocs(p, laddr(p, dyn[DT_RELA]), dyn[DT_RELASZ], 3);
2489 if (!DL_FDPIC)
2490 do_relr_relocs(p, laddr(p, dyn[DT_RELR]), dyn[DT_RELRSZ]);
2491
2492 do_android_relocs(p, DT_ANDROID_REL, DT_ANDROID_RELSZ);
2493 do_android_relocs(p, DT_ANDROID_RELA, DT_ANDROID_RELASZ);
2494
2495 if (head != &ldso && p->relro_start != p->relro_end &&
2496 mprotect(laddr(p, p->relro_start), p->relro_end-p->relro_start, PROT_READ)
2497 && errno != ENOSYS) {
2498 error("Error relocating %s: RELRO protection failed: %m",
2499 p->name);
2500 if (runtime) longjmp(*rtld_fail, 1);
2501 }
2502 /* Handle serializing/mapping the RELRO segment */
2503 handle_relro_sharing(p, extinfo, &relro_fd_offset);
2504
2505 p->relocated = 1;
2506 free_reloc_can_search_dso(p);
2507 }
2508 }
2509
2510 static void kernel_mapped_dso(struct dso *p)
2511 {
2512 size_t min_addr = -1, max_addr = 0, cnt;
2513 Phdr *ph = p->phdr;
2514 for (cnt = p->phnum; cnt--; ph = (void *)((char *)ph + p->phentsize)) {
2515 if (ph->p_type == PT_DYNAMIC) {
2516 p->dynv = laddr(p, ph->p_vaddr);
2517 } else if (ph->p_type == PT_GNU_RELRO) {
2518 p->relro_start = ph->p_vaddr & -PAGE_SIZE;
2519 p->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
2520 } else if (ph->p_type == PT_GNU_STACK) {
2521 if (!runtime && ph->p_memsz > __default_stacksize) {
2522 __default_stacksize =
2523 ph->p_memsz < DEFAULT_STACK_MAX ?
2524 ph->p_memsz : DEFAULT_STACK_MAX;
2525 }
2526 }
2527 if (ph->p_type != PT_LOAD) continue;
2528 if (ph->p_vaddr < min_addr)
2529 min_addr = ph->p_vaddr;
2530 if (ph->p_vaddr+ph->p_memsz > max_addr)
2531 max_addr = ph->p_vaddr+ph->p_memsz;
2532 }
2533 min_addr &= -PAGE_SIZE;
2534 max_addr = (max_addr + PAGE_SIZE-1) & -PAGE_SIZE;
2535 p->map = p->base + min_addr;
2536 p->map_len = max_addr - min_addr;
2537 p->kernel_mapped = 1;
2538 }
2539
2540 void __libc_exit_fini()
2541 {
2542 struct dso *p;
2543 size_t dyn[DYN_CNT];
2544 pthread_t self = __pthread_self();
2545
2546 /* Take both locks before setting shutting_down, so that
2547 * either lock is sufficient to read its value. The lock
2548 * order matches that in dlopen to avoid deadlock. */
2549 pthread_rwlock_wrlock(&lock);
2550 pthread_mutex_lock(&init_fini_lock);
2551 shutting_down = 1;
2552 pthread_rwlock_unlock(&lock);
2553 for (p=fini_head; p; p=p->fini_next) {
2554 while (p->ctor_visitor && p->ctor_visitor!=self)
2555 pthread_cond_wait(&ctor_cond, &init_fini_lock);
2556 if (!p->constructed) continue;
2557 decode_vec(p->dynv, dyn, DYN_CNT);
2558 if (dyn[0] & (1<<DT_FINI_ARRAY)) {
2559 size_t n = dyn[DT_FINI_ARRAYSZ]/sizeof(size_t);
2560 size_t *fn = (size_t *)laddr(p, dyn[DT_FINI_ARRAY])+n;
2561 while (n--) ((void (*)(void))*--fn)();
2562 }
2563 #ifndef NO_LEGACY_INITFINI
2564 if ((dyn[0] & (1<<DT_FINI)) && dyn[DT_FINI])
2565 fpaddr(p, dyn[DT_FINI])();
2566 #endif
2567 }
2568 }
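/* Note (descriptive): DT_FINI_ARRAY entries run in reverse array order
 * here (the loop walks the array from its end), mirroring the forward
 * order used for DT_INIT_ARRAY in do_init_fini() below. */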
2569
2570 void __ldso_atfork(int who)
2571 {
2572 if (who<0) {
2573 pthread_rwlock_wrlock(&lock);
2574 pthread_mutex_lock(&init_fini_lock);
2575 } else {
2576 pthread_mutex_unlock(&init_fini_lock);
2577 pthread_rwlock_unlock(&lock);
2578 }
2579 }
2580
2581 static struct dso **queue_ctors(struct dso *dso)
2582 {
2583 size_t cnt, qpos, spos, i;
2584 struct dso *p, **queue, **stack;
2585
2586 if (ldd_mode) return 0;
2587
2588 /* Bound on queue size is the total number of indirect deps.
2589 * If a bfs deps list was built, we can use it. Otherwise,
2590 * bound by the total number of DSOs, which is always safe and
2591 * a reasonable bound for the case where we use it (the main app at startup). */
2592 if (dso->bfs_built) {
2593 for (cnt=0; dso->deps[cnt]; cnt++)
2594 dso->deps[cnt]->mark = 0;
2595 cnt++; /* self, not included in deps */
2596 } else {
2597 for (cnt=0, p=head; p; cnt++, p=p->next)
2598 p->mark = 0;
2599 }
2600 cnt++; /* termination slot */
2601 if (dso==head && cnt <= countof(builtin_ctor_queue))
2602 queue = builtin_ctor_queue;
2603 else
2604 queue = calloc(cnt, sizeof *queue);
2605
2606 if (!queue) {
2607 error("Error allocating constructor queue: %m\n");
2608 if (runtime) longjmp(*rtld_fail, 1);
2609 return 0;
2610 }
2611
2612 /* Opposite ends of the allocated buffer serve as an output queue
2613 * and a working stack. Setup initial stack with just the argument
2614 * dso and initial queue empty... */
2615 stack = queue;
2616 qpos = 0;
2617 spos = cnt;
2618 stack[--spos] = dso;
2619 dso->next_dep = 0;
2620 dso->mark = 1;
2621
2622 /* Then perform pseudo-DFS sort, but ignoring circular deps. */
2623 while (spos<cnt) {
2624 p = stack[spos++];
2625 while (p->next_dep < p->ndeps_direct) {
2626 if (p->deps[p->next_dep]->mark) {
2627 p->next_dep++;
2628 } else {
2629 stack[--spos] = p;
2630 p = p->deps[p->next_dep];
2631 p->next_dep = 0;
2632 p->mark = 1;
2633 }
2634 }
2635 queue[qpos++] = p;
2636 }
2637 queue[qpos] = 0;
2638 for (i=0; i<qpos; i++) queue[i]->mark = 0;
2639
2640 return queue;
2641 }
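/* For illustration (hypothetical names): if the argument dso A directly
 * needs B and B needs C, the pseudo-DFS above emits the queue
 * {C, B, A, NULL}, so the deepest dependency's constructors run first and
 * A's run last; circular references are skipped via the mark flag. */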
2642
2643 static void do_init_fini(struct dso **queue)
2644 {
2645 struct dso *p;
2646 size_t dyn[DYN_CNT], i;
2647 pthread_t self = __pthread_self();
2648
2649 pthread_mutex_lock(&init_fini_lock);
2650 for (i=0; (p=queue[i]); i++) {
2651 while ((p->ctor_visitor && p->ctor_visitor!=self) || shutting_down)
2652 pthread_cond_wait(&ctor_cond, &init_fini_lock);
2653 if (p->ctor_visitor || p->constructed)
2654 continue;
2655 p->ctor_visitor = self;
2656
2657 decode_vec(p->dynv, dyn, DYN_CNT);
2658 if (dyn[0] & ((1<<DT_FINI) | (1<<DT_FINI_ARRAY))) {
2659 p->fini_next = fini_head;
2660 fini_head = p;
2661 }
2662
2663 pthread_mutex_unlock(&init_fini_lock);
2664
2665 #ifndef NO_LEGACY_INITFINI
2666 if ((dyn[0] & (1<<DT_INIT)) && dyn[DT_INIT])
2667 fpaddr(p, dyn[DT_INIT])();
2668 #endif
2669 if (dyn[0] & (1<<DT_INIT_ARRAY)) {
2670 size_t n = dyn[DT_INIT_ARRAYSZ]/sizeof(size_t);
2671 size_t *fn = laddr(p, dyn[DT_INIT_ARRAY]);
2672 if (p != &ldso) {
2673 trace_marker_begin(HITRACE_TAG_MUSL, "calling constructors: ", p->name);
2674 }
2675 while (n--) ((void (*)(void))*fn++)();
2676 if (p != &ldso) {
2677 trace_marker_end(HITRACE_TAG_MUSL);
2678 }
2679 }
2680
2681 pthread_mutex_lock(&init_fini_lock);
2682 p->ctor_visitor = 0;
2683 p->constructed = 1;
2684 pthread_cond_broadcast(&ctor_cond);
2685 }
2686 pthread_mutex_unlock(&init_fini_lock);
2687 }
2688
2689 void __libc_start_init(void)
2690 {
2691 do_init_fini(main_ctor_queue);
2692 if (!__malloc_replaced && main_ctor_queue != builtin_ctor_queue)
2693 free(main_ctor_queue);
2694 main_ctor_queue = 0;
2695 }
2696
2697 static void dl_debug_state(void)
2698 {
2699 }
2700
2701 weak_alias(dl_debug_state, _dl_debug_state);
2702
2703 void __init_tls(size_t *auxv)
2704 {
2705 }
2706
2707 static void update_tls_size()
2708 {
2709 libc.tls_cnt = tls_cnt;
2710 libc.tls_align = tls_align;
2711 libc.tls_size = ALIGN(
2712 (1+tls_cnt) * sizeof(void *) +
2713 tls_offset +
2714 sizeof(struct pthread) +
2715 tls_align * 2,
2716 tls_align);
2717 }
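/* Arithmetic sketch (hypothetical numbers): with tls_cnt = 2, 8-byte
 * pointers, tls_offset = 48, tls_align = 16 and, purely for the example,
 * sizeof(struct pthread) = 400, the sum is 3*8 + 48 + 400 + 2*16 = 504 and
 * ALIGN(504, 16) rounds it up to 512, which becomes libc.tls_size. */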
2718
2719 static void install_new_tls(void)
2720 {
2721 sigset_t set;
2722 pthread_t self = __pthread_self(), td;
2723 struct dso *dtv_provider = container_of(tls_tail, struct dso, tls);
2724 uintptr_t (*newdtv)[tls_cnt+1] = (void *)dtv_provider->new_dtv;
2725 struct dso *p;
2726 size_t i, j;
2727 size_t old_cnt = self->dtv[0];
2728
2729 __block_app_sigs(&set);
2730 __tl_lock();
2731 /* Copy existing dtv contents from all existing threads. */
2732 for (i=0, td=self; !i || td!=self; i++, td=td->next) {
2733 memcpy(newdtv+i, td->dtv,
2734 (old_cnt+1)*sizeof(uintptr_t));
2735 newdtv[i][0] = tls_cnt;
2736 }
2737 /* Install new dtls into the enlarged, uninstalled dtv copies. */
2738 for (p=head; ; p=p->next) {
2739 if (p->tls_id <= old_cnt) continue;
2740 unsigned char *mem = p->new_tls;
2741 for (j=0; j<i; j++) {
2742 unsigned char *new = mem;
2743 new += ((uintptr_t)p->tls.image - (uintptr_t)mem)
2744 & (p->tls.align-1);
2745 memcpy(new, p->tls.image, p->tls.len);
2746 newdtv[j][p->tls_id] =
2747 (uintptr_t)new + DTP_OFFSET;
2748 mem += p->tls.size + p->tls.align;
2749 }
2750 if (p->tls_id == tls_cnt) break;
2751 }
2752
2753 /* Broadcast barrier to ensure contents of new dtv is visible
2754 * if the new dtv pointer is. The __membarrier function has a
2755 * fallback emulation using signals for kernels that lack the
2756 * feature at the syscall level. */
2757
2758 __membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
2759
2760 /* Install new dtv for each thread. */
2761 for (j=0, td=self; !j || td!=self; j++, td=td->next) {
2762 td->dtv = newdtv[j];
2763 }
2764
2765 __tl_unlock();
2766 __restore_sigs(&set);
2767 }
2768
2769 /* Stage 1 of the dynamic linker is defined in dlstart.c. It calls the
2770 * following stage 2 and stage 3 functions via primitive symbolic lookup
2771 * since it does not have access to their addresses to begin with. */
2772
2773 /* Stage 2 of the dynamic linker is called after relative relocations
2774 * have been processed. It can make function calls to static functions
2775 * and access string literals and static data, but cannot use extern
2776 * symbols. Its job is to perform symbolic relocations on the dynamic
2777 * linker itself, but some of the relocations performed may need to be
2778 * replaced later due to copy relocations in the main program. */
2779
2780 hidden void __dls2(unsigned char *base, size_t *sp)
2781 {
2782 size_t *auxv;
2783 for (auxv=sp+1+*sp+1; *auxv; auxv++);
2784 auxv++;
2785 if (DL_FDPIC) {
2786 void *p1 = (void *)sp[-2];
2787 void *p2 = (void *)sp[-1];
2788 if (!p1) {
2789 size_t aux[AUX_CNT];
2790 decode_vec(auxv, aux, AUX_CNT);
2791 if (aux[AT_BASE]) ldso.base = (void *)aux[AT_BASE];
2792 else ldso.base = (void *)(aux[AT_PHDR] & -4096);
2793 }
2794 app_loadmap = p2 ? p1 : 0;
2795 ldso.loadmap = p2 ? p2 : p1;
2796 ldso.base = laddr(&ldso, 0);
2797 } else {
2798 ldso.base = base;
2799 }
2800 size_t aux[AUX_CNT];
2801 decode_vec(auxv, aux, AUX_CNT);
2802 libc.page_size = aux[AT_PAGESZ];
2803 Ehdr *ehdr = (void *)ldso.base;
2804 ldso.name = ldso.shortname = "libc.so";
2805 ldso.phnum = ehdr->e_phnum;
2806 ldso.phdr = laddr(&ldso, ehdr->e_phoff);
2807 ldso.phentsize = ehdr->e_phentsize;
2808 ldso.is_global = true;
2809 kernel_mapped_dso(&ldso);
2810 decode_dyn(&ldso);
2811
2812 if (DL_FDPIC) makefuncdescs(&ldso);
2813
2814 /* Prepare storage to save clobbered REL addends so they
2815 * can be reused in stage 3. There should be very few. If
2816 * something goes wrong and there are a huge number, abort
2817 * instead of risking stack overflow. */
2818 size_t dyn[DYN_CNT];
2819 decode_vec(ldso.dynv, dyn, DYN_CNT);
2820 size_t *rel = laddr(&ldso, dyn[DT_REL]);
2821 size_t rel_size = dyn[DT_RELSZ];
2822 size_t symbolic_rel_cnt = 0;
2823 apply_addends_to = rel;
2824 for (; rel_size; rel+=2, rel_size-=2*sizeof(size_t))
2825 if (!IS_RELATIVE(rel[1], ldso.syms)) symbolic_rel_cnt++;
2826 if (symbolic_rel_cnt >= ADDEND_LIMIT) a_crash();
2827 size_t addends[symbolic_rel_cnt+1];
2828 saved_addends = addends;
2829
2830 head = &ldso;
2831 reloc_all(&ldso, NULL);
2832
2833 ldso.relocated = 0;
2834
2835 /* Call dynamic linker stage-2b, __dls2b, looking it up
2836 * symbolically as a barrier against moving the address
2837 * load across the above relocation processing. */
2838 struct symdef dls2b_def = find_sym(&ldso, "__dls2b", 0);
2839 if (DL_FDPIC) ((stage3_func)&ldso.funcdescs[dls2b_def.sym-ldso.syms])(sp, auxv, aux);
2840 else ((stage3_func)laddr(&ldso, dls2b_def.sym->st_value))(sp, auxv, aux);
2841 }
2842
2843 /* Stage 2b sets up a valid thread pointer, which requires relocations
2844 * completed in stage 2, and on which stage 3 is permitted to depend.
2845 * This is done as a separate stage, with symbolic lookup as a barrier,
2846 * so that loads of the thread pointer and &errno can be pure/const and
2847 * thereby hoistable. */
2848
2849 void __dls2b(size_t *sp, size_t *auxv, size_t *aux)
2850 {
2851 /* Setup early thread pointer in builtin_tls for ldso/libc itself to
2852 * use during dynamic linking. If possible it will also serve as the
2853 * thread pointer at runtime. */
2854 search_vec(auxv, &__hwcap, AT_HWCAP);
2855 libc.auxv = auxv;
2856 libc.tls_size = sizeof builtin_tls;
2857 libc.tls_align = tls_align;
2858 if (__init_tp(__copy_tls((void *)builtin_tls)) < 0) {
2859 a_crash();
2860 }
2861 __pthread_self()->stack = (void *)(sp + 1);
2862 struct symdef dls3_def = find_sym(&ldso, "__dls3", 0);
2863 if (DL_FDPIC) ((stage3_func)&ldso.funcdescs[dls3_def.sym-ldso.syms])(sp, auxv, aux);
2864 else ((stage3_func)laddr(&ldso, dls3_def.sym->st_value))(sp, auxv, aux);
2865 }
2866
2867 /* Stage 3 of the dynamic linker is called with the dynamic linker/libc
2868 * fully functional. Its job is to load (if not already loaded) and
2869 * process dependencies and relocations for the main application and
2870 * transfer control to its entry point. */
2871
2872 void __dls3(size_t *sp, size_t *auxv, size_t *aux)
2873 {
2874 static struct dso app, vdso;
2875 size_t i;
2876 char *env_preload=0;
2877 char *replace_argv0=0;
2878 size_t vdso_base;
2879 int argc = *sp;
2880 char **argv = (void *)(sp+1);
2881 char **argv_orig = argv;
2882 char **envp = argv+argc+1;
2883
2884 /* Find aux vector just past environ[] and use it to initialize
2885 * global data that may be needed before we can make syscalls. */
2886 __environ = envp;
2887 search_vec(auxv, &__sysinfo, AT_SYSINFO);
2888 __pthread_self()->sysinfo = __sysinfo;
2889 libc.secure = ((aux[0]&0x7800)!=0x7800 || aux[AT_UID]!=aux[AT_EUID]
2890 || aux[AT_GID]!=aux[AT_EGID] || aux[AT_SECURE]);
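/* For illustration: 0x7800 is the set of auxv presence bits for AT_UID,
 * AT_EUID, AT_GID and AT_EGID (keys 11 through 14) as recorded in aux[0]
 * by decode_vec; if any of them is missing, real and effective ids differ,
 * or AT_SECURE is set, the process is treated as secure. */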
2891
2892 /* Only trust user/env if kernel says we're not suid/sgid */
2893 if (!libc.secure) {
2894 env_path = getenv("LD_LIBRARY_PATH");
2895 env_preload = getenv("LD_PRELOAD");
2896 }
2897 #ifdef OHOS_ENABLE_PARAMETER
2898 InitParameterClient();
2899 #endif
2900 // We may abort while linking other libs, so install the signal handler before the stages start
2901 #ifdef DFX_SIGNAL_LIBC
2902 DFX_InstallSignalHandler();
2903 #endif
2904 __init_fdsan();
2905 /* If the main program was already loaded by the kernel,
2906 * AT_PHDR will point to some location other than the dynamic
2907 * linker's program headers. */
2908 if (aux[AT_PHDR] != (size_t)ldso.phdr) {
2909 size_t interp_off = 0;
2910 size_t tls_image = 0;
2911 /* Find load address of the main program, via AT_PHDR vs PT_PHDR. */
2912 Phdr *phdr = app.phdr = (void *)aux[AT_PHDR];
2913 app.phnum = aux[AT_PHNUM];
2914 app.phentsize = aux[AT_PHENT];
2915 for (i=aux[AT_PHNUM]; i; i--, phdr=(void *)((char *)phdr + aux[AT_PHENT])) {
2916 if (phdr->p_type == PT_PHDR)
2917 app.base = (void *)(aux[AT_PHDR] - phdr->p_vaddr);
2918 else if (phdr->p_type == PT_INTERP)
2919 interp_off = (size_t)phdr->p_vaddr;
2920 else if (phdr->p_type == PT_TLS) {
2921 tls_image = phdr->p_vaddr;
2922 app.tls.len = phdr->p_filesz;
2923 app.tls.size = phdr->p_memsz;
2924 app.tls.align = phdr->p_align;
2925 }
2926 }
2927 if (DL_FDPIC) app.loadmap = app_loadmap;
2928 if (app.tls.size) app.tls.image = laddr(&app, tls_image);
2929 if (interp_off) ldso.name = laddr(&app, interp_off);
2930 if ((aux[0] & (1UL<<AT_EXECFN))
2931 && strncmp((char *)aux[AT_EXECFN], "/proc/", 6))
2932 app.name = (char *)aux[AT_EXECFN];
2933 else
2934 app.name = argv[0];
2935 kernel_mapped_dso(&app);
2936 } else {
2937 int fd;
2938 char *ldname = argv[0];
2939 size_t l = strlen(ldname);
2940 if (l >= 3 && !strcmp(ldname+l-3, "ldd")) ldd_mode = 1;
2941 argv++;
2942 while (argv[0] && argv[0][0]=='-' && argv[0][1]=='-') {
2943 char *opt = argv[0]+2;
2944 *argv++ = (void *)-1;
2945 if (!*opt) {
2946 break;
2947 } else if (!memcmp(opt, "list", 5)) {
2948 ldd_mode = 1;
2949 } else if (!memcmp(opt, "library-path", 12)) {
2950 if (opt[12]=='=') env_path = opt+13;
2951 else if (opt[12]) *argv = 0;
2952 else if (*argv) env_path = *argv++;
2953 } else if (!memcmp(opt, "preload", 7)) {
2954 if (opt[7]=='=') env_preload = opt+8;
2955 else if (opt[7]) *argv = 0;
2956 else if (*argv) env_preload = *argv++;
2957 } else if (!memcmp(opt, "argv0", 5)) {
2958 if (opt[5]=='=') replace_argv0 = opt+6;
2959 else if (opt[5]) *argv = 0;
2960 else if (*argv) replace_argv0 = *argv++;
2961 } else {
2962 argv[0] = 0;
2963 }
2964 }
2965 argv[-1] = (void *)(argc - (argv-argv_orig));
2966 if (!argv[0]) {
2967 dprintf(2, "musl libc (" LDSO_ARCH ")\n"
2968 "Version %s\n"
2969 "Dynamic Program Loader\n"
2970 "Usage: %s [options] [--] pathname%s\n",
2971 __libc_version, ldname,
2972 ldd_mode ? "" : " [args]");
2973 _exit(1);
2974 }
2975 fd = open(argv[0], O_RDONLY);
2976 if (fd < 0) {
2977 dprintf(2, "%s: cannot load %s: %s\n", ldname, argv[0], strerror(errno));
2978 _exit(1);
2979 }
2980 Ehdr *ehdr = map_library(fd, &app, NULL);
2981 if (!ehdr) {
2982 dprintf(2, "%s: %s: Not a valid dynamic program\n", ldname, argv[0]);
2983 _exit(1);
2984 }
2985 close(fd);
2986 ldso.name = ldname;
2987 app.name = argv[0];
2988 aux[AT_ENTRY] = (size_t)laddr(&app, ehdr->e_entry);
2989 /* Find the name that would have been used for the dynamic
2990 * linker had ldd not taken its place. */
2991 if (ldd_mode) {
2992 for (i=0; i<app.phnum; i++) {
2993 if (app.phdr[i].p_type == PT_INTERP)
2994 ldso.name = laddr(&app, app.phdr[i].p_vaddr);
2995 }
2996 dprintf(1, "\t%s (%p)\n", ldso.name, ldso.base);
2997 }
2998 }
2999 if (app.tls.size) {
3000 libc.tls_head = tls_tail = &app.tls;
3001 app.tls_id = tls_cnt = 1;
3002 #ifdef TLS_ABOVE_TP
3003 app.tls.offset = GAP_ABOVE_TP;
3004 app.tls.offset += (-GAP_ABOVE_TP + (uintptr_t)app.tls.image)
3005 & (app.tls.align-1);
3006 tls_offset = app.tls.offset + app.tls.size;
3007 #else
3008 tls_offset = app.tls.offset = app.tls.size
3009 + ( -((uintptr_t)app.tls.image + app.tls.size)
3010 & (app.tls.align-1) );
3011 #endif
3012 tls_align = MAXP2(tls_align, app.tls.align);
3013 }
3014 decode_dyn(&app);
3015 if (DL_FDPIC) {
3016 makefuncdescs(&app);
3017 if (!app.loadmap) {
3018 app.loadmap = (void *)&app_dummy_loadmap;
3019 app.loadmap->nsegs = 1;
3020 app.loadmap->segs[0].addr = (size_t)app.map;
3021 app.loadmap->segs[0].p_vaddr = (size_t)app.map
3022 - (size_t)app.base;
3023 app.loadmap->segs[0].p_memsz = app.map_len;
3024 }
3025 argv[-3] = (void *)app.loadmap;
3026 }
3027 app.is_global = true;
3028
3029 /* Initial dso chain consists only of the app. */
3030 head = tail = syms_tail = &app;
3031
3032 /* Donate unused parts of app and library mapping to malloc */
3033 reclaim_gaps(&app);
3034 reclaim_gaps(&ldso);
3035
3036 find_and_set_bss_name(&app);
3037 find_and_set_bss_name(&ldso);
3038
3039 /* Load preload/needed libraries, add symbols to global namespace. */
3040 ldso.deps = (struct dso **)no_deps;
3041 /* Init g_is_asan */
3042 g_is_asan = false;
3043 LD_LOGD("__dls3 ldso.name:%{public}s.", ldso.name);
3044 /* Use the ldso name to determine whether ASan is enabled */
3045 if (strstr(ldso.name, "-asan")) {
3046 g_is_asan = true;
3047 LD_LOGD("__dls3 g_is_asan is true.");
3048 }
3049 /* Init all namespaces from the config file. There is always a default namespace. */
3050 init_namespace(&app);
3051
3052 #ifdef LOAD_ORDER_RANDOMIZATION
3053 struct loadtasks *tasks = create_loadtasks();
3054 if (!tasks) {
3055 _exit(1);
3056 }
3057 if (env_preload) {
3058 load_preload(env_preload, get_default_ns(), tasks);
3059 }
3060 for (struct dso *q=head; q; q=q->next) {
3061 q->is_global = true;
3062 }
3063 preload_deps(&app, tasks);
3064 unmap_preloaded_sections(tasks);
3065 shuffle_loadtasks(tasks);
3066 run_loadtasks(tasks, NULL);
3067 free_loadtasks(tasks);
3068 assign_tls(app.next);
3069 #else
3070 if (env_preload) load_preload(env_preload, get_default_ns());
3071 for (struct dso *q=head; q; q=q->next) {
3072 q->is_global = true;
3073 }
3074 load_deps(&app, NULL);
3075 #endif
3076
3077 /* Set is_reloc_head_so_dep to true for all direct and indirect dependencies of app, including app itself. */
3078 for (struct dso *p=head; p; p=p->next) {
3079 p->is_reloc_head_so_dep = true;
3080 add_syms(p);
3081 }
3082
3083 /* Attach to vdso, if provided by the kernel, last so that it does
3084 * not become part of the global namespace. */
3085 if (search_vec(auxv, &vdso_base, AT_SYSINFO_EHDR) && vdso_base) {
3086 Ehdr *ehdr = (void *)vdso_base;
3087 Phdr *phdr = vdso.phdr = (void *)(vdso_base + ehdr->e_phoff);
3088 vdso.phnum = ehdr->e_phnum;
3089 vdso.phentsize = ehdr->e_phentsize;
3090 for (i=ehdr->e_phnum; i; i--, phdr=(void *)((char *)phdr + ehdr->e_phentsize)) {
3091 if (phdr->p_type == PT_DYNAMIC)
3092 vdso.dynv = (void *)(vdso_base + phdr->p_offset);
3093 if (phdr->p_type == PT_LOAD)
3094 vdso.base = (void *)(vdso_base - phdr->p_vaddr + phdr->p_offset);
3095 }
3096 vdso.name = "";
3097 vdso.shortname = "linux-gate.so.1";
3098 vdso.relocated = 1;
3099 vdso.deps = (struct dso **)no_deps;
3100 decode_dyn(&vdso);
3101 vdso.prev = tail;
3102 tail->next = &vdso;
3103 tail = &vdso;
3104 vdso.namespace = get_default_ns();
3105 ns_add_dso(vdso.namespace, &vdso);
3106 }
3107
3108 for (i=0; app.dynv[i]; i+=2) {
3109 if (!DT_DEBUG_INDIRECT && app.dynv[i]==DT_DEBUG)
3110 app.dynv[i+1] = (size_t)&debug;
3111 if (DT_DEBUG_INDIRECT && app.dynv[i]==DT_DEBUG_INDIRECT) {
3112 size_t *ptr = (size_t *) app.dynv[i+1];
3113 *ptr = (size_t)&debug;
3114 }
3115 }
3116
3117 /* This must be done before final relocations, since it calls
3118 * malloc, which may be provided by the application. Calling any
3119 * application code prior to the jump to its entry point is not
3120 * valid in our model and does not work with FDPIC, where there
3121 * are additional relocation-like fixups that only the entry point
3122 * code can see to perform. */
3123 main_ctor_queue = queue_ctors(&app);
3124
3125 /* Initial TLS must also be allocated before final relocations
3126 * might result in calloc being a call to application code. */
3127 update_tls_size();
3128 void *initial_tls = builtin_tls;
3129 if (libc.tls_size > sizeof builtin_tls || tls_align > MIN_TLS_ALIGN) {
3130 initial_tls = calloc(libc.tls_size, 1);
3131 if (!initial_tls) {
3132 dprintf(2, "%s: Error getting %zu bytes thread-local storage: %m\n",
3133 argv[0], libc.tls_size);
3134 _exit(127);
3135 }
3136 }
3137 static_tls_cnt = tls_cnt;
3138
3139 /* The main program must be relocated LAST since it may contain
3140 * copy relocations which depend on libraries' relocations. */
3141 reloc_all(app.next, NULL);
3142 reloc_all(&app, NULL);
3143 for (struct dso *q=head; q; q=q->next) {
3144 q->is_reloc_head_so_dep = false;
3145 }
3146
3147 /* Actual copying to new TLS needs to happen after relocations,
3148 * since the TLS images might have contained relocated addresses. */
3149 if (initial_tls != builtin_tls) {
3150 pthread_t self = __pthread_self();
3151 pthread_t td = __copy_tls(initial_tls);
3152 if (__init_tp(td) < 0) {
3153 a_crash();
3154 }
3155 td->tsd = self->tsd;
3156 } else {
3157 size_t tmp_tls_size = libc.tls_size;
3158 pthread_t self = __pthread_self();
3159 /* Temporarily set the tls size to the full size of
3160 * builtin_tls so that __copy_tls will use the same layout
3161 * as it did before. Then check, just to be safe. */
3162 libc.tls_size = sizeof builtin_tls;
3163 if (__copy_tls((void*)builtin_tls) != self) a_crash();
3164 libc.tls_size = tmp_tls_size;
3165 }
3166
3167 if (init_cfi_shadow(head, &ldso) == CFI_FAILED) {
3168 error("[%s] init_cfi_shadow failed: %m", __FUNCTION__);
3169 }
3170
3171 if (ldso_fail) _exit(127);
3172 if (ldd_mode) _exit(0);
3173
3174 /* Determine if malloc was interposed by a replacement implementation
3175 * so that calloc and the memalign family can harden against the
3176 * possibility of incomplete replacement. */
3177 if (find_sym(head, "malloc", 1).dso != &ldso)
3178 __malloc_replaced = 1;
3179 if (find_sym(head, "aligned_alloc", 1).dso != &ldso)
3180 __aligned_alloc_replaced = 1;
3181
3182 /* Switch to runtime mode: any further failures in the dynamic
3183 * linker are a reportable failure rather than a fatal startup
3184 * error. */
3185 runtime = 1;
3186
3187 sync_with_debugger();
3188
3189 if (replace_argv0) argv[0] = replace_argv0;
3190
3191 #ifdef USE_GWP_ASAN
3192 init_gwp_asan_by_libc(false);
3193 #endif
3194
3195 errno = 0;
3196
3197 CRTJMP((void *)aux[AT_ENTRY], argv-1);
3198 for(;;);
3199 }
3200
3201 static void prepare_lazy(struct dso *p)
3202 {
3203 size_t dyn[DYN_CNT], n, flags1=0;
3204 decode_vec(p->dynv, dyn, DYN_CNT);
3205 search_vec(p->dynv, &flags1, DT_FLAGS_1);
3206 if (dyn[DT_BIND_NOW] || (dyn[DT_FLAGS] & DF_BIND_NOW) || (flags1 & DF_1_NOW))
3207 return;
3208 n = dyn[DT_RELSZ]/2 + dyn[DT_RELASZ]/3 + dyn[DT_PLTRELSZ]/2 + 1;
3209 if (NEED_MIPS_GOT_RELOCS) {
3210 size_t j=0; search_vec(p->dynv, &j, DT_MIPS_GOTSYM);
3211 size_t i=0; search_vec(p->dynv, &i, DT_MIPS_SYMTABNO);
3212 n += i-j;
3213 }
3214 p->lazy = calloc(n, 3*sizeof(size_t));
3215 if (!p->lazy) {
3216 error("Error preparing lazy relocation for %s: %m", p->name);
3217 longjmp(*rtld_fail, 1);
3218 }
3219 p->lazy_next = lazy_head;
3220 lazy_head = p;
3221 }
3222
3223 static void *dlopen_post(struct dso* p, int mode) {
3224 if (p == NULL) {
3225 return p;
3226 }
3227 bool is_dlclose_debug = false;
3228 if (is_dlclose_debug_enable()) {
3229 is_dlclose_debug = true;
3230 }
3231 p->nr_dlopen++;
3232 if (is_dlclose_debug) {
3233 LD_LOGE("[dlclose]: %{public}s nr_dlopen++ when dlopen %{public}s, nr_dlopen:%{public}d ",
3234 p->name, p->name, p->nr_dlopen);
3235 }
3236 if (p->bfs_built) {
3237 for (int i = 0; p->deps[i]; i++) {
3238 p->deps[i]->nr_dlopen++;
3239 if (is_dlclose_debug) {
3240 LD_LOGE("[dlclose]: %{public}s nr_dlopen++ when dlopen %{public}s, nr_dlopen:%{public}d",
3241 p->deps[i]->name, p->name, p->deps[i]->nr_dlopen);
3242 }
3243 if (mode & RTLD_NODELETE) {
3244 p->deps[i]->flags |= DSO_FLAGS_NODELETE;
3245 }
3246 }
3247 }
3248
3249 #ifdef HANDLE_RANDOMIZATION
3250 void *handle = assign_valid_handle(p);
3251 if (handle == NULL) {
3252 LD_LOGE("dlopen_post: generate random handle failed");
3253 do_dlclose(p);
3254 }
3255
3256 return handle;
3257 #endif
3258
3259 return p;
3260 }
3261
3262 static char *dlopen_permitted_list[] =
3263 {
3264 "default",
3265 "ndk",
3266 };
3267
3268 #define PERMITIED_TARGET "nweb_ns"
3269 #define PERMITIED_TARGET2 "nweb_ns_legacy"
3270 static bool in_permitted_list(char *caller, char *target)
3271 {
3272 for (int i = 0; i < sizeof(dlopen_permitted_list)/sizeof(char*); i++) {
3273 if (strcmp(dlopen_permitted_list[i], caller) == 0) {
3274 return true;
3275 }
3276 }
3277
3278 if (strcmp(PERMITIED_TARGET, target) == 0) {
3279 return true;
3280 }
3281
3282 if (strcmp(PERMITIED_TARGET2, target) == 0) {
3283 return true;
3284 }
3285
3286 return false;
3287 }
3288
3289 static bool is_permitted(const void *caller_addr, char *target)
3290 {
3291 struct dso *caller;
3292 ns_t *ns;
3293 caller = (struct dso *)addr2dso((size_t)caller_addr);
3294 if ((caller == NULL) || (caller->namespace == NULL)) {
3295 LD_LOGE("caller ns get error");
3296 return false;
3297 }
3298
3299 ns = caller->namespace;
3300 if (in_permitted_list(ns->ns_name, target) == false) {
3301 LD_LOGE("caller ns: %{public}s have no permission, target is %{public}s", ns->ns_name, target);
3302 return false;
3303 }
3304
3305 return true;
3306 }
3307
3308 /* Add namespace function.
3309 * Some limitations come from the sanitizers:
3310 * the sanitizer runtime requires this interface to be exposed, and
3311 * care is needed when calling __builtin_return_address here because the sanitizer can hook and call this interface.
3312 */
3313 void *dlopen_impl(
3314 const char *file, int mode, const char *namespace, const void *caller_addr, const dl_extinfo *extinfo)
3315 {
3316 struct dso *volatile p, *orig_tail, *orig_syms_tail, *orig_lazy_head, *next;
3317 struct tls_module *orig_tls_tail;
3318 size_t orig_tls_cnt, orig_tls_offset, orig_tls_align;
3319 size_t i;
3320 int cs;
3321 jmp_buf jb;
3322 struct dso **volatile ctor_queue = 0;
3323 ns_t *ns;
3324 struct dso *caller;
3325 bool reserved_address = false;
3326 bool reserved_address_recursive = false;
3327 struct reserved_address_params reserved_params = {0};
3328 #ifdef LOAD_ORDER_RANDOMIZATION
3329 struct loadtasks *tasks = NULL;
3330 struct loadtask *task = NULL;
3331 bool is_task_appended = false;
3332 #endif
3333
3334 if (!file) {
3335 LD_LOGD("dlopen_impl file is null, return head.");
3336 return dlopen_post(head, mode);
3337 }
3338
3339 if (extinfo) {
3340 reserved_address_recursive = extinfo->flag & DL_EXT_RESERVED_ADDRESS_RECURSIVE;
3341 if (extinfo->flag & DL_EXT_RESERVED_ADDRESS) {
3342 reserved_address = true;
3343 reserved_params.start_addr = extinfo->reserved_addr;
3344 reserved_params.reserved_size = extinfo->reserved_size;
3345 reserved_params.must_use_reserved = true;
3346 reserved_params.reserved_address_recursive = reserved_address_recursive;
3347 } else if (extinfo->flag & DL_EXT_RESERVED_ADDRESS_HINT) {
3348 reserved_address = true;
3349 reserved_params.start_addr = extinfo->reserved_addr;
3350 reserved_params.reserved_size = extinfo->reserved_size;
3351 reserved_params.must_use_reserved = false;
3352 reserved_params.reserved_address_recursive = reserved_address_recursive;
3353 }
3354 }
3355
3356 pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
3357 pthread_rwlock_wrlock(&lock);
3358 __inhibit_ptc();
3359 trace_marker_reset();
3360 trace_marker_begin(HITRACE_TAG_MUSL, "dlopen: ", file);
3361
3362 /* When the namespace does not exist, use the caller's namespace,
3363 * and when the caller does not exist, use the default namespace. */
3364 caller = (struct dso *)addr2dso((size_t)caller_addr);
3365 ns = find_ns_by_name(namespace);
3366 if (!ns) ns = ((caller && caller->namespace) ? caller->namespace : get_default_ns());
3367
3368 p = 0;
3369 if (shutting_down) {
3370 error("Cannot dlopen while program is exiting.");
3371 goto end;
3372 }
3373 orig_tls_tail = tls_tail;
3374 orig_tls_cnt = tls_cnt;
3375 orig_tls_offset = tls_offset;
3376 orig_tls_align = tls_align;
3377 orig_lazy_head = lazy_head;
3378 orig_syms_tail = syms_tail;
3379 orig_tail = tail;
3380 noload = mode & RTLD_NOLOAD;
3381
3382 rtld_fail = &jb;
3383 if (setjmp(*rtld_fail)) {
3384 /* Clean up anything new that was (partially) loaded */
3385 revert_syms(orig_syms_tail);
3386 for (p=orig_tail->next; p; p=next) {
3387 next = p->next;
3388 while (p->td_index) {
3389 void *tmp = p->td_index->next;
3390 free(p->td_index);
3391 p->td_index = tmp;
3392 }
3393 free(p->funcdescs);
3394 if (p->rpath != p->rpath_orig)
3395 free(p->rpath);
3396 if (p->deps) {
3397 for (int i = 0; i < p->ndeps_direct; i++) {
3398 remove_dso_parent(p->deps[i], p);
3399 }
3400 }
3401 free(p->deps);
3402 dlclose_ns(p);
3403 unmap_library(p);
3404 if (p->parents) {
3405 free(p->parents);
3406 }
3407 free_reloc_can_search_dso(p);
3408 }
3409 for (p=orig_tail->next; p; p=next) {
3410 next = p->next;
3411 free(p);
3412 }
3413 free(ctor_queue);
3414 ctor_queue = 0;
3415 if (!orig_tls_tail) libc.tls_head = 0;
3416 tls_tail = orig_tls_tail;
3417 if (tls_tail) tls_tail->next = 0;
3418 tls_cnt = orig_tls_cnt;
3419 tls_offset = orig_tls_offset;
3420 tls_align = orig_tls_align;
3421 lazy_head = orig_lazy_head;
3422 tail = orig_tail;
3423 tail->next = 0;
3424 p = 0;
3425 goto end;
3426 } else {
3427 #ifdef LOAD_ORDER_RANDOMIZATION
3428 tasks = create_loadtasks();
3429 if (!tasks) {
3430 LD_LOGE("dlopen_impl create loadtasks failed");
3431 goto end;
3432 }
3433 task = create_loadtask(file, head, ns, true);
3434 if (!task) {
3435 LD_LOGE("dlopen_impl create loadtask failed");
3436 goto end;
3437 }
3438 trace_marker_begin(HITRACE_TAG_MUSL, "loading: entry so", file);
3439 if (!load_library_header(task)) {
3440 error(noload ?
3441 "Library %s is not already loaded" :
3442 "Error loading shared library %s: %m",
3443 file);
3444 LD_LOGE("dlopen_impl load library header failed for %{public}s", task->name);
3445 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3446 goto end;
3447 }
3448 if (reserved_address) {
3449 reserved_params.target = task->p;
3450 }
3451 }
3452 if (!task->p) {
3453 LD_LOGE("dlopen_impl load library failed for %{public}s", task->name);
3454 error(noload ?
3455 "Library %s is not already loaded" :
3456 "Error loading shared library %s: %m",
3457 file);
3458 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3459 goto end;
3460 }
3461 if (!task->isloaded) {
3462 is_task_appended = append_loadtasks(tasks, task);
3463 }
3464 preload_deps(task->p, tasks);
3465 unmap_preloaded_sections(tasks);
3466 if (!reserved_address_recursive) {
3467 shuffle_loadtasks(tasks);
3468 }
3469 run_loadtasks(tasks, reserved_address ? &reserved_params : NULL);
3470 p = task->p;
3471 if (!task->isloaded) {
3472 assign_tls(p);
3473 }
3474 if (!is_task_appended) {
3475 free_task(task);
3476 task = NULL;
3477 }
3478 free_loadtasks(tasks);
3479 tasks = NULL;
3480 #else
3481 trace_marker_begin(HITRACE_TAG_MUSL, "loading: entry so", file);
3482 p = load_library(file, head, ns, true, reserved_address ? &reserved_params : NULL);
3483 }
3484
3485 if (!p) {
3486 error(noload ?
3487 "Library %s is not already loaded" :
3488 "Error loading shared library %s: %m",
3489 file);
3490 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3491 goto end;
3492 }
3493 /* First load handling */
3494 load_deps(p, reserved_address && reserved_address_recursive ? &reserved_params : NULL);
3495 #endif
3496 trace_marker_end(HITRACE_TAG_MUSL); // "loading: entry so" trace end.
3497 extend_bfs_deps(p);
3498 pthread_mutex_lock(&init_fini_lock);
3499 int constructed = p->constructed;
3500 pthread_mutex_unlock(&init_fini_lock);
3501 if (!constructed) ctor_queue = queue_ctors(p);
3502 if (!p->relocated && (mode & RTLD_LAZY)) {
3503 prepare_lazy(p);
3504 for (i=0; p->deps[i]; i++)
3505 if (!p->deps[i]->relocated)
3506 prepare_lazy(p->deps[i]);
3507 }
3508 if (!p->relocated || (mode & RTLD_GLOBAL)) {
3509 /* Make new symbols global, at least temporarily, so we can do
3510 * relocations. If not RTLD_GLOBAL, this is reverted below. */
3511 add_syms(p);
3512 /* Set is_reloc_head_so_dep to true for all direct and indirect dependencies of p, including p itself. */
3513 p->is_reloc_head_so_dep = true;
3514 for (i=0; p->deps[i]; i++) {
3515 p->deps[i]->is_reloc_head_so_dep = true;
3516 add_syms(p->deps[i]);
3517 }
3518 }
3519 struct dso *reloc_head_so = p;
3520 trace_marker_begin(HITRACE_TAG_MUSL, "linking: entry so", p->name);
3521 if (!p->relocated) {
3522 reloc_all(p, extinfo);
3523 }
3524 trace_marker_end(HITRACE_TAG_MUSL);
3525 reloc_head_so->is_reloc_head_so_dep = false;
3526 for (size_t i=0; reloc_head_so->deps[i]; i++) {
3527 reloc_head_so->deps[i]->is_reloc_head_so_dep = false;
3528 }
3529
3530 /* If RTLD_GLOBAL was not specified, undo any new additions
3531 * to the global symbol table. This is a nop if the library was
3532 * previously loaded and already global. */
3533 if (!(mode & RTLD_GLOBAL))
3534 revert_syms(orig_syms_tail);
3535
3536 /* Processing of deferred lazy relocations must not happen until
3537 * the new libraries are committed; otherwise we could end up with
3538 * relocations resolved to symbol definitions that get removed. */
3539 redo_lazy_relocs();
3540
3541 if (map_dso_to_cfi_shadow(p) == CFI_FAILED) {
3542 error("[%s] map_dso_to_cfi_shadow failed: %m", __FUNCTION__);
3543 longjmp(*rtld_fail, 1);
3544 }
3545
3546 if (mode & RTLD_NODELETE) {
3547 p->flags |= DSO_FLAGS_NODELETE;
3548 }
3549
3550 update_tls_size();
3551 if (tls_cnt != orig_tls_cnt)
3552 install_new_tls();
3553
3554 if (orig_tail != tail) {
3555 notify_addition_to_debugger(orig_tail->next);
3556 }
3557
3558 orig_tail = tail;
3559
3560 p = dlopen_post(p, mode);
3561 end:
3562 #ifdef LOAD_ORDER_RANDOMIZATION
3563 if (!is_task_appended) {
3564 free_task(task);
3565 }
3566 free_loadtasks(tasks);
3567 #endif
3568 __release_ptc();
3569 if (p) gencnt++;
3570 pthread_rwlock_unlock(&lock);
3571 if (ctor_queue) {
3572 do_init_fini(ctor_queue);
3573 free(ctor_queue);
3574 }
3575 pthread_setcancelstate(cs, 0);
3576 trace_marker_end(HITRACE_TAG_MUSL); // "dlopen: " trace end.
3577 return p;
3578 }
3579
3580 void *dlopen(const char *file, int mode)
3581 {
3582 const void *caller_addr = __builtin_return_address(0);
3583 musl_log_reset();
3584 ld_log_reset();
3585 LD_LOGI("dlopen file:%{public}s, mode:%{public}x ,caller_addr:%{public}p .", file, mode, caller_addr);
3586 return dlopen_impl(file, mode, NULL, caller_addr, NULL);
3587 }
3588
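/*
 * Illustrative usage sketch (editorial addition, not part of this file): the
 * entry points above back the plain POSIX dlopen/dlsym/dlclose API, so a
 * caller drives them in the usual way. "libexample.so" and "example_symbol"
 * are placeholders.
 */
#if 0
#include <dlfcn.h>
#include <stdio.h>

int example_plain_dlopen(void)
{
    /* RTLD_NOW resolves relocations up front; RTLD_LAZY defers them via the
     * prepare_lazy() path in dlopen_impl() above. */
    void *h = dlopen("libexample.so", RTLD_NOW | RTLD_LOCAL);
    if (!h) {
        fprintf(stderr, "dlopen: %s\n", dlerror());
        return -1;
    }
    int (*fn)(int) = (int (*)(int))dlsym(h, "example_symbol");
    int ret = fn ? fn(42) : -1;
    dlclose(h);
    return ret;
}
#endif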
3589 void dlns_init(Dl_namespace *dlns, const char *name)
3590 {
3591 if (!dlns) {
3592 return;
3593 }
3594 if (!name) {
3595 dlns->name[0] = 0;
3596 return;
3597 }
3598
3599 const void *caller_addr = __builtin_return_address(0);
3600 if (is_permitted(caller_addr, name) == false) {
3601 return;
3602 }
3603
3604 snprintf(dlns->name, sizeof dlns->name, "%s", name);
3605 LD_LOGI("dlns_init dlns->name:%{public}s .", dlns->name);
3606 }
3607
3608 int dlns_get(const char *name, Dl_namespace *dlns)
3609 {
3610 if (!dlns) {
3611 LD_LOGE("dlns_get dlns is null.");
3612 return EINVAL;
3613 }
3614 int ret = 0;
3615 ns_t *ns = NULL;
3616 pthread_rwlock_rdlock(&lock);
3617 if (!name) {
3618 struct dso *caller;
3619 const void *caller_addr = __builtin_return_address(0);
3620 caller = (struct dso *)addr2dso((size_t)caller_addr);
3621 ns = ((caller && caller->namespace) ? caller->namespace : get_default_ns());
3622 (void)snprintf(dlns->name, sizeof dlns->name, "%s", ns->ns_name);
3623 LD_LOGI("dlns_get name is null, current dlns dlns->name:%{public}s.", dlns->name);
3624 } else {
3625 ns = find_ns_by_name(name);
3626 if (ns) {
3627 (void)snprintf(dlns->name, sizeof dlns->name, "%s", ns->ns_name);
3628 LD_LOGI("dlns_get found ns, current dlns dlns->name:%{public}s.", dlns->name);
3629 } else {
3630 LD_LOGI("dlns_get not found ns! name:%{public}s.", name);
3631 ret = ENOKEY;
3632 }
3633 }
3634 pthread_rwlock_unlock(&lock);
3635 return ret;
3636 }
3637
3638 void *dlopen_ns(Dl_namespace *dlns, const char *file, int mode)
3639 {
3640 const void *caller_addr = __builtin_return_address(0);
3641 if (is_permitted(caller_addr, dlns->name) == false) {
3642 return NULL;
3643 }
3644
3645 musl_log_reset();
3646 ld_log_reset();
3647 LD_LOGI("dlopen_ns file:%{public}s, mode:%{public}x , caller_addr:%{public}p , dlns->name:%{public}s.",
3648 file,
3649 mode,
3650 caller_addr,
3651 dlns ? dlns->name : "NULL");
3652 return dlopen_impl(file, mode, dlns->name, caller_addr, NULL);
3653 }
3654
3655 void *dlopen_ns_ext(Dl_namespace *dlns, const char *file, int mode, const dl_extinfo *extinfo)
3656 {
3657 const void *caller_addr = __builtin_return_address(0);
3658 if (is_permitted(caller_addr, dlns->name) == false) {
3659 return NULL;
3660 }
3661
3662 musl_log_reset();
3663 ld_log_reset();
3664 LD_LOGI("dlopen_ns_ext file:%{public}s, mode:%{public}x , caller_addr:%{public}p , "
3665 "dlns->name:%{public}s. , extinfo->flag:%{public}x",
3666 file,
3667 mode,
3668 caller_addr,
3669 dlns->name,
3670 extinfo ? extinfo->flag : 0);
3671 return dlopen_impl(file, mode, dlns->name, caller_addr, extinfo);
3672 }
3673
3674 int dlns_create2(Dl_namespace *dlns, const char *lib_path, int flags)
3675 {
3676 if (!dlns) {
3677 LD_LOGE("dlns_create2 dlns is null.");
3678 return EINVAL;
3679 }
3680 ns_t *ns;
3681
3682 pthread_rwlock_wrlock(&lock);
3683 const void *caller_addr = __builtin_return_address(0);
3684 if (is_permitted(caller_addr, dlns->name) == false) {
3685 pthread_rwlock_unlock(&lock);
3686 return EPERM;
3687 }
3688
3689 ns = find_ns_by_name(dlns->name);
3690 if (ns) {
3691 LD_LOGE("dlns_create2 ns is exist.");
3692 pthread_rwlock_unlock(&lock);
3693 return EEXIST;
3694 }
3695 ns = ns_alloc();
3696 if (!ns) {
3697 LD_LOGE("dlns_create2 no memery.");
3698 pthread_rwlock_unlock(&lock);
3699 return ENOMEM;
3700 }
3701 ns_set_name(ns, dlns->name);
3702 ns_set_flag(ns, flags);
3703 ns_add_dso(ns, get_default_ns()->ns_dsos->dsos[0]); /* add main app to this namespace*/
3704 nslist_add_ns(ns); /* add ns to list*/
3705 ns_set_lib_paths(ns, lib_path);
3706
3707 if ((flags & CREATE_INHERIT_DEFAULT) != 0) {
3708 ns_add_inherit(ns, get_default_ns(), NULL);
3709 }
3710
3711 if ((flags & CREATE_INHERIT_CURRENT) != 0) {
3712 struct dso *caller;
3713 caller_addr = __builtin_return_address(0);
3714 caller = (struct dso *)addr2dso((size_t)caller_addr);
3715 if (caller && caller->namespace) {
3716 ns_add_inherit(ns, caller->namespace, NULL);
3717 }
3718 }
3719
3720 LD_LOGI("dlns_create2:"
3721 "ns_name: %{public}s ,"
3722 "separated:%{public}d ,"
3723 "lib_paths:%{public}s ",
3724 ns->ns_name, ns->separated, ns->lib_paths);
3725 pthread_rwlock_unlock(&lock);
3726
3727 return 0;
3728 }
3729
3730 int dlns_create(Dl_namespace *dlns, const char *lib_path)
3731 {
3732 LD_LOGI("dlns_create lib_paths:%{public}s", lib_path);
3733 return dlns_create2(dlns, lib_path, CREATE_INHERIT_DEFAULT);
3734 }
3735
3736 int dlns_inherit(Dl_namespace *dlns, Dl_namespace *inherited, const char *shared_libs)
3737 {
3738 if (!dlns || !inherited) {
3739 LD_LOGE("dlns_inherit dlns or inherited is null.");
3740 return EINVAL;
3741 }
3742
3743 pthread_rwlock_wrlock(&lock);
3744 const void *caller_addr = __builtin_return_address(0);
3745 if (is_permitted(caller_addr, dlns->name) == false) {
3746 pthread_rwlock_unlock(&lock);
3747 return EPERM;
3748 }
3749
3750 ns_t* ns = find_ns_by_name(dlns->name);
3751 ns_t* ns_inherited = find_ns_by_name(inherited->name);
3752 if (!ns || !ns_inherited) {
3753 LD_LOGE("dlns_inherit ns or ns_inherited is not found.");
3754 pthread_rwlock_unlock(&lock);
3755 return ENOKEY;
3756 }
3757 ns_add_inherit(ns, ns_inherited, shared_libs);
3758 pthread_rwlock_unlock(&lock);
3759
3760 return 0;
3761 }
3762
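/*
 * Illustrative sketch of the dlns_* namespace API defined above (editorial
 * addition, not part of this file). The namespace name, search path and
 * library names are placeholders; the declarations are assumed to come from
 * the public headers this file already includes.
 */
#if 0
#include <dlfcn.h>
#include <errno.h>
#include <stddef.h>

int example_namespace(void)
{
    Dl_namespace dlns;
    dlns_init(&dlns, "example_ns");

    /* Give the namespace its own search path; CREATE_INHERIT_DEFAULT lets it
     * inherit from the default namespace, mirroring dlns_create(). */
    int err = dlns_create2(&dlns, "/data/example/libs", CREATE_INHERIT_DEFAULT);
    if (err != 0 && err != EEXIST)
        return err;

    /* Optionally share selected libraries from the caller's namespace. */
    Dl_namespace current;
    dlns_get(NULL, &current);
    dlns_inherit(&dlns, &current, "libshared.so");

    void *h = dlopen_ns(&dlns, "libexample.so", RTLD_NOW);
    return h ? 0 : -1;
}
#endif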
3763 static void dlclose_ns(struct dso *p)
3764 {
3765 if (!p) return;
3766 ns_t * ns = p->namespace;
3767 if (!ns||!ns->ns_dsos) return;
3768 for (size_t i=0; i<ns->ns_dsos->num; i++) {
3769 if (p == ns->ns_dsos->dsos[i]) {
3770 for (size_t j=i+1; j<ns->ns_dsos->num; j++) {
3771 ns->ns_dsos->dsos[j-1] = ns->ns_dsos->dsos[j];
3772 }
3773 ns->ns_dsos->num--;
3774 return;
3775 }
3776 }
3777 }
3778
3779 hidden int __dl_invalid_handle(void *h)
3780 {
3781 struct dso *p;
3782 for (p=head; p; p=p->next) if (h==p) return 0;
3783 error("Invalid library handle %p", (void *)h);
3784 return 1;
3785 }
3786
3787 void *addr2dso(size_t a)
3788 {
3789 struct dso *p;
3790 size_t i;
3791 for (p=head; p; p=p->next) {
3792 if (a < (size_t)p->map || a - (size_t)p->map >= p->map_len) continue;
3793 Phdr *ph = p->phdr;
3794 size_t phcnt = p->phnum;
3795 size_t entsz = p->phentsize;
3796 size_t base = (size_t)p->base;
3797 for (; phcnt--; ph=(void *)((char *)ph+entsz)) {
3798 if (ph->p_type != PT_LOAD) continue;
3799 if (a-base-ph->p_vaddr < ph->p_memsz)
3800 return p;
3801 }
3802 if (a-(size_t)p->map < p->map_len)
3803 return 0;
3804 }
3805 return 0;
3806 }
3807
3808 static void *do_dlsym(struct dso *p, const char *s, const char *v, void *ra)
3809 {
3810 int use_deps = 0;
3811 bool ra2dso = false;
3812 ns_t *ns = NULL;
3813 struct dso *caller = NULL;
3814 if (p == head || p == RTLD_DEFAULT) {
3815 p = head;
3816 ra2dso = true;
3817 } else if (p == RTLD_NEXT) {
3818 p = addr2dso((size_t)ra);
3819 if (!p) p=head;
3820 p = p->next;
3821 ra2dso = true;
3822 #ifndef HANDLE_RANDOMIZATION
3823 } else if (__dl_invalid_handle(p)) {
3824 return 0;
3825 #endif
3826 } else {
3827 use_deps = 1;
3828 ns = p->namespace;
3829 }
3830 if (ra2dso) {
3831 caller = (struct dso *)addr2dso((size_t)ra);
3832 if (caller && caller->namespace) {
3833 ns = caller->namespace;
3834 }
3835 }
3836 trace_marker_begin(HITRACE_TAG_MUSL, "dlsym: ", (s == NULL ? "(NULL)" : s));
3837 struct verinfo verinfo = { .s = s, .v = v, .use_vna_hash = false };
3838 struct symdef def = use_deps ? find_sym_by_deps(p, &verinfo, 0, ns) :
3839 find_sym2(p, &verinfo, 0, use_deps, ns);
3840 trace_marker_end(HITRACE_TAG_MUSL);
3841 if (!def.sym) {
3842 LD_LOGW("do_dlsym failed: symbol not found. so=%{public}s s=%{public}s v=%{public}s",
3843 (p == NULL ? "NULL" : p->name), s, v);
3844 error("do_dlsym failed: Symbol not found: %s, version: %s so=%s",
3845 s, strlen(v) > 0 ? v : "null", (p == NULL ? "NULL" : p->name));
3846 return 0;
3847 }
3848 if ((def.sym->st_info&0xf) == STT_TLS)
3849 return __tls_get_addr((tls_mod_off_t []){def.dso->tls_id, def.sym->st_value-DTP_OFFSET});
3850 if (DL_FDPIC && (def.sym->st_info&0xf) == STT_FUNC)
3851 return def.dso->funcdescs + (def.sym - def.dso->syms);
3852 return laddr(def.dso, def.sym->st_value);
3853 }
3854
3855 extern int invalidate_exit_funcs(struct dso *p);
3856
3857 static int so_can_unload(struct dso *p, int check_flag)
3858 {
3859 if ((check_flag & UNLOAD_COMMON_CHECK) != 0) {
3860 if (__dl_invalid_handle(p)) {
3861 LD_LOGE("[dlclose]: invalid handle %{public}p", p);
3862 error("[dlclose]: Handle is invalid.");
3863 return 0;
3864 }
3865
3866 if (!p->by_dlopen) {
3867 LD_LOGD("[dlclose]: skip unload %{public}s because it's not loaded by dlopen", p->name);
3868 return 0;
3869 }
3870
3871 /* dso is marked as RTLD_NODELETE library, do nothing here. */
3872 if ((p->flags & DSO_FLAGS_NODELETE) != 0) {
3873 LD_LOGD("[dlclose]: skip unload %{public}s because flags is RTLD_NODELETE", p->name);
3874 return 0;
3875 }
3876 }
3877
3878 if ((check_flag & UNLOAD_NR_DLOPEN_CHECK) != 0) {
3879 if (p->nr_dlopen > 0) {
3880 LD_LOGD("[dlclose]: skip unload %{public}s because nr_dlopen=%{public}d > 0", p->name, p->nr_dlopen);
3881 return 0;
3882 }
3883 }
3884
3885 return 1;
3886 }
3887
3888 static int dlclose_post(struct dso *p)
3889 {
3890 if (p == NULL) {
3891 return -1;
3892 }
3893 #ifdef ENABLE_HWASAN
3894 if (libc.unload_hook) {
3895 libc.unload_hook((unsigned long int)p->base, p->phdr, p->phnum);
3896 }
3897 #endif
3898 unmap_library(p);
3899 if (p->parents) {
3900 free(p->parents);
3901 }
3902 free_reloc_can_search_dso(p);
3903 if (p->tls.size == 0) {
3904 free(p);
3905 }
3906
3907 return 0;
3908 }
3909
3910 static int dlclose_impl(struct dso *p)
3911 {
3912 struct dso *d;
3913
3914 trace_marker_reset();
3915 trace_marker_begin(HITRACE_TAG_MUSL, "dlclose", p->name);
3916
3917 /* remove dso symbols from global list */
3918 if (p->syms_next) {
3919 for (d = head; d->syms_next != p; d = d->syms_next)
3920 ; /* NOP */
3921 d->syms_next = p->syms_next;
3922 } else if (p == syms_tail) {
3923 for (d = head; d->syms_next != p; d = d->syms_next)
3924 ; /* NOP */
3925 d->syms_next = NULL;
3926 syms_tail = d;
3927 }
3928
3929 /* remove dso from lazy list if needed */
3930 if (p == lazy_head) {
3931 lazy_head = p->lazy_next;
3932 } else if (p->lazy_next) {
3933 for (d = lazy_head; d->lazy_next != p; d = d->lazy_next)
3934 ; /* NOP */
3935 d->lazy_next = p->lazy_next;
3936 }
3937
3938 pthread_mutex_lock(&init_fini_lock);
3939 /* remove dso from fini list */
3940 if (p == fini_head) {
3941 fini_head = p->fini_next;
3942 } else if (p->fini_next) {
3943 for (d = fini_head; d->fini_next != p; d = d->fini_next)
3944 ; /* NOP */
3945 d->fini_next = p->fini_next;
3946 }
3947 pthread_mutex_unlock(&init_fini_lock);
3948
3949 /* empty tls image */
3950 if (p->tls.size != 0) {
3951 p->tls.image = NULL;
3952 }
3953
3954 /* remove dso from global dso list */
3955 if (p == tail) {
3956 tail = p->prev;
3957 tail->next = NULL;
3958 } else {
3959 p->next->prev = p->prev;
3960 p->prev->next = p->next;
3961 }
3962
3963 /* remove dso from namespace */
3964 dlclose_ns(p);
3965
3966 /* remove the dso's handle from the handle table if present */
3967 void* handle = find_handle_by_dso(p);
3968 if (handle) {
3969 remove_handle_node(handle);
3970 }
3971
3972 /* after destruct, invalidate atexit funcs which belong to this dso */
3973 #if (defined(FEATURE_ATEXIT_CB_PROTECT))
3974 invalidate_exit_funcs(p);
3975 #endif
3976
3977 notify_remove_to_debugger(p);
3978
3979 unmap_dso_from_cfi_shadow(p);
3980
3981 if (p->lazy != NULL)
3982 free(p->lazy);
3983 if (p->deps != no_deps)
3984 free(p->deps);
3985
3986 trace_marker_end(HITRACE_TAG_MUSL);
3987
3988 return 0;
3989 }
3990
3991 static int do_dlclose(struct dso *p)
3992 {
3993 struct dso_entry *ef = NULL;
3994 struct dso_entry *ef_tmp = NULL;
3995 size_t n;
3996 int unload_check_result;
3997 TAILQ_HEAD(unload_queue, dso_entry) unload_queue;
3998 TAILQ_HEAD(need_unload_queue, dso_entry) need_unload_queue;
3999 unload_check_result = so_can_unload(p, UNLOAD_COMMON_CHECK);
4000 if (unload_check_result != 1) {
4001 return unload_check_result;
4002 }
4003 // Unconditionally subtract 1 here because dlopen_post unconditionally adds 1.
4004 if (p->nr_dlopen > 0) {
4005 --(p->nr_dlopen);
4006 } else {
4007 LD_LOGE("[dlclose]: number of dlopen and dlclose of %{public}s doesn't match when dlclose %{public}s",
4008 p->name, p->name);
4009 return 0;
4010 }
4011
4012 if (p->bfs_built) {
4013 for (int i = 0; p->deps[i]; i++) {
4014 if (p->deps[i]->nr_dlopen > 0) {
4015 p->deps[i]->nr_dlopen--;
4016 } else {
4017 LD_LOGE("[dlclose]: number of dlopen and dlclose of %{public}s doesn't match when dlclose %{public}s",
4018 p->deps[i]->name, p->name);
4019 return 0;
4020 }
4021 }
4022 }
4023 unload_check_result = so_can_unload(p, UNLOAD_NR_DLOPEN_CHECK);
4024 if (unload_check_result != 1) {
4025 return unload_check_result;
4026 }
4027 TAILQ_INIT(&unload_queue);
4028 TAILQ_INIT(&need_unload_queue);
4029 struct dso_entry *start_entry = (struct dso_entry *)malloc(sizeof(struct dso_entry));
4030 start_entry->dso = p;
4031 TAILQ_INSERT_TAIL(&unload_queue, start_entry, entries);
4032
4033 while (!TAILQ_EMPTY(&unload_queue)) {
4034 struct dso_entry *ecur = TAILQ_FIRST(&unload_queue);
4035 struct dso *cur = ecur->dso;
4036 TAILQ_REMOVE(&unload_queue, ecur, entries);
4037 bool already_in_need_unload_queue = false;
4038 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4039 if (ef->dso == cur) {
4040 already_in_need_unload_queue = true;
4041 break;
4042 }
4043 }
4044 if (already_in_need_unload_queue) {
4045 continue;
4046 }
4047 TAILQ_INSERT_TAIL(&need_unload_queue, ecur, entries);
4048 for (int i = 0; i < cur->ndeps_direct; i++) {
4049 remove_dso_parent(cur->deps[i], cur);
4050 if ((cur->deps[i]->parents_count == 0) && (so_can_unload(cur->deps[i], UNLOAD_ALL_CHECK) == 1)) {
4051 bool already_in_unload_queue = false;
4052 TAILQ_FOREACH(ef, &unload_queue, entries) {
4053 if (ef->dso == cur->deps[i]) {
4054 already_in_unload_queue = true;
4055 break;
4056 }
4057 }
4058 if (already_in_unload_queue) {
4059 continue;
4060 }
4061
4062 struct dso_entry *edeps = (struct dso_entry *)malloc(sizeof(struct dso_entry));
4063 edeps->dso = cur->deps[i];
4064 TAILQ_INSERT_TAIL(&unload_queue, edeps, entries);
4065 }
4066 } /* for */
4067 } /* while */
4068
4069 if (is_dlclose_debug_enable()) {
4070 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4071 LD_LOGE("[dlclose]: unload %{public}s succeed when dlclose %{public}s", ef->dso->name, p->name);
4072 }
4073 for (size_t deps_num = 0; p->deps[deps_num]; deps_num++) {
4074 bool ready_to_unload = false;
4075 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4076 if (ef->dso == p->deps[deps_num]) {
4077 ready_to_unload = true;
4078 break;
4079 }
4080 }
4081 if (!ready_to_unload) {
4082 LD_LOGE("[dlclose]: unload %{public}s failed when dlclose %{public}s,"
4083 "nr_dlopen:%{public}d, by_dlopen:%{public}d, parents_count:%{public}d",
4084 p->deps[deps_num]->name, p->name, p->deps[deps_num]->nr_dlopen,
4085 p->deps[deps_num]->by_dlopen, p->deps[deps_num]->parents_count);
4086 }
4087 }
4088 }
4089
4090 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4091 dlclose_impl(ef->dso);
4092 }
4093
4094 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4095 /* call destructors if needed */
4096 pthread_mutex_lock(&init_fini_lock);
4097 int constructed = ef->dso->constructed;
4098 pthread_mutex_unlock(&init_fini_lock);
4099
4100 if (constructed) {
4101 size_t dyn[DYN_CNT];
4102 decode_vec(ef->dso->dynv, dyn, DYN_CNT);
4103 if (dyn[0] & (1<<DT_FINI_ARRAY)) {
4104 n = dyn[DT_FINI_ARRAYSZ] / sizeof(size_t);
4105 size_t *fn = (size_t *)laddr(ef->dso, dyn[DT_FINI_ARRAY]) + n;
4106 trace_marker_begin(HITRACE_TAG_MUSL, "calling destructors:", ef->dso->name);
4107
4108 pthread_rwlock_unlock(&lock);
4109 while (n--)
4110 ((void (*)(void))*--fn)();
4111 pthread_rwlock_wrlock(&lock);
4112
4113 trace_marker_end(HITRACE_TAG_MUSL);
4114 }
4115 pthread_mutex_lock(&init_fini_lock);
4116 ef->dso->constructed = 0;
4117 pthread_mutex_unlock(&init_fini_lock);
4118 }
4119 }
4120 // Unload all sos at the end: a weak symbol could otherwise let a later-unloaded so call into an already-unmapped one.
4121 TAILQ_FOREACH(ef, &need_unload_queue, entries) {
4122 dlclose_post(ef->dso);
4123 }
4124 // Free dso_entry.
4125 TAILQ_FOREACH_SAFE(ef, &need_unload_queue, entries, ef_tmp) {
4126 if (ef) {
4127 free(ef);
4128 }
4129 }
4130
4131 return 0;
4132 }
4133
4134 hidden int __dlclose(void *p)
4135 {
4136 int rc;
4137 pthread_rwlock_wrlock(&lock);
4138 if (shutting_down) {
4139 error("Cannot dlclose while program is exiting.");
4140 pthread_rwlock_unlock(&lock);
4141 return -1;
4142 }
4143 #ifdef HANDLE_RANDOMIZATION
4144 struct dso *dso = find_dso_by_handle(p);
4145 if (dso == NULL) {
4146 errno = EINVAL;
4147 error("Handle is invalid.");
4148 LD_LOGE("Handle is not find.");
4149 pthread_rwlock_unlock(&lock);
4150 return -1;
4151 }
4152 rc = do_dlclose(dso);
4153 #else
4154 rc = do_dlclose(p);
4155 #endif
4156 pthread_rwlock_unlock(&lock);
4157 return rc;
4158 }
4159
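/*
 * Illustrative note on reference counting (editorial sketch, not part of this
 * file): do_dlclose() above decrements nr_dlopen for the handle and its
 * recorded dependencies and only unloads once the count reaches zero, so
 * dlopen and dlclose calls are expected to pair up. "libexample.so" is a
 * placeholder.
 */
#if 0
#include <dlfcn.h>

void example_refcount(void)
{
    void *a = dlopen("libexample.so", RTLD_NOW);
    void *b = dlopen("libexample.so", RTLD_NOW); /* same dso, nr_dlopen grows */
    dlclose(a);                                  /* still loaded */
    dlclose(b);                                  /* may be unloaded now, unless
                                                    RTLD_NODELETE or other checks apply */
}
#endif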
4160 static inline int sym_is_matched(const Sym* sym, size_t addr_offset_so) {
4161 return sym->st_value &&
4162 ((sym->st_info&0xf) != STT_TLS) &&
4163 (addr_offset_so >= sym->st_value) &&
4164 (addr_offset_so < sym->st_value + sym->st_size);
4165 }
4166
4167 static inline Sym* find_addr_by_elf(size_t addr_offset_so, struct dso *p) {
4168 uint32_t nsym = p->hashtab[1];
4169 Sym *sym = p->syms;
4170 for (; nsym; nsym--, sym++) {
4171 if (sym_is_matched(sym, addr_offset_so)) {
4172 return sym;
4173 }
4174 }
4175
4176 return NULL;
4177 }
4178
4179 static inline Sym* find_addr_by_gnu(size_t addr_offset_so, struct dso *p) {
4180
4181 size_t i, nsym, first_hash_sym_index;
4182 uint32_t *hashval;
4183 Sym *sym_tab = p->syms;
4184 uint32_t *buckets= p->ghashtab + 4 + (p->ghashtab[2]*sizeof(size_t)/4);
4185 // Points to the first defined symbol; all symbols before it are undefined.
4186 first_hash_sym_index = buckets[0];
4187 Sym *sym = &sym_tab[first_hash_sym_index];
4188
4189 // Find the largest symbol index pointed to by any bucket.
4190 for (i = nsym = 0; i < p->ghashtab[0]; i++) {
4191 if (buckets[i] > nsym)
4192 nsym = buckets[i];
4193 }
4194
4195 for (i = first_hash_sym_index; i < nsym; i++) {
4196 if (sym_is_matched(sym, addr_offset_so)) {
4197 return sym;
4198 }
4199 sym++;
4200 }
4201
4202 // Start traversing the hash list from the position pointed to by the last bucket.
4203 if (nsym) {
4204 hashval = buckets + p->ghashtab[0] + (nsym - p->ghashtab[1]);
4205 do {
4206 nsym++;
4207 if (sym_is_matched(sym, addr_offset_so)) {
4208 return sym;
4209 }
4210 sym++;
4211 }
4212 while (!(*hashval++ & 1));
4213 }
4214
4215 return NULL;
4216 }
4217
4218
4219 int dladdr(const void *addr_arg, Dl_info *info)
4220 {
4221 size_t addr = (size_t)addr_arg;
4222 struct dso *p;
4223 Sym *match_sym = NULL;
4224 char *strings;
4225
4226 pthread_rwlock_rdlock(&lock);
4227 p = addr2dso(addr);
4228 pthread_rwlock_unlock(&lock);
4229
4230 if (!p) return 0;
4231
4232 strings = p->strings;
4233 size_t addr_offset_so = addr - (size_t)p->base;
4234
4235 info->dli_fname = p->name;
4236 info->dli_fbase = p->map;
4237
4238 if (p->ghashtab) {
4239 match_sym = find_addr_by_gnu(addr_offset_so, p);
4240
4241 } else {
4242 match_sym = find_addr_by_elf(addr_offset_so, p);
4243 }
4244
4245 if (!match_sym) {
4246 info->dli_sname = 0;
4247 info->dli_saddr = 0;
4248 return 1;
4249 }
4250 info->dli_sname = strings + match_sym->st_name;
4251 info->dli_saddr = (void *)laddr(p, match_sym->st_value);
4252 return 1;
4253 }
4254
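/*
 * Illustrative dladdr() usage sketch (editorial addition, not part of this
 * file), matching the Dl_info fields filled in above.
 */
#if 0
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>

void example_dladdr(void *addr)
{
    Dl_info info;
    if (dladdr(addr, &info)) {
        /* dli_sname/dli_saddr stay null when no containing symbol is found. */
        printf("%p is in %s (base %p), symbol %s at %p\n",
               addr, info.dli_fname, info.dli_fbase,
               info.dli_sname ? info.dli_sname : "?", info.dli_saddr);
    }
}
#endif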
4255 hidden void *__dlsym(void *restrict p, const char *restrict s, void *restrict ra)
4256 {
4257 void *res;
4258 pthread_rwlock_rdlock(&lock);
4259 #ifdef HANDLE_RANDOMIZATION
4260 if ((p != RTLD_DEFAULT) && (p != RTLD_NEXT)) {
4261 struct dso *dso = find_dso_by_handle(p);
4262 if (dso == NULL) {
4263 pthread_rwlock_unlock(&lock);
4264 return 0;
4265 }
4266 res = do_dlsym(dso, s, "", ra);
4267 } else {
4268 res = do_dlsym(p, s, "", ra);
4269 }
4270 #else
4271 res = do_dlsym(p, s, "", ra);
4272 #endif
4273 pthread_rwlock_unlock(&lock);
4274 return res;
4275 }
4276
4277 hidden void *__dlvsym(void *restrict p, const char *restrict s, const char *restrict v, void *restrict ra)
4278 {
4279 void *res;
4280 pthread_rwlock_rdlock(&lock);
4281 #ifdef HANDLE_RANDOMIZATION
4282 if ((p != RTLD_DEFAULT) && (p != RTLD_NEXT)) {
4283 struct dso *dso = find_dso_by_handle(p);
4284 if (dso == NULL) {
4285 pthread_rwlock_unlock(&lock);
4286 return 0;
4287 }
4288 res = do_dlsym(dso, s, v, ra);
4289 } else {
4290 res = do_dlsym(p, s, v, ra);
4291 }
4292 #else
4293 res = do_dlsym(p, s, v, ra);
4294 #endif
4295 pthread_rwlock_unlock(&lock);
4296 return res;
4297 }
4298
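/*
 * Illustrative sketch (editorial addition, not part of this file): __dlvsym
 * above presumably backs the GNU dlvsym() entry point, which looks a symbol
 * up under a specific version string. The symbol and version names are
 * placeholders.
 */
#if 0
#define _GNU_SOURCE
#include <dlfcn.h>

void *example_dlvsym(void *handle)
{
    /* Returns NULL when no definition with that version exists. */
    return dlvsym(handle, "example_symbol", "EXAMPLE_1.0");
}
#endif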
4299 hidden void *__dlsym_redir_time64(void *restrict p, const char *restrict s, void *restrict ra)
4300 {
4301 #if _REDIR_TIME64
4302 const char *suffix, *suffix2 = "";
4303 char redir[36];
4304
4305 /* Map the symbol name to a time64 version of itself according to the
4306 * pattern used for naming the redirected time64 symbols. */
4307 size_t l = strnlen(s, sizeof redir);
4308 if (l<4 || l==sizeof redir) goto no_redir;
4309 if (s[l-2]=='_' && s[l-1]=='r') {
4310 l -= 2;
4311 suffix2 = s+l;
4312 }
4313 if (l<4) goto no_redir;
4314 if (!strcmp(s+l-4, "time")) suffix = "64";
4315 else suffix = "_time64";
4316
4317 /* Use the presence of the remapped symbol name in libc to determine
4318 * whether it's one that requires time64 redirection; replace if so. */
4319 snprintf(redir, sizeof redir, "__%.*s%s%s", (int)l, s, suffix, suffix2);
4320 if (find_sym(&ldso, redir, 1).sym) s = redir;
4321 no_redir:
4322 #endif
4323 return __dlsym(p, s, ra);
4324 }
4325
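/*
 * Worked examples of the name mapping implemented above (derived from the
 * code; the replacement is only used when the redirected name exists in libc):
 *   "clock_gettime"  ->  "__clock_gettime64"   (name ends in "time": suffix "64")
 *   "nanosleep"      ->  "__nanosleep_time64"  (otherwise: suffix "_time64")
 *   "localtime_r"    ->  "__localtime64_r"     (a trailing "_r" is stripped and re-appended)
 */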
4326 int dl_iterate_phdr(int(*callback)(struct dl_phdr_info *info, size_t size, void *data), void *data)
4327 {
4328 struct dso *current;
4329 struct dl_phdr_info info;
4330 int ret = 0;
4331 for(current = head; current;) {
4332 info.dlpi_addr = (uintptr_t)current->base;
4333 info.dlpi_name = current->name;
4334 info.dlpi_phdr = current->phdr;
4335 info.dlpi_phnum = current->phnum;
4336 info.dlpi_adds = gencnt;
4337 info.dlpi_subs = 0;
4338 info.dlpi_tls_modid = current->tls_id;
4339 info.dlpi_tls_data = !current->tls_id ? 0 :
4340 __tls_get_addr((tls_mod_off_t[]){current->tls_id,0});
4341
4342 ret = (callback)(&info, sizeof (info), data);
4343
4344 if (ret != 0) break;
4345
4346 pthread_rwlock_rdlock(&lock);
4347 current = current->next;
4348 pthread_rwlock_unlock(&lock);
4349 }
4350 return ret;
4351 }
4352
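/*
 * Illustrative dl_iterate_phdr() callback sketch (editorial addition, not
 * part of this file), using the dl_phdr_info fields populated above.
 */
#if 0
#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>

static int print_module(struct dl_phdr_info *info, size_t size, void *data)
{
    (void)size; (void)data;
    printf("%s loaded at %p with %d program headers\n",
           info->dlpi_name, (void *)info->dlpi_addr, (int)info->dlpi_phnum);
    return 0; /* a non-zero return stops the iteration, as checked above */
}

void example_iterate(void)
{
    dl_iterate_phdr(print_module, NULL);
}
#endif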
4353 static void error(const char *fmt, ...)
4354 {
4355 va_list ap;
4356 va_start(ap, fmt);
4357 if (!runtime) {
4358 vdprintf(2, fmt, ap);
4359 dprintf(2, "\n");
4360 ldso_fail = 1;
4361 va_end(ap);
4362 return;
4363 }
4364 __dl_vseterr(fmt, ap);
4365 va_end(ap);
4366 }
4367
4368 int dlns_set_namespace_lib_path(const char * name, const char * lib_path)
4369 {
4370 if (!name || !lib_path) {
4371 LD_LOGE("dlns_set_namespace_lib_path name or lib_path is null.");
4372 return EINVAL;
4373 }
4374
4375 pthread_rwlock_wrlock(&lock);
4376 const void *caller_addr = __builtin_return_address(0);
4377 if (is_permitted(caller_addr, name) == false) {
4378 pthread_rwlock_unlock(&lock);
4379 return EPERM;
4380 }
4381
4382 ns_t* ns = find_ns_by_name(name);
4383 if (!ns) {
4384 pthread_rwlock_unlock(&lock);
4385 LD_LOGE("dlns_set_namespace_lib_path fail, input ns name : [%{public}s] is not found.", name);
4386 return ENOKEY;
4387 }
4388
4389 ns_set_lib_paths(ns, lib_path);
4390 pthread_rwlock_unlock(&lock);
4391 return 0;
4392 }
4393
4394 int dlns_set_namespace_separated(const char * name, const bool separated)
4395 {
4396 if (!name) {
4397 LD_LOGE("dlns_set_namespace_separated name is null.");
4398 return EINVAL;
4399 }
4400
4401 pthread_rwlock_wrlock(&lock);
4402 const void *caller_addr = __builtin_return_address(0);
4403 if (is_permitted(caller_addr, name) == false) {
4404 pthread_rwlock_unlock(&lock);
4405 return EPERM;
4406 }
4407
4408 ns_t* ns = find_ns_by_name(name);
4409 if (!ns) {
4410 pthread_rwlock_unlock(&lock);
4411 LD_LOGE("dlns_set_namespace_separated fail, input ns name : [%{public}s] is not found.", name);
4412 return ENOKEY;
4413 }
4414
4415 ns_set_separated(ns, separated);
4416 pthread_rwlock_unlock(&lock);
4417 return 0;
4418 }
4419
4420 int dlns_set_namespace_permitted_paths(const char * name, const char * permitted_paths)
4421 {
4422 if (!name || !permitted_paths) {
4423 LD_LOGE("dlns_set_namespace_permitted_paths name or permitted_paths is null.");
4424 return EINVAL;
4425 }
4426
4427 pthread_rwlock_wrlock(&lock);
4428 const void *caller_addr = __builtin_return_address(0);
4429 if (is_permitted(caller_addr, name) == false) {
4430 pthread_rwlock_unlock(&lock);
4431 return EPERM;
4432 }
4433
4434 ns_t* ns = find_ns_by_name(name);
4435 if (!ns) {
4436 pthread_rwlock_unlock(&lock);
4437 LD_LOGE("dlns_set_namespace_permitted_paths fail, input ns name : [%{public}s] is not found.", name);
4438 return ENOKEY;
4439 }
4440
4441 ns_set_permitted_paths(ns, permitted_paths);
4442 pthread_rwlock_unlock(&lock);
4443 return 0;
4444 }
4445
4446 int dlns_set_namespace_allowed_libs(const char * name, const char * allowed_libs)
4447 {
4448 if (!name || !allowed_libs) {
4449 LD_LOGE("dlns_set_namespace_allowed_libs name or allowed_libs is null.");
4450 return EINVAL;
4451 }
4452
4453 pthread_rwlock_wrlock(&lock);
4454 const void *caller_addr = __builtin_return_address(0);
4455 if (is_permitted(caller_addr, name) == false) {
4456 pthread_rwlock_unlock(&lock);
4457 return EPERM;
4458 }
4459
4460 ns_t* ns = find_ns_by_name(name);
4461 if (!ns) {
4462 pthread_rwlock_unlock(&lock);
4463 LD_LOGE("dlns_set_namespace_allowed_libs fail, input ns name : [%{public}s] is not found.", name);
4464 return ENOKEY;
4465 }
4466
4467 ns_set_allowed_libs(ns, allowed_libs);
4468 pthread_rwlock_unlock(&lock);
4469 return 0;
4470 }
4471
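/*
 * Illustrative sketch of the dlns_set_namespace_* setters above (editorial
 * addition, not part of this file). The namespace is looked up by name, so it
 * must already have been created; every name and path below is a placeholder.
 */
#if 0
#include <dlfcn.h>
#include <stdbool.h>

int example_configure_ns(void)
{
    int err;
    if ((err = dlns_set_namespace_lib_path("example_ns", "/data/example/libs")) != 0)
        return err;
    if ((err = dlns_set_namespace_separated("example_ns", true)) != 0)
        return err;
    if ((err = dlns_set_namespace_permitted_paths("example_ns", "/data/example")) != 0)
        return err;
    return dlns_set_namespace_allowed_libs("example_ns", "libexample.so");
}
#endif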
4472 int handle_asan_path_open(int fd, const char *name, ns_t *namespace, char *buf, size_t buf_size)
4473 {
4474 LD_LOGD("handle_asan_path_open fd:%{public}d, name:%{public}s , namespace:%{public}s .",
4475 fd,
4476 name,
4477 namespace ? namespace->ns_name : "NULL");
4478 int fd_tmp = fd;
4479 if (fd == -1 && (namespace->asan_lib_paths || namespace->lib_paths)) {
4480 if (namespace->lib_paths && namespace->asan_lib_paths) {
4481 size_t newlen = strlen(namespace->asan_lib_paths) + strlen(namespace->lib_paths) + 2;
4482 char *new_lib_paths = malloc(newlen);
4483 memset(new_lib_paths, 0, newlen);
4484 strcpy(new_lib_paths, namespace->asan_lib_paths);
4485 strcat(new_lib_paths, ":");
4486 strcat(new_lib_paths, namespace->lib_paths);
4487 fd_tmp = path_open(name, new_lib_paths, buf, buf_size);
4488 LD_LOGD("handle_asan_path_open path_open new_lib_paths:%{public}s ,fd: %{public}d.", new_lib_paths, fd_tmp);
4489 free(new_lib_paths);
4490 } else if (namespace->asan_lib_paths) {
4491 fd_tmp = path_open(name, namespace->asan_lib_paths, buf, buf_size);
4492 LD_LOGD("handle_asan_path_open path_open asan_lib_paths:%{public}s ,fd: %{public}d.",
4493 namespace->asan_lib_paths,
4494 fd_tmp);
4495 } else {
4496 fd_tmp = path_open(name, namespace->lib_paths, buf, buf_size);
4497 LD_LOGD(
4498 "handle_asan_path_open path_open lib_paths:%{public}s ,fd: %{public}d.", namespace->lib_paths, fd_tmp);
4499 }
4500 }
4501 return fd_tmp;
4502 }
4503
4504 void* dlopen_ext(const char *file, int mode, const dl_extinfo *extinfo)
4505 {
4506 const void *caller_addr = __builtin_return_address(0);
4507 musl_log_reset();
4508 ld_log_reset();
4509 if (extinfo != NULL) {
4510 if ((extinfo->flag & ~(DL_EXT_VALID_FLAG_BITS)) != 0) {
4511 LD_LOGE("Error dlopen_ext %{public}s: invalid flag %{public}x", file, extinfo->flag);
4512 return NULL;
4513 }
4514 }
4515 LD_LOGI("dlopen_ext file:%{public}s, mode:%{public}x , caller_addr:%{public}p , extinfo->flag:%{public}x",
4516 file,
4517 mode,
4518 caller_addr,
4519 extinfo ? extinfo->flag : 0);
4520 return dlopen_impl(file, mode, NULL, caller_addr, extinfo);
4521 }
4522
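/*
 * Illustrative dlopen_ext() sketch (editorial addition, not part of this
 * file), based on the dl_extinfo fields consumed in dlopen_impl() above
 * (DL_EXT_RESERVED_ADDRESS / _HINT / _RECURSIVE). The reservation size and
 * library name are placeholders.
 */
#if 0
#include <dlfcn.h>
#include <sys/mman.h>
#include "dlfcn_ext.h" /* assumed to declare dl_extinfo and the DL_EXT_* flags */

void *example_dlopen_reserved(void)
{
    size_t size = 64 * 1024 * 1024;
    void *reserved = mmap(NULL, size, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (reserved == MAP_FAILED)
        return NULL;

    dl_extinfo info = {
        /* DL_EXT_RESERVED_ADDRESS_HINT would treat the address as a
         * preference instead of a requirement. */
        .flag = DL_EXT_RESERVED_ADDRESS | DL_EXT_RESERVED_ADDRESS_RECURSIVE,
        .reserved_addr = reserved,
        .reserved_size = size,
    };
    return dlopen_ext("libexample.so", RTLD_NOW, &info);
}
#endif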
4523 #ifdef LOAD_ORDER_RANDOMIZATION
4524 static void open_library_by_path(const char *name, const char *s, struct loadtask *task, struct zip_info *z_info)
4525 {
4526 char *buf = task->buf;
4527 size_t buf_size = sizeof task->buf;
4528 size_t l;
4529 for (;;) {
4530 s += strspn(s, ":\n");
4531 l = strcspn(s, ":\n");
4532 if (l-1 >= INT_MAX) return;
4533 if (snprintf(buf, buf_size, "%.*s/%s", (int)l, s, name) < buf_size) {
4534 char *separator = strstr(buf, ZIP_FILE_PATH_SEPARATOR);
4535 if (separator != NULL) {
4536 int res = open_uncompressed_library_in_zipfile(buf, z_info, separator);
4537 if (res == 0) {
4538 task->fd = z_info->fd;
4539 task->file_offset = z_info->file_offset;
4540 break;
4541 } else {
4542 memset(z_info->path_buf, 0, sizeof(z_info->path_buf));
4543 }
4544 } else {
4545 if ((task->fd = open(buf, O_RDONLY|O_CLOEXEC))>=0) break;
4546 }
4547 }
4548 s += l;
4549 }
4550 return;
4551 }
4552
4553 static void handle_asan_path_open_by_task(int fd, const char *name, ns_t *namespace, struct loadtask *task,
4554 struct zip_info *z_info)
4555 {
4556 LD_LOGD("handle_asan_path_open_by_task fd:%{public}d, name:%{public}s , namespace:%{public}s .",
4557 fd,
4558 name,
4559 namespace ? namespace->ns_name : "NULL");
4560 if (fd == -1 && (namespace->asan_lib_paths || namespace->lib_paths)) {
4561 if (namespace->lib_paths && namespace->asan_lib_paths) {
4562 size_t newlen = strlen(namespace->asan_lib_paths) + strlen(namespace->lib_paths) + 2;
4563 char *new_lib_paths = malloc(newlen);
4564 memset(new_lib_paths, 0, newlen);
4565 strcpy(new_lib_paths, namespace->asan_lib_paths);
4566 strcat(new_lib_paths, ":");
4567 strcat(new_lib_paths, namespace->lib_paths);
4568 open_library_by_path(name, new_lib_paths, task, z_info);
4569 LD_LOGD("handle_asan_path_open_by_task open_library_by_path new_lib_paths:%{public}s ,fd: %{public}d.",
4570 new_lib_paths,
4571 task->fd);
4572 free(new_lib_paths);
4573 } else if (namespace->asan_lib_paths) {
4574 open_library_by_path(name, namespace->asan_lib_paths, task, z_info);
4575 LD_LOGD("handle_asan_path_open_by_task open_library_by_path asan_lib_paths:%{public}s ,fd: %{public}d.",
4576 namespace->asan_lib_paths,
4577 task->fd);
4578 } else {
4579 open_library_by_path(name, namespace->lib_paths, task, z_info);
4580 LD_LOGD("handle_asan_path_open_by_task open_library_by_path lib_paths:%{public}s ,fd: %{public}d.",
4581 namespace->lib_paths,
4582 task->fd);
4583 }
4584 }
4585 return;
4586 }
4587
4588 /* Used to get an uncompress library offset in zip file, then we can use the offset to mmap the library directly. */
4589 int open_uncompressed_library_in_zipfile(const char *path, struct zip_info *z_info, char *separator)
4590 {
4591 struct local_file_header zip_file_header;
4592 struct central_dir_entry c_dir_entry;
4593 struct zip_end_locator end_locator;
4594
4595 /* Use "'!/' to split the path into zipfile path and library path in zipfile.
4596 * For example:
4597 * - path: x/xx/xxx.zip!/x/xx/xxx.so
4598 * - zipfile path: x/xx/xxx.zip
4599 * - library path in zipfile: x/xx/xxx.so */
4600 if (strlcpy(z_info->path_buf, path, PATH_BUF_SIZE) >= PATH_BUF_SIZE) {
4601 LD_LOGE("Open uncompressed library: input path %{public}s is too long.", path);
4602 return -1;
4603 }
4604 z_info->path_buf[separator - path] = '\0';
4605 z_info->file_path_index = separator - path + 2;
4606 char *zip_file_path = z_info->path_buf;
4607 char *lib_path = &z_info->path_buf[z_info->file_path_index];
4608 if (zip_file_path == NULL || lib_path == NULL) {
4609 LD_LOGE("Open uncompressed library: get zip and lib path failed.");
4610 return -1;
4611 }
4612 LD_LOGD("Open uncompressed library: input path: %{public}s, zip file path: %{public}s, library path: %{public}s.",
4613 path, zip_file_path, lib_path);
4614
4615 // Get zip file length
4616 FILE *zip_file = fopen(zip_file_path, "re");
4617 if (zip_file == NULL) {
4618 LD_LOGE("Open uncompressed library: fopen %{public}s failed.", zip_file_path);
4619 return -1;
4620 }
4621 if (fseek(zip_file, 0, SEEK_END) != 0) {
4622 LD_LOGE("Open uncompressed library: fseek SEEK_END failed.");
4623 fclose(zip_file);
4624 return -1;
4625 }
4626 int64_t zip_file_len = ftell(zip_file);
4627 if (zip_file_len == -1) {
4628 LD_LOGE("Open uncompressed library: get zip file length failed.");
4629 fclose(zip_file);
4630 return -1;
4631 }
4632
4633 // Read end of central directory record.
4634 size_t end_locator_len = sizeof(end_locator);
4635 size_t end_locator_pos = zip_file_len - end_locator_len;
4636 if (fseek(zip_file, end_locator_pos, SEEK_SET) != 0) {
4637 LD_LOGE("Open uncompressed library: fseek end locator position failed.");
4638 fclose(zip_file);
4639 return -1;
4640 }
4641 if (fread(&end_locator, sizeof(end_locator), 1, zip_file) != 1 || end_locator.signature != EOCD_SIGNATURE) {
4642 LD_LOGE("Open uncompressed library: fread end locator failed.");
4643 fclose(zip_file);
4644 return -1;
4645 }
4646
4647 char file_name[PATH_BUF_SIZE];
4648 uint64_t current_dir_pos = end_locator.offset;
4649 for (uint16_t i = 0; i < end_locator.total_entries; i++) {
4650 // Read central dir entry.
4651 if (fseek(zip_file, current_dir_pos, SEEK_SET) != 0) {
4652 LD_LOGE("Open uncompressed library: fseek current centra dir entry position failed.");
4653 fclose(zip_file);
4654 return -1;
4655 }
4656 if (fread(&c_dir_entry, sizeof(c_dir_entry), 1, zip_file) != 1 || c_dir_entry.signature != CENTRAL_SIGNATURE) {
4657 LD_LOGE("Open uncompressed library: fread centra dir entry failed.");
4658 fclose(zip_file);
4659 return -1;
4660 }
4661
4662 if (fread(file_name, c_dir_entry.name_size, 1, zip_file) != 1) {
4663 LD_LOGE("Open uncompressed library: fread file name failed.");
4664 fclose(zip_file);
4665 return -1;
4666 }
4667 if (strcmp(file_name, lib_path) == 0) {
4668 // Read local file header.
4669 if (fseek(zip_file, c_dir_entry.local_header_offset, SEEK_SET) != 0) {
4670 LD_LOGE("Open uncompressed library: fseek local file header failed.");
4671 fclose(zip_file);
4672 return -1;
4673 }
4674 if (fread(&zip_file_header, sizeof(zip_file_header), 1, zip_file) != 1) {
4675 LD_LOGE("Open uncompressed library: fread local file header failed.");
4676 fclose(zip_file);
4677 return -1;
4678 }
4679 if (zip_file_header.signature != LOCAL_FILE_HEADER_SIGNATURE) {
4680 LD_LOGE("Open uncompressed library: read local file header signature error.");
4681 fclose(zip_file);
4682 return -1;
4683 }
4684
4685 z_info->file_offset = c_dir_entry.local_header_offset + sizeof(zip_file_header) +
4686 zip_file_header.name_size + zip_file_header.extra_size;
4687 if (zip_file_header.compression_method != COMPRESS_STORED || z_info->file_offset % PAGE_SIZE != 0) {
4688 LD_LOGE("Open uncompressed library: open %{public}s in %{public}s failed because of misalignment or saved with compression."
4689 "compress method %{public}d, file offset %{public}lu",
4690 lib_path, zip_file_path, zip_file_header.compression_method, z_info->file_offset);
4691 fclose(zip_file);
4692 return -2;
4693 }
4694 z_info->found = true;
4695 break;
4696 }
4697
4698 memset(file_name, 0, sizeof(file_name));
4699 current_dir_pos += sizeof(c_dir_entry);
4700 current_dir_pos += c_dir_entry.name_size + c_dir_entry.extra_size + c_dir_entry.comment_size;
4701 }
4702 if (!z_info->found) {
4703 LD_LOGE("Open uncompressed library: %{public}s was not found in %{public}s.", lib_path, zip_file_path);
4704 fclose(zip_file);
4705 return -3;
4706 }
4707 z_info->fd = fileno(zip_file);
4708
4709 return 0;
4710 }
4711
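/*
 * Illustrative sketch (editorial addition, not part of this file): a path as
 * parsed above is just the zip archive path and the member path joined by
 * "!/". The archive and member names are placeholders, and the member must be
 * stored uncompressed at a page-aligned offset for the open to succeed.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

void example_zip_path(char *buf, size_t buf_size)
{
    snprintf(buf, buf_size, "%s!/%s",
             "/data/app/example.zip",    /* zip archive (placeholder) */
             "libs/arm64/libexample.so"  /* uncompressed, page-aligned member */);
}
#endif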
4712 static bool map_library_header(struct loadtask *task)
4713 {
4714 off_t off_start;
4715 Phdr *ph;
4716 size_t i;
4717 size_t str_size;
4718 off_t str_table;
4719
4720 ssize_t l = pread(task->fd, task->ehdr_buf, sizeof task->ehdr_buf, task->file_offset);
4721 task->eh = task->ehdr_buf;
4722 if (l < 0) {
4723 LD_LOGE("Error mapping header %{public}s: failed to read fd errno: %{public}d", task->name, errno);
4724 return false;
4725 }
4726 if (l < sizeof(Ehdr) || (task->eh->e_type != ET_DYN && task->eh->e_type != ET_EXEC)) {
4727 LD_LOGE("Error mapping header %{public}s: invaliled Ehdr l=%{public}d", task->name, l);
4728 goto noexec;
4729 }
4730 task->phsize = task->eh->e_phentsize * task->eh->e_phnum;
4731 if (task->phsize > sizeof task->ehdr_buf - sizeof(Ehdr)) {
4732 task->allocated_buf = malloc(task->phsize);
4733 if (!task->allocated_buf) {
4734 LD_LOGE("Error mapping header %{public}s: failed to alloc memory errno: %{public}d", task->name, errno);
4735 return false;
4736 }
4737 l = pread(task->fd, task->allocated_buf, task->phsize, task->eh->e_phoff + task->file_offset);
4738 if (l < 0) {
4739 LD_LOGE("Error mapping header %{public}s: failed to pread errno: %{public}d", task->name, errno);
4740 goto error;
4741 }
4742 if (l != task->phsize) {
4743 LD_LOGE("Error mapping header %{public}s: unmatched phsize errno: %{public}d", task->name, errno);
4744 goto noexec;
4745 }
4746 ph = task->ph0 = task->allocated_buf;
4747 } else if (task->eh->e_phoff + task->phsize > l) {
4748 l = pread(task->fd, task->ehdr_buf + 1, task->phsize, task->eh->e_phoff + task->file_offset);
4749 if (l < 0) {
4750 LD_LOGE("Error mapping header %{public}s: failed to pread errno: %{public}d", task->name, errno);
4751 goto error;
4752 }
4753 if (l != task->phsize) {
4754 LD_LOGE("Error mapping header %{public}s: unmatched phsize", task->name);
4755 goto noexec;
4756 }
4757 ph = task->ph0 = (void *)(task->ehdr_buf + 1);
4758 } else {
4759 ph = task->ph0 = (void *)((char *)task->ehdr_buf + task->eh->e_phoff);
4760 }
4761
4762 for (i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
4763 if (ph->p_type == PT_DYNAMIC) {
4764 task->dyn = ph->p_vaddr;
4765 } else if (ph->p_type == PT_TLS) {
4766 task->tls_image = ph->p_vaddr;
4767 task->tls.align = ph->p_align;
4768 task->tls.len = ph->p_filesz;
4769 task->tls.size = ph->p_memsz;
4770 }
4771
4772 if (ph->p_type != PT_DYNAMIC) {
4773 continue;
4774 }
4775 // map the dynamic segment and the string table of the library
4776 off_start = ph->p_offset;
4777 off_start &= -PAGE_SIZE;
4778 task->dyn_map_len = ph->p_memsz + (ph->p_offset - off_start);
4779 /* The default value of file_offset is 0.
4780 * The value of file_offset may be greater than 0 when opening library from zip file.
4781 * The value of file_offset is guaranteed to be PAGE_SIZE aligned. */
4782 task->dyn_map = mmap(0, task->dyn_map_len, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
4783 if (task->dyn_map == MAP_FAILED) {
4784 LD_LOGE("Error mapping header %{public}s: failed to map dynamic section errno: %{public}d", task->name, errno);
4785 goto error;
4786 }
4787 task->dyn_addr = (size_t *)((unsigned char *)task->dyn_map + (ph->p_offset - off_start));
4788 size_t dyn_tmp;
4789 if (search_vec(task->dyn_addr, &dyn_tmp, DT_STRTAB)) {
4790 str_table = dyn_tmp;
4791 } else {
4792 LD_LOGE("Error mapping header %{public}s: DT_STRTAB not found", task->name);
4793 goto error;
4794 }
4795 if (search_vec(task->dyn_addr, &dyn_tmp, DT_STRSZ)) {
4796 str_size = dyn_tmp;
4797 } else {
4798 LD_LOGE("Error mapping header %{public}s: DT_STRSZ not found", task->name);
4799 goto error;
4800 }
4801 }
4802
4803 task->shsize = task->eh->e_shentsize * task->eh->e_shnum;
4804 off_start = task->eh->e_shoff;
4805 off_start &= -PAGE_SIZE;
4806 task->shsize += task->eh->e_shoff - off_start;
4807 task->shdr_allocated_buf = mmap(0, task->shsize, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
4808 if (task->shdr_allocated_buf == MAP_FAILED) {
4809 LD_LOGE("Error mapping section header %{public}s: failed to map shdr_allocated_buf errno: %{public}d",
4810 task->name, errno);
4811 goto error;
4812 }
4813 Shdr *sh = (Shdr *)((char *)task->shdr_allocated_buf + task->eh->e_shoff - off_start);
4814 for (i = task->eh->e_shnum; i; i--, sh = (void *)((char *)sh + task->eh->e_shentsize)) {
4815 if (sh->sh_type != SHT_STRTAB || sh->sh_addr != str_table || sh->sh_size != str_size) {
4816 continue;
4817 }
4818 off_start = sh->sh_offset;
4819 off_start &= -PAGE_SIZE;
4820 task->str_map_len = sh->sh_size + (sh->sh_offset - off_start);
4821 task->str_map = mmap(0, task->str_map_len, PROT_READ, MAP_PRIVATE, task->fd, off_start + task->file_offset);
4822 if (task->str_map == MAP_FAILED) {
4823 LD_LOGE("Error mapping section header %{public}s: failed to map string section errno: %{public}d",
4824 task->name, errno);
4825 goto error;
4826 }
4827 task->str_addr = (char *)task->str_map + sh->sh_offset - off_start;
4828 break;
4829 }
4830 if (!task->dyn) {
4831 LD_LOGE("Error mapping header %{public}s: dynamic section not found", task->name);
4832 goto noexec;
4833 }
4834 return true;
4835 noexec:
4836 errno = ENOEXEC;
4837 error:
4838 free(task->allocated_buf);
4839 task->allocated_buf = NULL;
4840 if (task->shdr_allocated_buf != MAP_FAILED) {
4841 munmap(task->shdr_allocated_buf, task->shsize);
4842 task->shdr_allocated_buf = MAP_FAILED;
4843 }
4844 return false;
4845 }
4846
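/*
 * Worked example of the page rounding used below (assuming PAGE_SIZE == 4096):
 * for a PT_LOAD segment with p_vaddr = 0x1234 and p_memsz = 0x5000,
 *   addr_min = 0x1234 & -PAGE_SIZE                           = 0x1000
 *   addr_max = (0x1234 + 0x5000 + PAGE_SIZE - 1) & -PAGE_SIZE = 0x7000
 * so the segment spans pages [0x1000, 0x7000) relative to the load base, and
 * file offsets are rounded down with the same mask before being passed to mmap.
 */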
4847 static bool task_map_library(struct loadtask *task, struct reserved_address_params *reserved_params)
4848 {
4849 size_t addr_min = SIZE_MAX, addr_max = 0, map_len;
4850 size_t this_min, this_max;
4851 size_t nsegs = 0;
4852 off_t off_start;
4853 Phdr *ph = task->ph0;
4854 unsigned prot;
4855 unsigned char *map = MAP_FAILED, *base;
4856 size_t i;
4857 int map_flags = MAP_PRIVATE;
4858 size_t start_addr;
4859 size_t start_alignment = PAGE_SIZE;
4860 bool hugepage_enabled = false;
4861
4862 for (i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
4863 if (ph->p_type == PT_GNU_RELRO) {
4864 task->p->relro_start = ph->p_vaddr & -PAGE_SIZE;
4865 task->p->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
4866 } else if (ph->p_type == PT_GNU_STACK) {
4867 if (!runtime && ph->p_memsz > __default_stacksize) {
4868 __default_stacksize =
4869 ph->p_memsz < DEFAULT_STACK_MAX ?
4870 ph->p_memsz : DEFAULT_STACK_MAX;
4871 }
4872 }
4873 if (ph->p_type != PT_LOAD) {
4874 continue;
4875 }
4876 nsegs++;
4877 if (ph->p_vaddr < addr_min) {
4878 addr_min = ph->p_vaddr;
4879 off_start = ph->p_offset;
4880 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
4881 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
4882 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
4883 }
4884 if (ph->p_vaddr + ph->p_memsz > addr_max) {
4885 addr_max = ph->p_vaddr + ph->p_memsz;
4886 }
4887 }
4888 if (!task->dyn) {
4889 LD_LOGE("Error mapping library: !task->dyn dynamic section not found task->name=%{public}s", task->name);
4890 goto noexec;
4891 }
4892 if (DL_FDPIC && !(task->eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
4893 task->p->loadmap = calloc(1, sizeof(struct fdpic_loadmap) + nsegs * sizeof(struct fdpic_loadseg));
4894 if (!task->p->loadmap) {
4895 LD_LOGE("Error mapping library: calloc failed errno=%{public}d nsegs=%{public}d", errno, nsegs);
4896 goto error;
4897 }
4898 task->p->loadmap->nsegs = nsegs;
4899 for (ph = task->ph0, i = 0; i < nsegs; ph = (void *)((char *)ph + task->eh->e_phentsize)) {
4900 if (ph->p_type != PT_LOAD) {
4901 continue;
4902 }
4903 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
4904 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
4905 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
4906 map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE - 1),
4907 prot, MAP_PRIVATE,
4908 task->fd, (ph->p_offset & -PAGE_SIZE) + task->file_offset);
4909 if (map == MAP_FAILED) {
4910 unmap_library(task->p);
4911 LD_LOGE("Error mapping library: PT_LOAD mmap failed task->name=%{public}s errno=%{public}d map_len=%{public}d",
4912 task->name, errno, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE - 1));
4913 goto error;
4914 }
4915 task->p->loadmap->segs[i].addr = (size_t)map +
4916 (ph->p_vaddr & PAGE_SIZE - 1);
4917 task->p->loadmap->segs[i].p_vaddr = ph->p_vaddr;
4918 task->p->loadmap->segs[i].p_memsz = ph->p_memsz;
4919 i++;
4920 if (prot & PROT_WRITE) {
4921 size_t brk = (ph->p_vaddr & PAGE_SIZE - 1) + ph->p_filesz;
4922 size_t pgbrk = (brk + PAGE_SIZE - 1) & -PAGE_SIZE;
4923 size_t pgend = (brk + ph->p_memsz - ph->p_filesz + PAGE_SIZE - 1) & -PAGE_SIZE;
4924 if (pgend > pgbrk && mmap_fixed(map + pgbrk,
4925 pgend - pgbrk, prot,
4926 MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
4927 -1, off_start) == MAP_FAILED)
4928 LD_LOGE("Error mapping library: PROT_WRITE mmap_fixed failed errno=%{public}d", errno);
4929 goto error;
4930 memset(map + brk, 0, pgbrk - brk);
4931 }
4932 }
4933 map = (void *)task->p->loadmap->segs[0].addr;
4934 map_len = 0;
4935 goto done_mapping;
4936 }
4937 addr_max += PAGE_SIZE - 1;
4938 addr_max &= -PAGE_SIZE;
4939 addr_min &= -PAGE_SIZE;
4940 off_start &= -PAGE_SIZE;
4941 map_len = addr_max - addr_min + off_start;
4942 start_addr = addr_min;
4943
4944 hugepage_enabled = get_transparent_hugepages_supported();
4945 if (hugepage_enabled) {
4946 size_t maxinum_alignment = phdr_table_get_maxinum_alignment(task->ph0, task->eh->e_phnum);
4947
4948 start_alignment = maxinum_alignment == KPMD_SIZE ? KPMD_SIZE : PAGE_SIZE;
4949 }
4950
4951 if (reserved_params) {
4952 if (map_len > reserved_params->reserved_size) {
4953 if (reserved_params->must_use_reserved) {
4954 LD_LOGE("Error mapping library: map len is larger than reserved address task->name=%{public}s", task->name);
4955 goto error;
4956 }
4957 } else {
4958 start_addr = ((size_t)reserved_params->start_addr - 1 + PAGE_SIZE) & -PAGE_SIZE;
4959 map_flags |= MAP_FIXED;
4960 }
4961 }
4962
4963 /* we will find a mapping_align aligned address as the start of dso
4964 * so we need a tmp_map_len as map_len + mapping_align to make sure
4965 * we have enough space to shift the dso to the correct location. */
4966 size_t mapping_align = start_alignment > LIBRARY_ALIGNMENT ? start_alignment : LIBRARY_ALIGNMENT;
4967 size_t tmp_map_len = ALIGN(map_len, mapping_align) + mapping_align - PAGE_SIZE;
4968
4969 /* map the whole range of load segments with PROT_READ first for security reasons. */
4970 prot = PROT_READ;
4971
4972 /* if reserved_params exists, use start_addr as the preferred address for the mmap operation */
4973 if (reserved_params) {
4974 map = DL_NOMMU_SUPPORT
4975 ? mmap((void *)start_addr, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
4976 : mmap((void *)start_addr, map_len, prot, map_flags, task->fd, off_start + task->file_offset);
4977 if (map == MAP_FAILED) {
4978 LD_LOGE("Error mapping library: reserved_params mmap failed errno=%{public}d DL_NOMMU_SUPPORT=%{public}d"
4979 " task->fd=%{public}d task->name=%{public}s map_len=%{public}d",
4980 errno, DL_NOMMU_SUPPORT, task->fd, task->name, map_len);
4981 goto error;
4982 }
4983 if (reserved_params && map_len < reserved_params->reserved_size) {
4984 reserved_params->reserved_size -= (map_len + (start_addr - (size_t)reserved_params->start_addr));
4985 reserved_params->start_addr = (void *)((uint8_t *)map + map_len);
4986 }
4987 /* if reserved_params does not exist, use real_map as the preferred address for the mmap operation */
4988 } else {
4989 /* use tmp_map_len to mmap enough space for the dso with anonymous mapping */
4990 unsigned char *temp_map = mmap((void *)NULL, tmp_map_len, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
4991 if (temp_map == MAP_FAILED) {
4992 LD_LOGE("Error mapping library: !reserved_params mmap failed errno=%{public}d tmp_map_len=%{public}d",
4993 errno, tmp_map_len);
4994 goto error;
4995 }
4996
4997 /* find the mapping_align aligned address */
4998 unsigned char *real_map = (unsigned char*)ALIGN((uintptr_t)temp_map, mapping_align);
4999
5000 /* munmap the space we mapped before so that we can mmap the correct space again */
5001 munmap(temp_map, tmp_map_len);
5002
5003 map = DL_NOMMU_SUPPORT
5004 ? mmap(real_map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
5005 /* use map_len to mmap correct space for the dso with file mapping */
5006 : mmap(real_map, map_len, prot, map_flags, task->fd, off_start + task->file_offset);
5007 if (map == MAP_FAILED) {
5008 LD_LOGE("Error mapping library: !reserved_params mmap failed errno=%{public}d DL_NOMMU_SUPPORT=%{public}d"
5009 "task->fd=%{public}d task->name=%{public}s map_len=%{public}d",
5010 errno, DL_NOMMU_SUPPORT, task->fd, task->name, map_len);
5011 goto error;
5012 }
5013 }
5014 task->p->map = map;
5015 task->p->map_len = map_len;
5016 /* If the loaded file is not relocatable and the requested address is
5017 * not available, then the load operation must fail. */
5018 if (task->eh->e_type != ET_DYN && addr_min && map != (void *)addr_min) {
5019 LD_LOGE("Error mapping library: ET_DYN task->name=%{public}s", task->name);
5020 errno = EBUSY;
5021 goto error;
5022 }
5023 base = map - addr_min;
5024 task->p->phdr = 0;
5025 task->p->phnum = 0;
5026 for (ph = task->ph0, i = task->eh->e_phnum; i; i--, ph = (void *)((char *)ph + task->eh->e_phentsize)) {
5027 if (ph->p_type == PT_OHOS_RANDOMDATA) {
5028 fill_random_data((void *)(ph->p_vaddr + base), ph->p_memsz);
5029 continue;
5030 }
5031 if (ph->p_type != PT_LOAD) {
5032 continue;
5033 }
5034 /* Check if the program headers are in this load segment, and
5035 * if so, record the address for use by dl_iterate_phdr. */
5036 if (!task->p->phdr && task->eh->e_phoff >= ph->p_offset
5037 && task->eh->e_phoff + task->phsize <= ph->p_offset + ph->p_filesz) {
5038 task->p->phdr = (void *)(base + ph->p_vaddr + (task->eh->e_phoff - ph->p_offset));
5039 task->p->phnum = task->eh->e_phnum;
5040 task->p->phentsize = task->eh->e_phentsize;
5041 }
5042 this_min = ph->p_vaddr & -PAGE_SIZE;
5043 this_max = ph->p_vaddr + ph->p_memsz + PAGE_SIZE - 1 & -PAGE_SIZE;
5044 off_start = ph->p_offset & -PAGE_SIZE;
5045 prot = (((ph->p_flags & PF_R) ? PROT_READ : 0) |
5046 ((ph->p_flags & PF_W) ? PROT_WRITE : 0) |
5047 ((ph->p_flags & PF_X) ? PROT_EXEC : 0));
5048         /* Map this LOAD segment at its place in the reservation with MAP_FIXED */
5049 if (mmap_fixed(
5050 base + this_min,
5051 this_max - this_min,
5052 prot, MAP_PRIVATE | MAP_FIXED,
5053 task->fd,
5054 off_start + task->file_offset) == MAP_FAILED) {
5055 LD_LOGE("Error mapping library: mmap fix failed task->name=%{public}s errno=%{public}d", task->name, errno);
5056 goto error;
5057 }
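        /* For executable segments whose alignment matches KPMD_SIZE, advise the
         * kernel (MADV_HUGEPAGE) to back the mapping with transparent huge pages
         * when hugepage support was detected. */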
5058 if ((ph->p_flags & PF_X) && (ph->p_align == KPMD_SIZE) && hugepage_enabled)
5059 madvise(base + this_min, this_max - this_min, MADV_HUGEPAGE);
5060 if (ph->p_memsz > ph->p_filesz && (ph->p_flags & PF_W)) {
5061 size_t brk = (size_t)base + ph->p_vaddr + ph->p_filesz;
5062 size_t pgbrk = brk + PAGE_SIZE - 1 & -PAGE_SIZE;
5063 size_t zeromap_size = (size_t)base + this_max - pgbrk;
5064 memset((void *)brk, 0, pgbrk - brk & PAGE_SIZE - 1);
5065 if (pgbrk - (size_t)base < this_max && mmap_fixed(
5066 (void *)pgbrk,
5067 zeromap_size,
5068 prot,
5069 MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
5070 -1,
5071 0) == MAP_FAILED) {
5072                 LD_LOGE("Error mapping library: PF_W mmap fix failed errno=%{public}d task->name=%{public}s zeromap_size=%{public}zu",
5073                     errno, task->name, zeromap_size);
5074 goto error;
5075 }
5076 set_bss_vma_name(task->p->name, (void *)pgbrk, zeromap_size);
5077 }
5078 }
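    /* Scan the dynamic section for DT_TEXTREL: legacy objects with text
     * relocations need the whole mapping left writable (and executable) so the
     * relocator can patch code pages in place. */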
5079 for (i = 0; ((size_t *)(base + task->dyn))[i]; i += NEXT_DYNAMIC_INDEX) {
5080 if (((size_t *)(base + task->dyn))[i] == DT_TEXTREL) {
5081 if (mprotect(map, map_len, PROT_READ | PROT_WRITE | PROT_EXEC) && errno != ENOSYS) {
5082 LD_LOGE("Error mapping library: mprotect failed task->name=%{public}s errno=%{public}d", task->name, errno);
5083 goto error;
5084 }
5085 break;
5086 }
5087 }
5088 done_mapping:
5089 task->p->base = base;
5090 task->p->dynv = laddr(task->p, task->dyn);
5091 if (task->p->tls.size) {
5092 task->p->tls.image = laddr(task->p, task->tls_image);
5093 }
5094 free(task->allocated_buf);
5095 task->allocated_buf = NULL;
5096 if (task->shdr_allocated_buf != MAP_FAILED) {
5097 munmap(task->shdr_allocated_buf, task->shsize);
5098 task->shdr_allocated_buf = MAP_FAILED;
5099 }
5100 return true;
5101 noexec:
5102 errno = ENOEXEC;
5103 error:
5104 if (map != MAP_FAILED) {
5105 unmap_library(task->p);
5106 }
5107 free(task->allocated_buf);
5108 task->allocated_buf = NULL;
5109 if (task->shdr_allocated_buf != MAP_FAILED) {
5110 munmap(task->shdr_allocated_buf, task->shsize);
5111 task->shdr_allocated_buf = MAP_FAILED;
5112 }
5113 return false;
5114 }
5115
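/* Resolve an already-opened library fd back to a canonical path via
 * /proc/self/fd so that namespace accessibility checks can be applied to
 * libraries found through DT_RPATH/DT_RUNPATH. The resolved path is copied
 * into task->buf. */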
5116 static bool resolve_fd_to_realpath(struct loadtask *task)
5117 {
5118 char proc_self_fd[32];
5119 static char resolved_path[PATH_MAX];
5120
5121 int ret = snprintf(proc_self_fd, sizeof(proc_self_fd), "/proc/self/fd/%d", task->fd);
5122 if (ret < 0 || ret >= sizeof(proc_self_fd)) {
5123 return false;
5124 }
5125 ssize_t len = readlink(proc_self_fd, resolved_path, sizeof(resolved_path) - 1);
5126 if (len < 0) {
5127 return false;
5128 }
5129 resolved_path[len] = '\0';
5130 strncpy(task->buf, resolved_path, PATH_MAX);
5131
5132 return true;
5133 }
5134
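/* First stage of loading a library: resolve the name to a file descriptor
 * (handling the dynamic linker itself, uncompressed libraries inside zip
 * files, namespace search paths, DT_RPATH/DT_RUNPATH, and inherited
 * namespaces), detect libraries that are already loaded by name or by
 * (dev, inode, file offset), map the ELF headers, and allocate the struct
 * dso, including the TLS reservation needed by pre-existing threads. */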
5135 static bool load_library_header(struct loadtask *task)
5136 {
5137 const char *name = task->name;
5138 struct dso *needed_by = task->needed_by;
5139 ns_t *namespace = task->namespace;
5140 bool check_inherited = task->check_inherited;
5141 struct zip_info z_info;
5142
5143 bool map = false;
5144 struct stat st;
5145 size_t alloc_size;
5146 int n_th = 0;
5147 int is_self = 0;
5148
5149 if (!*name) {
5150 errno = EINVAL;
5151 return false;
5152 }
5153
5154 /* Catch and block attempts to reload the implementation itself */
5155 if (name[NAME_INDEX_ZERO] == 'l' && name[NAME_INDEX_ONE] == 'i' && name[NAME_INDEX_TWO] == 'b') {
5156 static const char reserved[] =
5157 "c.pthread.rt.m.dl.util.xnet.";
5158 const char *rp, *next;
5159 for (rp = reserved; *rp; rp = next) {
5160 next = strchr(rp, '.') + 1;
5161 if (strncmp(name + NAME_INDEX_THREE, rp, next - rp) == 0) {
5162 break;
5163 }
5164 }
5165 if (*rp) {
5166 if (ldd_mode) {
5167 /* Track which names have been resolved
5168 * and only report each one once. */
5169 static unsigned reported;
5170 unsigned mask = 1U << (rp - reserved);
5171 if (!(reported & mask)) {
5172 reported |= mask;
5173 dprintf(1, "\t%s => %s (%p)\n",
5174 name, ldso.name,
5175 ldso.base);
5176 }
5177 }
5178 is_self = 1;
5179 }
5180 }
5181 if (!strcmp(name, ldso.name)) {
5182 is_self = 1;
5183 }
5184 if (is_self) {
5185 if (!ldso.prev) {
5186 tail->next = &ldso;
5187 ldso.prev = tail;
5188 tail = &ldso;
5189 ldso.namespace = namespace;
5190 ns_add_dso(namespace, &ldso);
5191 }
5192 task->isloaded = true;
5193 task->p = &ldso;
5194 return true;
5195 }
5196 if (strchr(name, '/')) {
5197 char *separator = strstr(name, ZIP_FILE_PATH_SEPARATOR);
5198 if (separator != NULL) {
5199 int res = open_uncompressed_library_in_zipfile(name, &z_info, separator);
5200 if (!res) {
5201 task->pathname = name;
5202 if (!is_accessible(namespace, task->pathname, g_is_asan, check_inherited)) {
5203 LD_LOGE("Open uncompressed library: check ns accessible failed, pathname %{public}s namespace %{public}s.",
5204 task->pathname, namespace ? namespace->ns_name : "NULL");
5205 task->fd = -1;
5206 } else {
5207 task->fd = z_info.fd;
5208 task->file_offset = z_info.file_offset;
5209 }
5210 } else {
5211 LD_LOGE("Open uncompressed library in zip file failed, name:%{public}s res:%{public}d", name, res);
5212 return false;
5213 }
5214 } else {
5215 task->pathname = name;
5216 if (!is_accessible(namespace, task->pathname, g_is_asan, check_inherited)) {
5217 LD_LOGE("Open absolute_path library: check ns accessible failed, pathname %{public}s namespace %{public}s.",
5218 task->pathname, namespace ? namespace->ns_name : "NULL");
5219 task->fd = -1;
5220 } else {
5221 task->fd = open(name, O_RDONLY | O_CLOEXEC);
5222 }
5223 }
5224 } else {
5225 /* Search for the name to see if it's already loaded */
5226 /* Search in namespace */
5227 task->p = find_library_by_name(name, namespace, check_inherited);
5228 if (task->p) {
5229 task->isloaded = true;
5230 LD_LOGD("find_library_by_name(name=%{public}s ns=%{public}s) already loaded by %{public}s in %{public}s namespace ",
5231 name, namespace->ns_name, task->p->name, task->p->namespace->ns_name);
5232 return true;
5233 }
5234 if (strlen(name) > NAME_MAX) {
5235 LD_LOGE("load_library name length is larger than NAME_MAX:%{public}s.", name);
5236 return false;
5237 }
5238 task->fd = -1;
5239 if (namespace->env_paths) {
5240 open_library_by_path(name, namespace->env_paths, task, &z_info);
5241 }
5242 for (task->p = needed_by; task->fd == -1 && task->p; task->p = task->p->needed_by) {
5243 if (fixup_rpath(task->p, task->buf, sizeof task->buf) < 0) {
5244 task->fd = INVALID_FD_INHIBIT_FURTHER_SEARCH; /* Inhibit further search. */
5245 }
5246 if (task->p->rpath) {
5247 open_library_by_path(name, task->p->rpath, task, &z_info);
5248 if (task->fd != -1 && resolve_fd_to_realpath(task)) {
5249 if (!is_accessible(namespace, task->buf, g_is_asan, check_inherited)) {
5250 LD_LOGE("Open library: check ns accessible failed, name %{public}s namespace %{public}s.",
5251 name, namespace ? namespace->ns_name : "NULL");
5252 close(task->fd);
5253 task->fd = -1;
5254 }
5255 }
5256 }
5257 }
5258 if (g_is_asan) {
5259 handle_asan_path_open_by_task(task->fd, name, namespace, task, &z_info);
5260 LD_LOGD("load_library handle_asan_path_open_by_task fd:%{public}d.", task->fd);
5261 } else {
5262 if (task->fd == -1 && namespace->lib_paths) {
5263 open_library_by_path(name, namespace->lib_paths, task, &z_info);
5264 LD_LOGD("load_library no asan lib_paths path_open fd:%{public}d.", task->fd);
5265 }
5266 }
5267 task->pathname = task->buf;
5268 }
5269 if (task->fd < 0) {
5270 if (!check_inherited || !namespace->ns_inherits) {
5271 LD_LOGE("Error loading header %{public}s, namespace %{public}s has no inherits, errno=%{public}d",
5272 task->name, namespace->ns_name, errno);
5273 return false;
5274 }
5275         /* Load the library from an inherited namespace. Do not check inherits again. */
5276 for (size_t i = 0; i < namespace->ns_inherits->num; i++) {
5277 ns_inherit *inherit = namespace->ns_inherits->inherits[i];
5278 if (strchr(name, '/') == 0 && !is_sharable(inherit, name)) {
5279 continue;
5280 }
5281 task->namespace = inherit->inherited_ns;
5282 task->check_inherited = false;
5283 if (load_library_header(task)) {
5284 return true;
5285 }
5286 }
5287 LD_LOGE("Error loading header: can't find library %{public}s in namespace: %{public}s",
5288 task->name, namespace->ns_name);
5289 return false;
5290 }
5291
5292 if (fstat(task->fd, &st) < 0) {
5293 LD_LOGE("Error loading header %{public}s: failed to get file state errno=%{public}d", task->name, errno);
5294 close(task->fd);
5295 task->fd = -1;
5296 return false;
5297 }
5298 /* Search in namespace */
5299 task->p = find_library_by_fstat(&st, namespace, check_inherited, task->file_offset);
5300 if (task->p) {
5301 /* If this library was previously loaded with a
5302 * pathname but a search found the same inode,
5303 * setup its shortname so it can be found by name. */
5304 if (!task->p->shortname && task->pathname != name) {
5305 task->p->shortname = strrchr(task->p->name, '/') + 1;
5306 }
5307 close(task->fd);
5308 task->fd = -1;
5309 task->isloaded = true;
5310 LD_LOGD("find_library_by_fstat(name=%{public}s ns=%{public}s) already loaded by %{public}s in %{public}s namespace ",
5311 name, namespace->ns_name, task->p->name, task->p->namespace->ns_name);
5312 return true;
5313 }
5314
5315 map = noload ? 0 : map_library_header(task);
5316 if (!map) {
5317 LD_LOGE("Error loading header %{public}s: failed to map header", task->name);
5318 close(task->fd);
5319 task->fd = -1;
5320 return false;
5321 }
5322
5323 /* Allocate storage for the new DSO. When there is TLS, this
5324 * storage must include a reservation for all pre-existing
5325 * threads to obtain copies of both the new TLS, and an
5326 * extended DTV capable of storing an additional slot for
5327 * the newly-loaded DSO. */
5328 alloc_size = sizeof(struct dso) + strlen(task->pathname) + 1;
5329 if (runtime && task->tls.size) {
5330 size_t per_th = task->tls.size + task->tls.align + sizeof(void *) * (tls_cnt + TLS_CNT_INCREASE);
5331 n_th = libc.threads_minus_1 + 1;
5332 if (n_th > SSIZE_MAX / per_th) {
5333 alloc_size = SIZE_MAX;
5334 } else {
5335 alloc_size += n_th * per_th;
5336 }
5337 }
5338 task->p = calloc(1, alloc_size);
5339 if (!task->p) {
5340 LD_LOGE("Error loading header %{public}s: failed to allocate dso", task->name);
5341 close(task->fd);
5342 task->fd = -1;
5343 return false;
5344 }
5345 task->p->dev = st.st_dev;
5346 task->p->ino = st.st_ino;
5347 task->p->file_offset = task->file_offset;
5348 task->p->needed_by = needed_by;
5349 task->p->name = task->p->buf;
5350 strcpy(task->p->name, task->pathname);
5351 task->p->tls = task->tls;
5352 task->p->dynv = task->dyn_addr;
5353 task->p->strings = task->str_addr;
5354 size_t rpath_offset;
5355 size_t runpath_offset;
5356 if (search_vec(task->p->dynv, &rpath_offset, DT_RPATH))
5357 task->p->rpath_orig = task->p->strings + rpath_offset;
5358 if (search_vec(task->p->dynv, &runpath_offset, DT_RUNPATH))
5359 task->p->rpath_orig = task->p->strings + runpath_offset;
5360
5361 /* Add a shortname only if name arg was not an explicit pathname. */
5362 if (task->pathname != name) {
5363 task->p->shortname = strrchr(task->p->name, '/') + 1;
5364 }
5365
5366 if (task->p->tls.size) {
5367 task->p->tls_id = ++tls_cnt;
5368 task->p->new_dtv = (void *)(-sizeof(size_t) &
5369 (uintptr_t)(task->p->name + strlen(task->p->name) + sizeof(size_t)));
5370 task->p->new_tls = (void *)(task->p->new_dtv + n_th * (tls_cnt + 1));
5371 }
5372
5373 tail->next = task->p;
5374 task->p->prev = tail;
5375 tail = task->p;
5376
5377 /* Add dso to namespace */
5378 task->p->namespace = namespace;
5379 ns_add_dso(namespace, task->p);
5380 return true;
5381 }
5382
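/* Second stage of loading: map the segments of a library whose headers were
 * already processed by load_library_header. Also guards against mapping a
 * second copy of libc into the process, reclaims unused gaps when loading
 * before runtime, reports the mapping in ldd mode, and notifies the optional
 * load hook. */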
5383 static void task_load_library(struct loadtask *task, struct reserved_address_params *reserved_params)
5384 {
5385 LD_LOGD("load_library loading ns=%{public}s name=%{public}s by_dlopen=%{public}d", task->namespace->ns_name, task->p->name, runtime);
5386 bool map = noload ? 0 : task_map_library(task, reserved_params);
5387 __close(task->fd);
5388 task->fd = -1;
5389 if (!map) {
5390 LD_LOGE("Error loading library %{public}s: failed to map library noload=%{public}d errno=%{public}d",
5391 task->name, noload, errno);
5392 error("Error loading library %s: failed to map library noload=%d errno=%d", task->name, noload, errno);
5393 if (runtime) {
5394 longjmp(*rtld_fail, 1);
5395 }
5396 return;
5397     }
5398
5399 /* Avoid the danger of getting two versions of libc mapped into the
5400 * same process when an absolute pathname was used. The symbols
5401 * checked are chosen to catch both musl and glibc, and to avoid
5402 * false positives from interposition-hack libraries. */
5403 decode_dyn(task->p);
5404 if (find_sym(task->p, "__libc_start_main", 1).sym &&
5405 find_sym(task->p, "stdin", 1).sym) {
5406 do_dlclose(task->p);
5407 task->p = NULL;
5408 free((void*)task->name);
5409 task->name = ld_strdup("libc.so");
5410 task->check_inherited = true;
5411 if (!load_library_header(task)) {
5412 LD_LOGE("Error loading library %{public}s: failed to load libc.so", task->name);
5413 error("Error loading library %s: failed to load libc.so", task->name);
5414 if (runtime) {
5415 longjmp(*rtld_fail, 1);
5416 }
5417 }
5418 return;
5419 }
5420 /* Past this point, if we haven't reached runtime yet, ldso has
5421 * committed either to use the mapped library or to abort execution.
5422 * Unmapping is not possible, so we can safely reclaim gaps. */
5423 if (!runtime) {
5424 reclaim_gaps(task->p);
5425 }
5426 task->p->runtime_loaded = runtime;
5427 if (runtime)
5428 task->p->by_dlopen = 1;
5429
5430 if (DL_FDPIC) {
5431 makefuncdescs(task->p);
5432 }
5433
5434 if (ldd_mode) {
5435 dprintf(1, "\t%s => %s (%p)\n", task->name, task->pathname, task->p->base);
5436 }
5437
5438 #ifdef ENABLE_HWASAN
5439 if (libc.load_hook) {
5440 libc.load_hook((long unsigned int)task->p->base, task->p->phdr, task->p->phnum);
5441 }
5442 #endif
5443 }
5444
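/* Populate p->deps with the DSOs named by p's DT_NEEDED entries, creating
 * header-load tasks for dependencies that are not loaded yet and queueing
 * them on `tasks` for the later mapping pass. Failures report an error and,
 * at runtime, longjmp to rtld_fail. */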
5445 static void preload_direct_deps(struct dso *p, ns_t *namespace, struct loadtasks *tasks)
5446 {
5447 size_t i, cnt = 0;
5448 if (p->deps) {
5449 return;
5450 }
5451 /* For head, all preloads are direct pseudo-dependencies.
5452 * Count and include them now to avoid realloc later. */
5453 if (p == head) {
5454 for (struct dso *q = p->next; q; q = q->next) {
5455 cnt++;
5456 }
5457 }
5458 for (i = 0; p->dynv[i]; i += NEXT_DYNAMIC_INDEX) {
5459 if (p->dynv[i] == DT_NEEDED) {
5460 cnt++;
5461 }
5462 }
5463 /* Use builtin buffer for apps with no external deps, to
5464 * preserve property of no runtime failure paths. */
5465 p->deps = (p == head && cnt < MIN_DEPS_COUNT) ? builtin_deps :
5466 calloc(cnt + 1, sizeof *p->deps);
5467 if (!p->deps) {
5468 LD_LOGE("Error loading dependencies for %{public}s", p->name);
5469 error("Error loading dependencies for %s", p->name);
5470 if (runtime) {
5471 longjmp(*rtld_fail, 1);
5472 }
5473 }
5474 cnt = 0;
5475 if (p == head) {
5476 for (struct dso *q = p->next; q; q = q->next) {
5477 p->deps[cnt++] = q;
5478 }
5479 }
5480 for (i = 0; p->dynv[i]; i += NEXT_DYNAMIC_INDEX) {
5481 if (p->dynv[i] != DT_NEEDED) {
5482 continue;
5483 }
5484 const char* dtneed_name = p->strings + p->dynv[i + 1];
5485 LD_LOGD("load_library %{public}s adding DT_NEEDED task %{public}s namespace(%{public}s)", p->name, dtneed_name, namespace->ns_name);
5486 struct loadtask *task = create_loadtask(dtneed_name, p, namespace, true);
5487 if (!task) {
5488 LD_LOGE("Error loading dependencies %{public}s : create load task failed", p->name);
5489 error("Error loading dependencies for %s : create load task failed", p->name);
5490 if (runtime) {
5491 longjmp(*rtld_fail, 1);
5492 }
5493 continue;
5494 }
5495 LD_LOGD("loading shared library %{public}s: (needed by %{public}s)", p->strings + p->dynv[i+1], p->name);
5496 if (!load_library_header(task)) {
5497 free_task(task);
5498 task = NULL;
5499 LD_LOGE("Error loading shared library %{public}s: (needed by %{public}s)",
5500 p->strings + p->dynv[i + 1],
5501 p->name);
5502 error("Error loading shared library %s: %m (needed by %s)",
5503 p->strings + p->dynv[i + 1], p->name);
5504 if (runtime) {
5505 longjmp(*rtld_fail, 1);
5506 }
5507 continue;
5508 }
5509 p->deps[cnt++] = task->p;
5510 if (task->isloaded) {
5511 free_task(task);
5512 task = NULL;
5513 } else {
5514 append_loadtasks(tasks, task);
5515 }
5516 }
5517 p->deps[cnt] = 0;
5518 p->ndeps_direct = cnt;
5519 for (i = 0; i < p->ndeps_direct; i++) {
5520 add_dso_parent(p->deps[i], p);
5521 }
5522 }
5523
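/* Drop the temporary header-stage mappings of each task's dynamic section and
 * string table, clearing the corresponding dso pointers so they are not used
 * after the memory is unmapped. */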
5524 static void unmap_preloaded_sections(struct loadtasks *tasks)
5525 {
5526 struct loadtask *task = NULL;
5527 for (size_t i = 0; i < tasks->length; i++) {
5528 task = get_loadtask(tasks, i);
5529 if (!task) {
5530 continue;
5531 }
5532 if (task->dyn_map_len) {
5533 munmap(task->dyn_map, task->dyn_map_len);
5534 task->dyn_map = NULL;
5535 task->dyn_map_len = 0;
5536 if (task->p) {
5537 task->p->dynv = NULL;
5538 }
5539 }
5540 if (task->str_map_len) {
5541 munmap(task->str_map, task->str_map_len);
5542 task->str_map = NULL;
5543 task->str_map_len = 0;
5544 if (task->p) {
5545 task->p->strings = NULL;
5546 }
5547 }
5548 }
5549 }
5550
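/* Walk the DSO list starting at p and load the direct dependencies of each
 * entry; DSOs appended to the list during the walk get their own dependencies
 * processed when the walk reaches them. */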
5551 static void preload_deps(struct dso *p, struct loadtasks *tasks)
5552 {
5553 if (p->deps) {
5554 return;
5555 }
5556 for (; p; p = p->next) {
5557 preload_direct_deps(p, p->namespace, tasks);
5558 }
5559 }
5560
5561 static void run_loadtasks(struct loadtasks *tasks, struct reserved_address_params *reserved_params)
5562 {
5563 struct loadtask *task = NULL;
5564 bool reserved_address = false;
5565 for (size_t i = 0; i < tasks->length; i++) {
5566 task = get_loadtask(tasks, i);
5567 if (task) {
5568 if (reserved_params) {
5569 reserved_address = reserved_params->reserved_address_recursive || (reserved_params->target == task->p);
5570 }
5571 task_load_library(task, reserved_address ? reserved_params : NULL);
5572 }
5573 }
5574 }
5575
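/* Assign static TLS offsets for every DSO with a TLS image and link each TLS
 * descriptor into libc's TLS list. Offsets grow away from the thread pointer:
 * upward on TLS_ABOVE_TP targets, downward otherwise. */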
5576 UT_STATIC void assign_tls(struct dso *p)
5577 {
5578 while (p) {
5579 if (p->tls.image) {
5580 tls_align = MAXP2(tls_align, p->tls.align);
5581 #ifdef TLS_ABOVE_TP
5582 p->tls.offset = tls_offset + ((p->tls.align - 1) &
5583 (-tls_offset + (uintptr_t)p->tls.image));
5584 tls_offset = p->tls.offset + p->tls.size;
5585 #else
5586 tls_offset += p->tls.size + p->tls.align - 1;
5587 tls_offset -= (tls_offset + (uintptr_t)p->tls.image)
5588 & (p->tls.align - 1);
5589 p->tls.offset = tls_offset;
5590 #endif
5591 if (tls_tail) {
5592 tls_tail->next = &p->tls;
5593 } else {
5594 libc.tls_head = &p->tls;
5595 }
5596 tls_tail = &p->tls;
5597 }
5598
5599 p = p->next;
5600 }
5601 }
5602
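/* Parse a colon- or whitespace-separated list of library names (LD_PRELOAD
 * style) and load the header of each one into the given namespace, queueing
 * tasks for those that are not already loaded. */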
5603 UT_STATIC void load_preload(char *s, ns_t *ns, struct loadtasks *tasks)
5604 {
5605 int tmp;
5606 char *z;
5607
5608 struct loadtask *task = NULL;
5609 for (z = s; *z; s = z) {
5610 for (; *s && (isspace(*s) || *s == ':'); s++) {
5611 ;
5612 }
5613 for (z = s; *z && !isspace(*z) && *z != ':'; z++) {
5614 ;
5615 }
5616 tmp = *z;
5617 *z = 0;
5618 task = create_loadtask(s, NULL, ns, true);
5619 if (!task) {
5620 continue;
5621 }
5622 if (load_library_header(task)) {
5623 if (!task->isloaded) {
5624 append_loadtasks(tasks, task);
5625 task = NULL;
5626 }
5627 }
5628 if (task) {
5629 free_task(task);
5630 }
5631 *z = tmp;
5632 }
5633 }
5634 #endif
5635
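/* Write this DSO's relocated GNU_RELRO segment to fd at *file_offset, then map
 * that file content read-only over the segment so unmodified RELRO pages can
 * be shared through the page cache. */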
5636 static int serialize_gnu_relro(int fd, struct dso *dso, ssize_t *file_offset)
5637 {
5638 ssize_t count = dso->relro_end - dso->relro_start;
5639 ssize_t offset = 0;
5640 while (count > 0) {
5641 ssize_t write_size = TEMP_FAILURE_RETRY(write(fd, laddr(dso, dso->relro_start + offset), count));
5642 if (-1 == write_size) {
5643 LD_LOGE("Error serializing relro %{public}s: failed to write GNU_RELRO", dso->name);
5644 return -1;
5645 }
5646 offset += write_size;
5647 count -= write_size;
5648 }
5649
5650 ssize_t size = dso->relro_end - dso->relro_start;
5651 void *map = mmap(
5652 laddr(dso, dso->relro_start),
5653 size,
5654 PROT_READ,
5655 MAP_PRIVATE | MAP_FIXED,
5656 fd,
5657 *file_offset);
5658 if (map == MAP_FAILED) {
5659 LD_LOGE("Error serializing relro %{public}s: failed to map GNU_RELRO", dso->name);
5660 return -1;
5661 }
5662 *file_offset += size;
5663 return 0;
5664 }
5665
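/* Compare this DSO's in-memory GNU_RELRO segment, page by page, with the copy
 * previously serialized at *file_offset in fd, and remap the runs of identical
 * pages from the file so they are shared rather than duplicated. */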
5666 static int map_gnu_relro(int fd, struct dso *dso, ssize_t *file_offset)
5667 {
5668 ssize_t ext_fd_file_size = 0;
5669 struct stat ext_fd_file_stat;
5670 if (TEMP_FAILURE_RETRY(fstat(fd, &ext_fd_file_stat)) != 0) {
5671 LD_LOGE("Error mapping relro %{public}s: failed to get file state", dso->name);
5672 return -1;
5673 }
5674 ext_fd_file_size = ext_fd_file_stat.st_size;
5675
5676 void *ext_temp_map = MAP_FAILED;
5677 ext_temp_map = mmap(NULL, ext_fd_file_size, PROT_READ, MAP_PRIVATE, fd, 0);
5678 if (ext_temp_map == MAP_FAILED) {
5679 LD_LOGE("Error mapping relro %{public}s: failed to map fd", dso->name);
5680 return -1;
5681 }
5682
5683 char *file_base = (char *)(ext_temp_map) + *file_offset;
5684 char *mem_base = (char *)(laddr(dso, dso->relro_start));
5685 ssize_t start_offset = 0;
5686 ssize_t size = dso->relro_end - dso->relro_start;
5687
5688     if (size > ext_fd_file_size - *file_offset) {
5689         LD_LOGE("Error mapping relro %{public}s: invalid file size", dso->name);
        munmap(ext_temp_map, ext_fd_file_size); /* do not leak the temporary mapping on this error path */
5690         return -1;
5691     }
5692 while (start_offset < size) {
5693 // Find start location.
5694 while (start_offset < size) {
5695 if (memcmp(mem_base + start_offset, file_base + start_offset, PAGE_SIZE) == 0) {
5696 break;
5697 }
5698 start_offset += PAGE_SIZE;
5699 }
5700
5701 // Find end location.
5702 ssize_t end_offset = start_offset;
5703 while (end_offset < size) {
5704 if (memcmp(mem_base + end_offset, file_base + end_offset, PAGE_SIZE) != 0) {
5705 break;
5706 }
5707 end_offset += PAGE_SIZE;
5708 }
5709
5710 // Map pages.
5711 ssize_t map_length = end_offset - start_offset;
5712 ssize_t map_offset = *file_offset + start_offset;
5713 if (map_length > 0) {
5714 void *map = mmap(
5715 mem_base + start_offset, map_length, PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, map_offset);
5716 if (map == MAP_FAILED) {
5717 LD_LOGE("Error mapping relro %{public}s: failed to map GNU_RELRO", dso->name);
5718 munmap(ext_temp_map, ext_fd_file_size);
5719 return -1;
5720 }
5721 }
5722
5723 start_offset = end_offset;
5724 }
5725 *file_offset += size;
5726 munmap(ext_temp_map, ext_fd_file_size);
5727 return 0;
5728 }
5729
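/* Dispatch RELRO sharing requested through dl_extinfo: DL_EXT_WRITE_RELRO
 * serializes the segment to the caller-provided fd, DL_EXT_USE_RELRO maps a
 * previously serialized copy back in. Failures abort the dlopen via rtld_fail
 * when called at runtime. */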
5730 static void handle_relro_sharing(struct dso *p, const dl_extinfo *extinfo, ssize_t *relro_fd_offset)
5731 {
5732 if (extinfo == NULL) {
5733 return;
5734 }
5735 if (extinfo->flag & DL_EXT_WRITE_RELRO) {
5736 LD_LOGD("Serializing GNU_RELRO %{public}s", p->name);
5737 if (serialize_gnu_relro(extinfo->relro_fd, p, relro_fd_offset) < 0) {
5738 LD_LOGE("Error serializing GNU_RELRO %{public}s", p->name);
5739 error("Error serializing GNU_RELRO");
5740 if (runtime) longjmp(*rtld_fail, 1);
5741 }
5742 } else if (extinfo->flag & DL_EXT_USE_RELRO) {
5743 LD_LOGD("Mapping GNU_RELRO %{public}s", p->name);
5744 if (map_gnu_relro(extinfo->relro_fd, p, relro_fd_offset) < 0) {
5745 LD_LOGE("Error mapping GNU_RELRO %{public}s", p->name);
5746 error("Error mapping GNU_RELRO");
5747 if (runtime) longjmp(*rtld_fail, 1);
5748 }
5749 }
5750 }
5751
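/* Label the anonymous zero-fill (.bss) portion of a mapping as
 * "<libname>.bss" using PR_SET_VMA_ANON_NAME so it is identifiable in
 * /proc/<pid>/maps. */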
5752 static void set_bss_vma_name(char *path_name, void *addr, size_t zeromap_size)
5753 {
5754 char so_bss_name[ANON_NAME_MAX_LEN];
5755 if (path_name == NULL) {
5756 snprintf(so_bss_name, ANON_NAME_MAX_LEN, ".bss");
5757 } else {
5758 char *t = strrchr(path_name, '/');
5759 if (t) {
5760 snprintf(so_bss_name, ANON_NAME_MAX_LEN, "%s.bss", ++t);
5761 } else {
5762 snprintf(so_bss_name, ANON_NAME_MAX_LEN, "%s.bss", path_name);
5763 }
5764 }
5765
5766 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, zeromap_size, so_bss_name);
5767 }
5768
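/* Walk the program headers of a mapped DSO and apply the .bss VMA name to the
 * writable zero-fill tail of each PT_LOAD segment. */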
5769 static void find_and_set_bss_name(struct dso *p)
5770 {
5771 size_t cnt;
5772 Phdr *ph = p->phdr;
5773 for (cnt = p->phnum; cnt--; ph = (void *)((char *)ph + p->phentsize)) {
5774 if (ph->p_type != PT_LOAD) continue;
5775 size_t seg_start = p->base + ph->p_vaddr;
5776 size_t seg_file_end = seg_start + ph->p_filesz + PAGE_SIZE - 1 & -PAGE_SIZE;
5777 size_t seg_max_addr = seg_start + ph->p_memsz + PAGE_SIZE - 1 & -PAGE_SIZE;
5778 size_t zeromap_size = seg_max_addr - seg_file_end;
5779 if (zeromap_size > 0 && (ph->p_flags & PF_W)) {
5780 set_bss_vma_name(p->name, (void *)seg_file_end, zeromap_size);
5781 }
5782 }
5783 }
5784
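/* Publish the initial link map to the debugger interface: fill in the debug
 * structure, add every currently loaded DSO to the debug map, mark the state
 * RT_CONSISTENT, and fire the debugger breakpoint hook. */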
5785 static void sync_with_debugger(void)
5786 {
5787 debug.ver = 1;
5788 debug.bp = dl_debug_state;
5789 debug.head = NULL;
5790 debug.base = ldso.base;
5791
5792 add_dso_info_to_debug_map(head);
5793
5794 debug.state = RT_CONSISTENT;
5795 _dl_debug_state();
5796 }
5797
5798 static void notify_addition_to_debugger(struct dso *p)
5799 {
5800 debug.state = RT_ADD;
5801 _dl_debug_state();
5802
5803 add_dso_info_to_debug_map(p);
5804
5805 debug.state = RT_CONSISTENT;
5806 _dl_debug_state();
5807 }
5808
5809 static void notify_remove_to_debugger(struct dso *p)
5810 {
5811 debug.state = RT_DELETE;
5812 _dl_debug_state();
5813
5814 remove_dso_info_from_debug_map(p);
5815
5816 debug.state = RT_CONSISTENT;
5817 _dl_debug_state();
5818 }
5819
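/* Append a dso_debug_info node for p and every DSO after it on the list to
 * the doubly linked debug map walked by debuggers. Allocation failures skip
 * the entry rather than aborting the load. */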
5820 static void add_dso_info_to_debug_map(struct dso *p)
5821 {
5822 for (struct dso *so = p; so != NULL; so = so->next) {
5823 struct dso_debug_info *debug_info = malloc(sizeof(struct dso_debug_info));
5824 if (debug_info == NULL) {
5825 LD_LOGE("malloc error! dso name: %{public}s.", so->name);
5826 continue;
5827 }
5828 #if DL_FDPIC
5829 debug_info->loadmap = so->loadmap;
5830 #else
5831 debug_info->base = so->base;
5832 #endif
5833 debug_info->name = so->name;
5834 debug_info->dynv = so->dynv;
5835 if (debug.head == NULL) {
5836 debug_info->prev = NULL;
5837 debug_info->next = NULL;
5838 debug.head = debug_tail = debug_info;
5839 } else {
5840 debug_info->prev = debug_tail;
5841 debug_info->next = NULL;
5842 debug_tail->next = debug_info;
5843 debug_tail = debug_info;
5844 }
5845 so->debug_info = debug_info;
5846 }
5847 }
5848
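/* Unlink p's node from the debug map and free it. This assumes the entries
 * added at startup are never removed, so a node being unlinked here always
 * has a predecessor. */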
5849 static void remove_dso_info_from_debug_map(struct dso *p)
5850 {
5851 struct dso_debug_info *debug_info = p->debug_info;
5852 if (debug_info == debug_tail) {
5853 debug_tail = debug_tail->prev;
5854 debug_tail->next = NULL;
5855 } else {
5856 debug_info->next->prev = debug_info->prev;
5857 debug_info->prev->next = debug_info->next;
5858 }
5859 free(debug_info);
5860 }
5861