1 //===-- sanitizer_linux_libcdep.cc ----------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is shared between AddressSanitizer and ThreadSanitizer
11 // run-time libraries and implements linux-specific functions from
12 // sanitizer_libc.h.
13 //===----------------------------------------------------------------------===//
14
15 #include "sanitizer_platform.h"
16 #if SANITIZER_FREEBSD || SANITIZER_LINUX
17
18 #include "sanitizer_common.h"
19 #include "sanitizer_flags.h"
20 #include "sanitizer_linux.h"
21 #include "sanitizer_placement_new.h"
22 #include "sanitizer_procmaps.h"
23 #include "sanitizer_stacktrace.h"
24 #include "sanitizer_atomic.h"
25 #include "sanitizer_symbolizer.h"
26
27 #include <dlfcn.h>
28 #include <pthread.h>
29 #include <signal.h>
30 #include <sys/resource.h>
31 #if SANITIZER_FREEBSD
32 #define _GNU_SOURCE // to declare _Unwind_Backtrace() from <unwind.h>
33 #endif
34 #include <unwind.h>
35
36 #if SANITIZER_FREEBSD
37 #include <pthread_np.h>
38 #define pthread_getattr_np pthread_attr_get_np
39 #endif
40
41 #if SANITIZER_LINUX
42 #include <sys/prctl.h>
43 #endif
44
45 #if !SANITIZER_ANDROID
46 #include <elf.h>
47 #include <link.h>
48 #include <unistd.h>
49 #endif
50
51 namespace __sanitizer {
52
// This function is defined elsewhere if we intercepted pthread_attr_getstack.
extern "C" {
// Weak symbol: non-null only when the tool's interceptor library is linked in.
SANITIZER_WEAK_ATTRIBUTE int
real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
} // extern "C"
58
my_pthread_attr_getstack(void * attr,void ** addr,size_t * size)59 static int my_pthread_attr_getstack(void *attr, void **addr, size_t *size) {
60 if (real_pthread_attr_getstack)
61 return real_pthread_attr_getstack((pthread_attr_t *)attr, addr, size);
62 return pthread_attr_getstack((pthread_attr_t *)attr, addr, size);
63 }
64
// Weak symbol: resolved to the interceptor's sigaction when it is linked in.
SANITIZER_WEAK_ATTRIBUTE int
real_sigaction(int signum, const void *act, void *oldact);
67
internal_sigaction(int signum,const void * act,void * oldact)68 int internal_sigaction(int signum, const void *act, void *oldact) {
69 if (real_sigaction)
70 return real_sigaction(signum, act, oldact);
71 return sigaction(signum, (struct sigaction *)act, (struct sigaction *)oldact);
72 }
73
// Computes [*stack_bottom, *stack_top) for the current thread.
// |at_initialization| must be true on the main thread, where libpthread may
// not be functional yet; in that case the bounds are derived from the memory
// mappings and RLIMIT_STACK instead of pthread attributes.
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  if (at_initialization) {
    // This is the main thread. Libpthread may not be initialized yet.
    struct rlimit rl;
    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);

    // Find the mapping that contains a stack variable.
    MemoryMappingLayout proc_maps(/*cache_enabled*/true);
    uptr start, end, offset;
    uptr prev_end = 0;
    while (proc_maps.Next(&start, &end, &offset, 0, 0, /* protection */0)) {
      if ((uptr)&rl < end)
        break;
      prev_end = end;
    }
    // The local |rl| must lie inside the mapping found above.
    CHECK((uptr)&rl >= start && (uptr)&rl < end);

    // Get stacksize from rlimit, but clip it so that it does not overlap
    // with other mappings.
    uptr stacksize = rl.rlim_cur;
    if (stacksize > end - prev_end)
      stacksize = end - prev_end;
    // When running with unlimited stack size, we still want to set some limit.
    // The unlimited stack size is caused by 'ulimit -s unlimited'.
    // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
    if (stacksize > kMaxThreadStackSize)
      stacksize = kMaxThreadStackSize;
    *stack_top = end;
    *stack_bottom = end - stacksize;
    return;
  }
  // Secondary thread: libpthread knows the exact stack placement.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  uptr stacksize = 0;
  void *stackaddr = 0;
  my_pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
  pthread_attr_destroy(&attr);

  CHECK_LE(stacksize, kMaxThreadStackSize);  // Sanity check.
  *stack_top = (uptr)stackaddr + stacksize;
  *stack_bottom = (uptr)stackaddr;
}
120
SetEnv(const char * name,const char * value)121 bool SetEnv(const char *name, const char *value) {
122 void *f = dlsym(RTLD_NEXT, "setenv");
123 if (f == 0)
124 return false;
125 typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);
126 setenv_ft setenv_f;
127 CHECK_EQ(sizeof(setenv_f), sizeof(f));
128 internal_memcpy(&setenv_f, &f, sizeof(f));
129 return IndirectExternCall(setenv_f)(name, value, 1) == 0;
130 }
131
// Set the current thread's name via prctl(PR_SET_NAME). Returns true on
// success; always false on platforms without PR_SET_NAME.
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
  int res = prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0);  // NOLINT
  return res == 0;
#else
  return false;
#endif
}
139
// Copies the current thread's name into |name|, truncated to max_len chars,
// and NUL-terminates it. NOTE: writes name[max_len], so the caller must pass
// a buffer of at least max_len + 1 bytes.
bool SanitizerGetThreadName(char *name, int max_len) {
#ifdef PR_GET_NAME
  char buff[17];  // prctl thread names are at most 16 bytes including the NUL.
  if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
    return false;
  internal_strncpy(name, buff, max_len);
  name[max_len] = 0;
  return true;
#else
  return false;
#endif
}
152
153 //------------------------- SlowUnwindStack -----------------------------------
154
// One frame as produced by libcorkscrew's unwind_backtrace_signal_arch().
// NOTE(review): presumably mirrors libcorkscrew's own backtrace_frame_t
// layout — verify against the libcorkscrew headers if it changes.
typedef struct {
  uptr absolute_pc;
  uptr stack_top;
  uptr stack_size;
} backtrace_frame_t;
160
extern "C" {
// libcorkscrew entry points, resolved via dlsym() in
// SanitizerInitializeUnwinder(). All stay null if the library is absent,
// in which case the generic unwinder is used.
typedef void *(*acquire_my_map_info_list_func)();
typedef void (*release_my_map_info_list_func)(void *map);
typedef sptr (*unwind_backtrace_signal_arch_func)(
    void *siginfo, void *sigcontext, void *map_info_list,
    backtrace_frame_t *backtrace, uptr ignore_depth, uptr max_depth);
acquire_my_map_info_list_func acquire_my_map_info_list;
release_my_map_info_list_func release_my_map_info_list;
unwind_backtrace_signal_arch_func unwind_backtrace_signal_arch;
} // extern "C"
171
#if SANITIZER_ANDROID
// Load libcorkscrew and resolve the entry points needed for in-signal
// unwinding. On any failure the pointers are left/reset null, and the slow
// generic unwinder is used instead (with degraded SEGV traces).
void SanitizerInitializeUnwinder() {
  void *handle = dlopen("libcorkscrew.so", RTLD_LAZY);
  if (handle == 0) {
    VReport(1,
            "Failed to open libcorkscrew.so. You may see broken stack traces "
            "in SEGV reports.");
    return;
  }
  acquire_my_map_info_list = (acquire_my_map_info_list_func)(uptr)dlsym(
      handle, "acquire_my_map_info_list");
  release_my_map_info_list = (release_my_map_info_list_func)(uptr)dlsym(
      handle, "release_my_map_info_list");
  unwind_backtrace_signal_arch = (unwind_backtrace_signal_arch_func)(uptr)dlsym(
      handle, "unwind_backtrace_signal_arch");
  bool resolved = acquire_my_map_info_list && release_my_map_info_list &&
                  unwind_backtrace_signal_arch;
  if (!resolved) {
    VReport(1,
            "Failed to find one of the required symbols in libcorkscrew.so. "
            "You may see broken stack traces in SEGV reports.");
    acquire_my_map_info_list = NULL;
    release_my_map_info_list = NULL;
    unwind_backtrace_signal_arch = NULL;
  }
}
#endif
198
// ARM EHABI stops iteration with _URC_END_OF_STACK; other targets use
// _URC_NORMAL_STOP.
#ifdef __arm__
#define UNWIND_STOP _URC_END_OF_STACK
#define UNWIND_CONTINUE _URC_NO_REASON
#else
#define UNWIND_STOP _URC_NORMAL_STOP
#define UNWIND_CONTINUE _URC_NO_REASON
#endif

// Returns the PC of the frame described by |ctx|.
uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
#ifdef __arm__
  // ARM EHABI has no _Unwind_GetIP(); read r15 (PC) via the VRS interface.
  uptr val;
  _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
      15 /* r15 = PC */, _UVRSD_UINT32, &val);
  CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
  // Clear the Thumb bit.
  return val & ~(uptr)1;
#else
  return _Unwind_GetIP(ctx);
#endif
}
219
// State threaded through the Unwind_Trace() callback: the trace being filled
// and the maximum number of frames to record.
struct UnwindTraceArg {
  StackTrace *stack;
  uptr max_depth;
};
224
Unwind_Trace(struct _Unwind_Context * ctx,void * param)225 _Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
226 UnwindTraceArg *arg = (UnwindTraceArg*)param;
227 CHECK_LT(arg->stack->size, arg->max_depth);
228 uptr pc = Unwind_GetIP(ctx);
229 arg->stack->trace[arg->stack->size++] = pc;
230 if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
231 return UNWIND_CONTINUE;
232 }
233
// Unwinds the current stack via _Unwind_Backtrace() and rewrites the result
// so that |pc| (the PC reported to the user) ends up in trace[0].
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
  CHECK_GE(max_depth, 2);
  size = 0;
  // Collect one extra frame: trace[0] (this function itself) is popped below.
  UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
  _Unwind_Backtrace(Unwind_Trace, &arg);
  // We need to pop a few frames so that pc is on top.
  uptr to_pop = LocatePcInTrace(pc);
  // trace[0] belongs to the current function so we always pop it.
  if (to_pop == 0)
    to_pop = 1;
  PopStackFrames(to_pop);
  trace[0] = pc;
}
247
// Unwinds from a signal context. Uses libcorkscrew when it was loaded by
// SanitizerInitializeUnwinder(); otherwise falls back to the generic
// unwinder, which ignores |context|.
void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
                                            uptr max_depth) {
  CHECK_GE(max_depth, 2);
  if (!unwind_backtrace_signal_arch) {
    SlowUnwindStack(pc, max_depth);
    return;
  }

  void *map = acquire_my_map_info_list();
  CHECK(map);
  InternalScopedBuffer<backtrace_frame_t> frames(kStackTraceMax);
  // siginfo argument appears to be unused.
  sptr res = unwind_backtrace_signal_arch(/* siginfo */ NULL, context, map,
                                          frames.data(),
                                          /* ignore_depth */ 0, max_depth);
  release_my_map_info_list(map);
  if (res < 0) return;  // Unwinder failed; leave the trace unmodified.
  CHECK_LE((uptr)res, kStackTraceMax);

  size = 0;
  // +2 compensate for libcorkscrew unwinder returning addresses of call
  // instructions instead of raw return addresses.
  for (sptr i = 0; i < res; ++i)
    trace[size++] = frames[i].absolute_pc + 2;
}
273
#if !SANITIZER_FREEBSD
// Cached size of the static TLS block, filled in by InitTlsSize().
static uptr g_tls_size;
#endif

// glibc's internal _dl_get_tls_static_info uses a non-default calling
// convention on i386; the call site must match it.
#ifdef __i386__
# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
#else
# define DL_INTERNAL_FUNCTION
#endif
283
InitTlsSize()284 void InitTlsSize() {
285 #if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
286 typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
287 get_tls_func get_tls;
288 void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
289 CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr));
290 internal_memcpy(&get_tls, &get_tls_static_info_ptr,
291 sizeof(get_tls_static_info_ptr));
292 CHECK_NE(get_tls, 0);
293 size_t tls_size = 0;
294 size_t tls_align = 0;
295 IndirectExternCall(get_tls)(&tls_size, &tls_align);
296 g_tls_size = tls_size;
297 #endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID
298 }
299
#if (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX
// sizeof(struct pthread) from glibc, cached after first computation.
// Zero means "not determined yet".
static atomic_uintptr_t kThreadDescriptorSize;

// Returns sizeof(struct pthread) for the running glibc, or 0 if the version
// could not be determined. Values come from a hard-coded table keyed by the
// glibc minor version reported by confstr().
uptr ThreadDescriptorSize() {
  uptr val = atomic_load(&kThreadDescriptorSize, memory_order_relaxed);
  if (val)
    return val;
#ifdef _CS_GNU_LIBC_VERSION
  char buf[64];
  uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
  if (len < sizeof(buf) && internal_strncmp(buf, "glibc 2.", 8) == 0) {
    char *end;
    int minor = internal_simple_strtoll(buf + 8, &end, 10);
    // Accept both "glibc 2.NN" and "glibc 2.NN.MM".
    if (end != buf + 8 && (*end == '\0' || *end == '.')) {
      /* sizeof(struct thread) values from various glibc versions. */
      if (SANITIZER_X32)
        val = 1728; // Assume only one particular version for x32.
      else if (minor <= 3)
        val = FIRST_32_SECOND_64(1104, 1696);
      else if (minor == 4)
        val = FIRST_32_SECOND_64(1120, 1728);
      else if (minor == 5)
        val = FIRST_32_SECOND_64(1136, 1728);
      else if (minor <= 9)
        val = FIRST_32_SECOND_64(1136, 1712);
      else if (minor == 10)
        val = FIRST_32_SECOND_64(1168, 1776);
      else if (minor <= 12)
        val = FIRST_32_SECOND_64(1168, 2288);
      else
        val = FIRST_32_SECOND_64(1216, 2304);
    }
    if (val)
      atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed);
    return val;
  }
#endif
  return 0;
}
340
// The offset at which pointer to self is located in the thread descriptor.
const uptr kThreadSelfOffset = FIRST_32_SECOND_64(8, 16);

// Accessor so other translation units can read the offset.
uptr ThreadSelfOffset() {
  return kThreadSelfOffset;
}
347
// Returns the address of the current thread's descriptor by reading the
// self-pointer stored at kThreadSelfOffset from the TLS segment register
// (%gs on i386, %fs on x86_64).
uptr ThreadSelf() {
  uptr descr_addr;
# if defined(__i386__)
  asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
# elif defined(__x86_64__)
  asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
# else
# error "unsupported CPU arch"
# endif
  return descr_addr;
}
#endif  // (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX
360
#if SANITIZER_FREEBSD
// Returns the TCB (segment base) pointer for the current thread by reading
// the self-pointer stored at offset 0 of the TLS segment register.
static void **ThreadSelfSegbase() {
  void **segbase = 0;
# if defined(__i386__)
  // sysarch(I386_GET_GSBASE, segbase);
  __asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
# elif defined(__x86_64__)
  // sysarch(AMD64_GET_FSBASE, segbase);
  __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
# else
# error "unsupported CPU arch for FreeBSD platform"
# endif
  return segbase;
}

// The thread pointer lives at segbase[2] on FreeBSD.
uptr ThreadSelf() {
  return (uptr)ThreadSelfSegbase()[2];
}
#endif  // SANITIZER_FREEBSD
380
// Reports [*addr, *addr + *size) for the current thread's static TLS block,
// or an empty range on unsupported configurations.
static void GetTls(uptr *addr, uptr *size) {
#if SANITIZER_LINUX
# if defined(__x86_64__) || defined(__i386__)
  // The block ends at the self pointer; exclude the thread descriptor, which
  // occupies the first ThreadDescriptorSize() bytes.
  *addr = ThreadSelf();
  *size = GetTlsSize();
  *addr -= *size;
  *addr += ThreadDescriptorSize();
# else
  // Unsupported architecture: report an empty range.
  *addr = 0;
  *size = 0;
# endif
#elif SANITIZER_FREEBSD
  void** segbase = ThreadSelfSegbase();
  *addr = 0;
  *size = 0;
  if (segbase != 0) {
    // tcbalign = 16
    // tls_size = round(tls_static_space, tcbalign);
    // dtv = segbase[1];
    // dtv[2] = segbase - tls_static_space;
    void **dtv = (void**) segbase[1];
    *addr = (uptr) dtv[2];
    *size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
  }
#else
# error "Unknown OS"
#endif
}
409
GetTlsSize()410 uptr GetTlsSize() {
411 #if SANITIZER_FREEBSD
412 uptr addr, size;
413 GetTls(&addr, &size);
414 return size;
415 #else
416 return g_tls_size;
417 #endif
418 }
419
// Reports both the stack and the static TLS region of the current thread.
// If the TLS block overlaps the stack mapping (as happens for non-main
// threads), the stack range is shrunk so the two ranges are disjoint.
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
  GetTls(tls_addr, tls_size);

  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;

  if (!main) {
    // If stack and tls intersect, make them non-intersecting.
    if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
      // TLS must end within the stack for the carve-out below to be valid.
      CHECK_GT(*tls_addr + *tls_size, *stk_addr);
      CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
      *stk_size -= *tls_size;
      *tls_addr = *stk_addr + *stk_size;
    }
  }
}
439
// Ensures a pthread_attr_t requests enough stack for the sanitizer's
// TLS-heavy runtime. Only grows the size when the caller did not pin the
// stack to a specific address; otherwise it can only warn.
void AdjustStackSize(void *attr_) {
  pthread_attr_t *attr = (pthread_attr_t *)attr_;
  uptr stackaddr = 0;
  size_t stacksize = 0;
  my_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
  // GLibC will return (0 - stacksize) as the stack address in the case when
  // stacksize is set, but stackaddr is not.
  bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
  // We place a lot of tool data into TLS, account for that.
  const uptr minstacksize = GetTlsSize() + 128*1024;
  if (stacksize < minstacksize) {
    if (!stack_set) {
      // stacksize == 0 means the default stack is used; it is left untouched.
      if (stacksize != 0) {
        VPrintf(1, "Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
                minstacksize);
        pthread_attr_setstacksize(attr, minstacksize);
      }
    } else {
      Printf("Sanitizer: pre-allocated stack size is insufficient: "
             "%zu < %zu\n", stacksize, minstacksize);
      Printf("Sanitizer: pthread_create is likely to fail.\n");
    }
  }
}
464
#if SANITIZER_ANDROID
// Android: enumerate loaded modules from the (uncached) memory mappings.
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
                      string_predicate_t filter) {
  MemoryMappingLayout memory_mapping(false);
  return memory_mapping.DumpListOfModules(modules, max_modules, filter);
}
#else  // SANITIZER_ANDROID
# if !SANITIZER_FREEBSD
// FreeBSD defines Elf_Phdr itself; glibc spells it ElfW(Phdr).
typedef ElfW(Phdr) Elf_Phdr;
# endif

// Accumulator passed through dl_iterate_phdr() to dl_iterate_phdr_cb().
struct DlIteratePhdrData {
  LoadedModule *modules;      // Output array with capacity max_n.
  uptr current_n;             // Number of entries written so far.
  bool first;                 // True until the main binary has been recorded.
  uptr max_n;
  string_predicate_t filter;  // Optional module-name filter; may be null.
};
483
// dl_iterate_phdr() callback: records one loaded module (name, base address,
// and its PT_LOAD ranges) into DlIteratePhdrData. Always returns 0 so
// iteration continues; unnamed or filtered-out entries are skipped, and
// nothing is recorded once max_n modules have been collected.
static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
  DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
  if (data->current_n == data->max_n)
    return 0;
  InternalScopedBuffer<char> module_name(kMaxPathLength);
  module_name.data()[0] = '\0';
  if (data->first) {
    data->first = false;
    // First module is the binary itself.
    ReadBinaryName(module_name.data(), module_name.size());
  } else if (info->dlpi_name) {
    internal_strncpy(module_name.data(), info->dlpi_name, module_name.size());
  }
  if (module_name.data()[0] == '\0')
    return 0;
  if (data->filter && !data->filter(module_name.data()))
    return 0;
  // Placement-new into the caller-provided output slot.
  void *mem = &data->modules[data->current_n];
  LoadedModule *cur_module = new(mem) LoadedModule(module_name.data(),
                                                   info->dlpi_addr);
  data->current_n++;
  for (int i = 0; i < info->dlpi_phnum; i++) {
    const Elf_Phdr *phdr = &info->dlpi_phdr[i];
    if (phdr->p_type == PT_LOAD) {
      uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
      uptr cur_end = cur_beg + phdr->p_memsz;
      bool executable = phdr->p_flags & PF_X;
      cur_module->addAddressRange(cur_beg, cur_end, executable);
    }
  }
  return 0;
}
516
GetListOfModules(LoadedModule * modules,uptr max_modules,string_predicate_t filter)517 uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
518 string_predicate_t filter) {
519 CHECK(modules);
520 DlIteratePhdrData data = {modules, 0, true, max_modules, filter};
521 dl_iterate_phdr(dl_iterate_phdr_cb, &data);
522 return data.current_n;
523 }
524 #endif // SANITIZER_ANDROID
525
526 uptr indirect_call_wrapper;
527
SetIndirectCallWrapper(uptr wrapper)528 void SetIndirectCallWrapper(uptr wrapper) {
529 CHECK(!indirect_call_wrapper);
530 CHECK(wrapper);
531 indirect_call_wrapper = wrapper;
532 }
533
// Caches state that becomes unreachable once a sandbox cuts off filesystem
// access (notably /proc/self/*), so error reports keep working afterwards.
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
  // Some kinds of sandboxes may forbid filesystem access, so we won't be able
  // to read the file mappings from /proc/self/maps. Luckily, neither the
  // process will be able to load additional libraries, so it's fine to use the
  // cached mappings.
  MemoryMappingLayout::CacheMemoryMappings();
  // Same for /proc/self/exe in the symbolizer.
#if !SANITIZER_GO
  if (Symbolizer *sym = Symbolizer::GetOrNull())
    sym->PrepareForSandboxing();
  CovPrepareForSandboxing(args);
#endif
}
547
548 } // namespace __sanitizer
549
550 #endif // SANITIZER_FREEBSD || SANITIZER_LINUX
551