//===-- sanitizer_common_libcdep.cpp --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_procmaps.h"


namespace __sanitizer {

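// Optional callback notified by the background thread when the soft RSS
// limit is first exceeded (true) and again when RSS drops back below it
// (false). May be registered at most once.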
static void (*SoftRssLimitExceededCallback)(bool exceeded);
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
  CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
  SoftRssLimitExceededCallback = Callback;
}

#if (SANITIZER_LINUX || SANITIZER_NETBSD) && !SANITIZER_GO
// Weak default implementation for when sanitizer_stackdepot is not linked in.
SANITIZER_WEAK_ATTRIBUTE StackDepotStats *StackDepotGetStats() {
  return nullptr;
}

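// Polls once every 100ms: reports RSS and stack depot growth when running
// with verbosity, enforces the hard/soft RSS limits, and periodically prints
// a heap profile if heap_profile is set.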
void *BackgroundThread(void *arg) {
  const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
  const uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
  const bool heap_profile = common_flags()->heap_profile;
  uptr prev_reported_rss = 0;
  uptr prev_reported_stack_depot_size = 0;
  bool reached_soft_rss_limit = false;
  uptr rss_during_last_reported_profile = 0;
  while (true) {
    SleepForMillis(100);
    const uptr current_rss_mb = GetRSS() >> 20;
    if (Verbosity()) {
      // If RSS has grown 10% since last time, print some information.
      if (prev_reported_rss * 11 / 10 < current_rss_mb) {
        Printf("%s: RSS: %zdMb\n", SanitizerToolName, current_rss_mb);
        prev_reported_rss = current_rss_mb;
      }
      // If stack depot has grown 10% since last time, print it too.
      StackDepotStats *stack_depot_stats = StackDepotGetStats();
      if (stack_depot_stats) {
        if (prev_reported_stack_depot_size * 11 / 10 <
            stack_depot_stats->allocated) {
          Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
                 SanitizerToolName,
                 stack_depot_stats->n_uniq_ids,
                 stack_depot_stats->allocated >> 20);
          prev_reported_stack_depot_size = stack_depot_stats->allocated;
        }
      }
    }
    // Check RSS against the limit.
    if (hard_rss_limit_mb && hard_rss_limit_mb < current_rss_mb) {
      Report("%s: hard rss limit exhausted (%zdMb vs %zdMb)\n",
             SanitizerToolName, hard_rss_limit_mb, current_rss_mb);
      DumpProcessMap();
      Die();
    }
    if (soft_rss_limit_mb) {
      if (soft_rss_limit_mb < current_rss_mb && !reached_soft_rss_limit) {
        reached_soft_rss_limit = true;
        Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
               SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
        if (SoftRssLimitExceededCallback)
          SoftRssLimitExceededCallback(true);
      } else if (soft_rss_limit_mb >= current_rss_mb &&
                 reached_soft_rss_limit) {
        reached_soft_rss_limit = false;
        if (SoftRssLimitExceededCallback)
          SoftRssLimitExceededCallback(false);
      }
    }
    if (heap_profile &&
        current_rss_mb > rss_during_last_reported_profile * 1.1) {
      Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
      __sanitizer_print_memory_profile(90, 20);
      rss_during_last_reported_profile = current_rss_mb;
    }
  }
}
#endif

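// Writes a possibly multi-line message to syslog, one line per call to
// WriteOneLineToSyslog().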
void WriteToSyslog(const char *msg) {
  InternalScopedString msg_copy(kErrorMessageBufferSize);
  msg_copy.append("%s", msg);
  char *p = msg_copy.data();
  char *q;

  // Print one line at a time.
  // syslog, at least on Android, has an implicit message length limit.
  while ((q = internal_strchr(p, '\n'))) {
    *q = '\0';
    WriteOneLineToSyslog(p);
    p = q + 1;
  }
  // Print remaining characters, if there are any.
  // Note that this will add an extra newline at the end.
  // FIXME: buffer extra output. This would need a thread-local buffer, which
  // on Android requires plugging into the tools' (e.g. ASan's) Thread class.
  if (*p)
    WriteOneLineToSyslog(p);
}

void MaybeStartBackgroudThread() {
#if (SANITIZER_LINUX || SANITIZER_NETBSD) && \
    !SANITIZER_GO  // Need to implement/test on other platforms.
  // Start the background thread only if an rss limit or the heap profiler is
  // enabled.
  if (!common_flags()->hard_rss_limit_mb &&
      !common_flags()->soft_rss_limit_mb &&
      !common_flags()->heap_profile) return;
  if (!&real_pthread_create) return;  // Can't spawn the thread anyway.
  internal_start_thread(BackgroundThread, nullptr);
#endif
}

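// Optional callback run from __sanitizer_sandbox_on_notify() right after
// PlatformPrepareForSandboxing().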
static void (*sandboxing_callback)();
void SetSandboxingCallback(void (*f)()) {
  sandboxing_callback = f;
}

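// Reserves a range of at least `size` bytes whose start is aligned to
// `align`. For alignments above the page size this over-allocates by `align`
// bytes and returns the next `align`-aligned address after the raw start.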
uptr ReservedAddressRange::InitAligned(uptr size, uptr align,
                                       const char *name) {
  CHECK(IsPowerOfTwo(align));
  if (align <= GetPageSizeCached())
    return Init(size, name);
  uptr start = Init(size + align, name);
  start += align - (start & (align - 1));
  return start;
}

#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS

// Reserve memory range [beg, end].
// We need to use inclusive range because end+1 may not be representable.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow) {
  CHECK_EQ((beg % GetMmapGranularity()), 0);
  CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
  uptr size = end - beg + 1;
  DecreaseTotalMmap(size);  // Don't count the shadow against mmap_limit_mb.
  if (madvise_shadow ? !MmapFixedSuperNoReserve(beg, size, name)
                     : !MmapFixedNoReserve(beg, size, name)) {
    Report(
        "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
        "Perhaps you're using ulimit -v\n",
        size);
    Abort();
  }
  if (madvise_shadow && common_flags()->use_madv_dontdump)
    DontDumpShadowMemory(beg, size);
}

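// Protects [addr, addr+size) as an inaccessible "shadow gap". If the mapping
// fails at the very bottom of the address space, retries with the start
// bumped forward one mmap granule at a time (see below), and dies if the gap
// still cannot be protected.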
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start) {
  if (!size)
    return;
  void *res = MmapFixedNoAccess(addr, size, "shadow gap");
  if (addr == (uptr)res)
    return;
  // A few pages at the start of the address space cannot be protected.
  // But we really want to protect as much as possible, to prevent this memory
  // being returned as a result of a non-FIXED mmap().
  if (addr == zero_base_shadow_start) {
    uptr step = GetMmapGranularity();
    while (size > step && addr < zero_base_max_shadow_start) {
      addr += step;
      size -= step;
      void *res = MmapFixedNoAccess(addr, size, "shadow gap");
      if (addr == (uptr)res)
        return;
    }
  }

  Report(
      "ERROR: Failed to protect the shadow gap. "
      "%s cannot proceed correctly. ABORTING.\n",
      SanitizerToolName);
  DumpProcessMap();
  Die();
}

#endif  // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS

}  // namespace __sanitizer

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
                             __sanitizer_sandbox_arguments *args) {
  __sanitizer::PlatformPrepareForSandboxing(args);
  if (__sanitizer::sandboxing_callback)
    __sanitizer::sandboxing_callback();
}