//===-- asan_poisoning.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//

#include "asan_poisoning.h"
#include "sanitizer_common/sanitizer_libc.h"

namespace __asan {

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (!flags()->poison_heap) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  FastPoisonShadow(addr, size, value);
}

void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!flags()->poison_heap) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}

struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, SHADOW_GRANULARITY)
  s8 value;   // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8*)MemToShadow(address);
    offset = address & (SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};
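
// Illustrative note (not used by the RTL): with the default 1:8 shadow
// mapping, a shadow byte of 0 means all SHADOW_GRANULARITY application bytes
// of the granule are addressable, a value k in (0, SHADOW_GRANULARITY) means
// only the first k bytes are addressable, and a negative value marks the
// whole granule as poisoned. A rough sketch of how an endpoint decomposes an
// address (the concrete address below is made up):
//
//   ShadowSegmentEndpoint e(0x7f0000001005);
//   // e.chunk  == (u8 *)MemToShadow(0x7f0000001005)  -- location of the
//   //             shadow byte covering this granule
//   // e.offset == 0x7f0000001005 & (SHADOW_GRANULARITY - 1) == 5
//   // e.value  == *e.chunk, interpreted as described above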

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// Current implementation of __asan_(un)poison_memory_region doesn't check
// that the user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively to make sure the ASan shadow
// mapping invariant is preserved (see detailed mapping description here:
// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm).
//
// * if the user asks to poison region [left, right), the program poisons
//   at least [left, AlignDown(right)).
// * if the user asks to unpoison region [left, right), the program unpoisons
//   at most [AlignDown(left), right).
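//
// Illustrative example of the rounding behavior (made-up buffer; assumes
// SHADOW_GRANULARITY == 8 and that `buf` happens to be 8-byte aligned):
//
//   char buf[16];
//   __asan_poison_memory_region(buf, 11);
//   // At least [buf, buf + 8) is now poisoned. The whole last granule is
//   // poisoned only if all of its currently addressable bytes lie inside
//   // the requested region; otherwise bytes 8..10 stay addressable.
//   __asan_unpoison_memory_region(buf, 11);
//   // At most [buf, buf + 11) becomes addressable again; the shadow state
//   // of bytes 11..15 is not made more permissive than it already was.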
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  if (flags()->verbosity >= 1) {
    Printf("Trying to poison memory region [%p, %p)\n",
           (void*)beg_addr, (void*)end_addr);
  }
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK(beg.offset < end.offset);
    s8 value = beg.value;
    CHECK(value == end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK(beg.chunk < end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison if byte in end.offset is unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}

void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  if (flags()->verbosity >= 1) {
    Printf("Trying to unpoison memory region [%p, %p)\n",
           (void*)beg_addr, (void*)end_addr);
  }
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK(beg.offset < end.offset);
    s8 value = beg.value;
    CHECK(value == end.value);
    // We unpoison memory bytes up to end.offset if they are not already
    // unpoisoned.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK(beg.chunk < end.chunk);
  if (beg.offset > 0) {
    *beg.chunk = 0;
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}

bool __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}

uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size) return 0;
  uptr end = beg + size;
  if (!AddrIsInMem(beg)) return beg;
  if (!AddrIsInMem(end)) return end;
  uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
  uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
  uptr shadow_beg = MemToShadow(aligned_b);
  uptr shadow_end = MemToShadow(aligned_e);
  // First check the first and the last application bytes,
  // then check the SHADOW_GRANULARITY-aligned region by calling
  // mem_is_zero on the corresponding shadow.
  if (!__asan::AddressIsPoisoned(beg) &&
      !__asan::AddressIsPoisoned(end - 1) &&
      (shadow_end <= shadow_beg ||
       __sanitizer::mem_is_zero((const char *)shadow_beg,
                                shadow_end - shadow_beg)))
    return 0;
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg < end; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
  return 0;
}
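
// Illustrative usage (made-up check, not part of this file's interface;
// assumes `buf` happens to be 8-byte aligned so the poisoned range covers a
// whole granule):
//
//   char buf[32];
//   __asan_poison_memory_region(buf + 8, 8);
//   uptr first_bad = __asan_region_is_poisoned((uptr)buf, sizeof(buf));
//   // first_bad would be 0 if the whole region were addressable; here it is
//   // expected to be (uptr)(buf + 8), the address of the first poisoned byte.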

#define CHECK_SMALL_REGION(p, size, isWrite)                    \
  do {                                                          \
    uptr __p = reinterpret_cast<uptr>(p);                       \
    uptr __size = size;                                         \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||              \
                 __asan::AddressIsPoisoned(__p + __size - 1))) {\
      GET_CURRENT_PC_BP_SP;                                     \
      uptr __bad = __asan_region_is_poisoned(__p, __size);      \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size);  \
    }                                                           \
  } while (false)

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8* shadow_end = (s8*)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    if (end_value != 0)
      *shadow_end = Max(end_value, end_offset);
  }
}
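
// Illustrative sketch of the partial-last-granule handling above (assumes
// SHADOW_GRANULARITY == 8; the size is made up, addr is granularity-aligned):
//
//   PoisonAlignedStackMemory(addr, /*size=*/20, /*do_poison=*/true);
//   // aligned_size == 16: the first two granules are fully poisoned with
//   // kAsanStackUseAfterScopeMagic. For the third granule, end_offset == 4,
//   // so it is poisoned only if at most its first 4 bytes were addressable;
//   // otherwise poisoning it would also hide addressable bytes 4..7.
//   PoisonAlignedStackMemory(addr, /*size=*/20, /*do_poison=*/false);
//   // The first 16 bytes are unpoisoned; the last shadow byte is raised to
//   // at least 4 addressable bytes unless that granule was already fully
//   // addressable.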

void __asan_poison_stack_memory(uptr addr, uptr size) {
  if (flags()->verbosity > 0)
    Report("poisoning: %p %zx\n", (void*)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  if (flags()->verbosity > 0)
    Report("unpoisoning: %p %zx\n", (void*)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}