//===-- asan_noinst_test.cc ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_test_utils.h"
#include "asan_test_config.h"
#include "sanitizer/asan_interface.h"

#include <assert.h>
#include <pthread.h>  // for pthread_create() and pthread_join()
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   // for memset()
#include <unistd.h>   // for write() and _exit()
#include <algorithm>
#include <vector>
#include "gtest/gtest.h"

// Simple stand-alone pseudorandom number generator.
// Current algorithm is ANSI C linear congruential PRNG.
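// The multiplier 1103515245 and increment 12345 are the constants from the
// C standard's sample rand() implementation; the >> 16 discards the
// low-order bits, which have very short periods in a power-of-two LCG.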
static inline u32 my_rand(u32* state) {
  return (*state = *state * 1103515245 + 12345) >> 16;
}

static u32 global_seed = 0;


TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

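// Stress the allocator: on every third iteration free a randomly chosen
// live pointer; otherwise allocate a block of randomized size and alignment
// and touch its first, middle and last bytes. The three synthetic stack
// traces let the allocator attribute mallocs and frees to distinct fake
// call sites.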
static void MallocStress(size_t n) {
  u32 seed = my_rand(&global_seed);
  __asan::StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  __asan::StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  __asan::StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1);
    } else {
      size_t size = my_rand(&seed) % 1000 + 1;
      switch ((my_rand(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size, &stack2);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3);
}


TEST(AddressSanitizer, NoInstMallocTest) {
#ifdef __arm__
  MallocStress(300000);
#else
  MallocStress(1000000);
#endif
}

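// ASan maps every 8-byte granule of application memory to one shadow byte:
// 0 means the whole granule is addressable, 1..7 means only the first k
// bytes are, and other values mark redzones or freed memory. PrintShadow
// dumps the shadow bytes around [ptr, ptr+size), printing a byte whenever
// the shadow address changes and marking the region boundaries with dots.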
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

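// A fixed pool of plausible return addresses (48-bit user-space PCs on
// 64-bit targets, 32-bit values otherwise) used as raw material for the
// stack-trace compression tests below.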
static uptr pc_array[] = {
#if __WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // __WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

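// Round-trip property under test: compressing a random prefix of pc_array
// into a buffer of randomized size must report n_frames <= the input size,
// and decompression must reproduce exactly those leading frames.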
void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand(&global_seed);
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    __asan::StackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand(&seed) % stack0.size));
    size_t compress_size =
        std::max((size_t)2, (size_t)my_rand(&seed) % (2 * kNumPcs));
    size_t n_frames =
        __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
    assert(n_frames <= stack0.size);
    __asan::StackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}

void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  __asan::StackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
        __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

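// Freed chunks sit in a FIFO quarantine before they can be reused, so the
// same address must not come back immediately. The loop counts malloc/free
// cycles until the original address is recycled; that count approximates
// the quarantine capacity in chunks and must be large but finite.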
TEST(AddressSanitizer, QuarantineTest) {
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 32;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack);
    if (p1 == p) break;
  }
  // fprintf(stderr, "i=%ld\n", i);
  EXPECT_GE(i, 100000U);
  EXPECT_LT(i, max_i);
}

void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand(&global_seed);
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack);
  }
  return NULL;
}

// Check that the thread local allocators are flushed when threads are
// destroyed.
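// If they were not, each of the 3000 short-lived threads would pin its
// cached chunks and the heap would grow past the 320 MB bound checked below.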
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    pthread_create(&t, NULL, ThreadedQuarantineTestWorker, 0);
    pthread_join(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    pthread_create(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    pthread_join(t[i], 0);
  }
}

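// Writes into the shadow regions or the shadow gap cannot be described as a
// normal bad access, so the memset() interceptor reports them as
// "unknown-crash" and names the region hit. The volatile function pointer
// keeps the compiler from inlining memset() and bypassing the interceptor.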
TEST(AddressSanitizer, MemsetWildAddressTest) {
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + kPageSize), 0, 100),
               "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + kPageSize), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + kPageSize), 0, 100),
               "unknown-crash.*high shadow");
}

TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
    "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator. Allocated size should be
  // equal to the requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call GetAllocatedSize on memory we didn't map, or on interior
  // pointers (ones not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_EQ(false, __asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_EQ(false, __asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for
  // __asan_get_allocated_size().
  EXPECT_EQ(false, __asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // Once memory is freed, it is no longer owned, and a call to
  // GetAllocatedSize is forbidden.
  free(array);
  EXPECT_EQ(false, __asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);

  delete int_ptr;
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

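// Deliberately crash with a double-free. The death-test helpers below call
// this after their assertions pass, so the parent process has a distinctive
// "double-free" report to match on.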
static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

// This test is run in a separate process, so that the large malloc'ed
// chunk won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
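// (An EXPECT_* failure would not stop the child process, which would still
// die in DoDoubleFree() with the expected message, and the death test would
// wrongly pass; ASSERT_* returns early, so the expected message never
// appears and the failure is visible.)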
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // It is unlikely that we already have a chunk of this size in the free
  // list.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now the large chunk should be in the free list, and a second request
  // can be served without increasing the heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with the expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}

// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
  size_t old_free_bytes, new_free_bytes;
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  // If we malloc and free a large memory chunk, it will not fall into
  // the quarantine and will be available for future requests.
  old_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
  free(Ident(malloc(kLargeMallocSize)));
  new_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
  // Test passed.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetFreeBytesTest) {
  static const size_t kNumOfChunks = 100;
  static const size_t kChunkSize = 100;
  char *chunks[kNumOfChunks];
  size_t i;
  size_t old_free_bytes, new_free_bytes;
  // Allocate one small chunk. The allocator has now probably mapped a batch
  // of chunks of this size to serve future requests, so each subsequent
  // allocation should decrease the number of free bytes.
  chunks[0] = Ident((char*)malloc(kChunkSize));
  old_free_bytes = __asan_get_free_bytes();
  for (i = 1; i < kNumOfChunks; i++) {
    chunks[i] = Ident((char*)malloc(kChunkSize));
    new_free_bytes = __asan_get_free_bytes();
    EXPECT_LT(new_free_bytes, old_free_bytes);
    old_free_bytes = new_free_bytes;
  }
  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
}

static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<20, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads = (__WORDSIZE == 32) ? 40 : 200;

void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0;
         size_index < ARRAY_SIZE(kManyThreadsMallocSizes); size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    pthread_create(&threads[i], 0, ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    pthread_join(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

TEST(AddressSanitizerInterface, OnErrorCallbackTest) {
  __asan_set_on_error_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback.*double-free");
  __asan_set_on_error_callback(NULL);
}

static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison array[40..80).
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // Access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120).
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison the whole array: [0..120).
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96).
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20.
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

// Make sure that, within each aligned block of 2^granularity bytes, no
// poisoned ("true") byte precedes an unpoisoned ("false") one: ASan's shadow
// encoding can only represent a granule whose addressable bytes form a
// prefix.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      can_be_poisoned = true;
    }
  }
}

TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1) and [l2, l2+s2) and check the result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check the result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}

static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory we don't own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory we don't own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

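// The report callback receives the full text of the error report. Here it
// brackets the report with "ABCDEF" sentinels and exits, so the death test
// below can verify both that the callback ran and that the report reached it.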
static void ErrorReportCallbackOneToZ(const char *report) {
  write(2, "ABCDEF", 6);
  write(2, report, strlen(report));
  write(2, "ABCDEF", 6);
  _exit(1);
}

TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}

TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
  const size_t kNumMallocs =
      (__WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}
