//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_test_utils.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <algorithm>
#include <vector>
#include <limits>

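// Sanity check for the death-test machinery itself: a plain exit(1)
// must be caught by EXPECT_DEATH regardless of the message pattern.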
TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

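// Randomized allocation stress: every third iteration frees a random
// live chunk; the rest allocate 1..1000 bytes (occasionally 1-4K more)
// with a random power-of-two alignment (2..1024) and touch the first,
// middle and last bytes of the chunk.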
static void MallocStress(size_t n) {
  u32 seed = my_rand();
  __asan::StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  __asan::StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  __asan::StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand_r(&seed) % 1000 + 1;
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
}


TEST(AddressSanitizer, NoInstMallocTest) {
  MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000);
}

TEST(AddressSanitizer, ThreadedMallocStressTest) {
  const int kNumThreads = 4;
  const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
        (void*)kNumIterations);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

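// Dumps the shadow bytes around [ptr, ptr+size) to stderr, printing each
// shadow byte once per granule and marking the region boundaries with '.'.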
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

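// A pool of fake program counters used to exercise stack trace
// compression; the 64-bit entries are compiled in only on 64-bit targets.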
static uptr pc_array[] = {
#if SANITIZER_WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // SANITIZER_WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

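// Round-trip test: compress a random-length prefix of the shuffled PC
// pool into a buffer of random size, uncompress it, and check that the
// surviving frames match the originals.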
void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand();
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    __asan::StackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand_r(&seed) % stack0.size));
    size_t compress_size =
      std::max((size_t)2, (size_t)my_rand_r(&seed) % (2 * kNumPcs));
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
    assert(n_frames <= stack0.size);
    __asan::StackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}

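// Benchmark-only: repeatedly compress the full PC pool; the result is
// passed through Ident() so the call cannot be optimized away.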
void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  __asan::StackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

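// A freed chunk must sit in quarantine for a while before its address
// can be handed out again: expect at least 10000 malloc/free pairs of
// the same size before the original address is recycled.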
TEST(AddressSanitizer, QuarantineTest) {
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 1024;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  EXPECT_GE(i, 10000U);
  EXPECT_LT(i, max_i);
}

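// Worker for ThreadedQuarantineTest below: 1000 malloc/free pairs of
// random size on its own thread.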
void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand();
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread-local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

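// Worker that repeatedly allocates and frees batches of identically
// sized (32-byte) chunks, presumably hammering a single allocator size
// class.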
void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

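// An uninstrumented memset() into the shadow and shadow-gap regions must
// die with an "unknown-crash" report naming the region that was hit.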
TEST(AddressSanitizer, MemsetWildAddressTest) {
  using __asan::kHighMemEnd;
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + 200), 0, 100),
               (kLowShadowEnd == 0) ? "unknown-crash.*shadow gap"
                                    : "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + 200), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + 200), 0, 100),
               "unknown-crash.*high shadow");
}

TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
#if ASAN_ALLOCATOR_VERSION == 1
  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
#elif ASAN_ALLOCATOR_VERSION == 2
  EXPECT_EQ(0U, __asan_get_estimated_allocated_size(0));
#endif
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator, and the allocated size
  // should equal the requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call GetAllocatedSize on memory we didn't map, or on
  // interior pointers (not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_FALSE(__asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_FALSE(__asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for __asan_get_allocated_size().
  EXPECT_FALSE(__asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // Freed memory is no longer owned, and calling GetAllocatedSize on it
  // is forbidden.
  free(array);
  EXPECT_FALSE(__asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);
  delete int_ptr;

  void *zero_alloc = Ident(malloc(0));
  if (zero_alloc != 0) {
    // If malloc(0) is not null, this pointer is owned and should have a valid
    // allocated size.
    EXPECT_TRUE(__asan_get_ownership(zero_alloc));
    // The allocated size is 0 or 1 depending on the allocator used.
    EXPECT_LT(__asan_get_allocated_size(zero_alloc), 2U);
  }
  free(zero_alloc);
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

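// Helper for the death tests below: a deliberate double-free.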
static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

#if ASAN_ALLOCATOR_VERSION == 1
// This test is run in a separate process, so that the large malloced
// chunk won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // We are unlikely to have a chunk of this size in the free list.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now the large chunk should fall into the free list, and can be
  // allocated without increasing the heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with the expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}
#elif ASAN_ALLOCATOR_VERSION == 2
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  // asan_allocator2 does not keep huge chunks in the free list, but unmaps
  // them. The chunk should be greater than the quarantine size, otherwise
  // it will be stuck in quarantine instead of being unmapped.
  static const size_t kLargeMallocSize = (1 << 28) + 1;  // 256M
  free(Ident(malloc(kLargeMallocSize)));  // Drain quarantine.
  uptr old_heap_size = __asan_get_heap_size();
  for (int i = 0; i < 3; i++) {
    // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
    free(Ident(malloc(kLargeMallocSize)));
    EXPECT_EQ(old_heap_size, __asan_get_heap_size());
  }
}
#endif

// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
#if ASAN_ALLOCATOR_VERSION == 1
  // asan_allocator2 does not keep large chunks in free lists, so this test
  // does not work with it.
  size_t old_free_bytes, new_free_bytes;
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  // If we malloc and free a large memory chunk, it will not fall
  // into quarantine and will be available for future requests.
  old_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
  free(Ident(malloc(kLargeMallocSize)));
  new_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
#endif  // ASAN_ALLOCATOR_VERSION
  // Test passed.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetFreeBytesTest) {
#if ASAN_ALLOCATOR_VERSION == 1
  // Allocate a small chunk. Now the allocator probably has a lot of these
  // chunks to fulfill future requests, so future requests will decrease
  // the number of free bytes. Do this only on systems where there
  // is enough memory for such assumptions.
  if (SANITIZER_WORDSIZE == 64 && !ASAN_LOW_MEMORY) {
    static const size_t kNumOfChunks = 100;
    static const size_t kChunkSize = 100;
    char *chunks[kNumOfChunks];
    size_t i;
    size_t old_free_bytes, new_free_bytes;
    chunks[0] = Ident((char*)malloc(kChunkSize));
    old_free_bytes = __asan_get_free_bytes();
    for (i = 1; i < kNumOfChunks; i++) {
      chunks[i] = Ident((char*)malloc(kChunkSize));
      new_free_bytes = __asan_get_free_bytes();
      EXPECT_LT(new_free_bytes, old_free_bytes);
      old_free_bytes = new_free_bytes;
    }
    for (i = 0; i < kNumOfChunks; i++)
      free(chunks[i]);
  }
#endif
  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
}

static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
  (SANITIZER_WORDSIZE == 32) ? 40 : 200;

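// Worker for ManyThreadsWithStatsStressTest below: many small
// malloc/free pairs plus one large (1M) allocation per thread.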
void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  // Just one large allocation.
  free(Ident(malloc(1 << 20)));
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_CREATE(&threads[i], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_JOIN(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

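// __asan_set_error_exit_code() returns the previously set exit code;
// verify that each newly set code is actually used when ASan dies.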
TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // poison array[40..80)
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120)
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison whole array - [0..120)
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96)
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

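// For each global of size N, byte N-1 must be addressable while byte N
// must fall into the trailing redzone.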
TEST(AddressSanitizerInterface, GlobalRedzones) {
  GOOD_ACCESS(glob1, 1 - 1);
  GOOD_ACCESS(glob2, 2 - 1);
  GOOD_ACCESS(glob3, 3 - 1);
  GOOD_ACCESS(glob4, 4 - 1);
  GOOD_ACCESS(glob5, 5 - 1);
  GOOD_ACCESS(glob6, 6 - 1);
  GOOD_ACCESS(glob7, 7 - 1);
  GOOD_ACCESS(glob8, 8 - 1);
  GOOD_ACCESS(glob9, 9 - 1);
  GOOD_ACCESS(glob10, 10 - 1);
  GOOD_ACCESS(glob11, 11 - 1);
  GOOD_ACCESS(glob12, 12 - 1);
  GOOD_ACCESS(glob13, 13 - 1);
  GOOD_ACCESS(glob14, 14 - 1);
  GOOD_ACCESS(glob15, 15 - 1);
  GOOD_ACCESS(glob16, 16 - 1);
  GOOD_ACCESS(glob17, 17 - 1);
  GOOD_ACCESS(glob1000, 1000 - 1);
  GOOD_ACCESS(glob10000, 10000 - 1);
  GOOD_ACCESS(glob100000, 100000 - 1);

  BAD_ACCESS(glob1, 1);
  BAD_ACCESS(glob2, 2);
  BAD_ACCESS(glob3, 3);
  BAD_ACCESS(glob4, 4);
  BAD_ACCESS(glob5, 5);
  BAD_ACCESS(glob6, 6);
  BAD_ACCESS(glob7, 7);
  BAD_ACCESS(glob8, 8);
  BAD_ACCESS(glob9, 9);
  BAD_ACCESS(glob10, 10);
  BAD_ACCESS(glob11, 11);
  BAD_ACCESS(glob12, 12);
  BAD_ACCESS(glob13, 13);
  BAD_ACCESS(glob14, 14);
  BAD_ACCESS(glob15, 15);
  BAD_ACCESS(glob16, 16);
  BAD_ACCESS(glob17, 17);
  BAD_ACCESS(glob1000, 1000);
  BAD_ACCESS(glob1000, 1100);  // Redzone is at least 101 bytes.
  BAD_ACCESS(glob10000, 10000);
  BAD_ACCESS(glob10000, 11000);  // Redzone is at least 1001 bytes.
  BAD_ACCESS(glob100000, 100000);
  BAD_ACCESS(glob100000, 110000);  // Redzone is at least 10001 bytes.
}

// Make sure that within each aligned block of size "2^granularity" a
// "true" (poisoned) value is never followed by a "false" (addressable) one.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      can_be_poisoned = true;
    }
  }
}

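// Exhaustively poison/unpoison every pair of subregions of a 24-byte
// chunk and compare the real shadow state against a model adjusted for
// the 8-byte (2^3) shadow granularity.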
TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}

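// __asan_region_is_poisoned() must return 0 for an empty or fully
// addressable region, and the address of the first poisoned byte
// otherwise.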
TEST(AddressSanitizerInterface, PoisonedRegion) {
  size_t rz = 16;
  for (size_t size = 1; size <= 64; size++) {
    char *p = new char[size];
    uptr x = reinterpret_cast<uptr>(p);
    for (size_t beg = 0; beg < size + rz; beg++) {
      for (size_t end = beg; end < size + rz; end++) {
        uptr first_poisoned = __asan_region_is_poisoned(x + beg, end - beg);
        if (beg == end) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg < size && end <= size) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg >= size) {
          EXPECT_EQ(x + beg, first_poisoned);
        } else {
          EXPECT_GT(end, size);
          EXPECT_EQ(x + size, first_poisoned);
        }
      }
    }
    delete [] p;
  }
}

// This is a performance benchmark for manual runs.
// ASan's memset interceptor calls mem_is_zero for the entire shadow region.
// The profile should look like this:
//     89.10%   [.] __memset_sse2
//     10.50%   [.] __sanitizer::mem_is_zero
// I.e. mem_is_zero should consume ~SHADOW_GRANULARITY times fewer CPU
// cycles than memset itself.
TEST(AddressSanitizerInterface, DISABLED_StressLargeMemset) {
  size_t size = 1 << 20;
  char *x = new char[size];
  for (int i = 0; i < 100000; i++)
    Ident(memset)(x, 0, size);
  delete [] x;
}

// Same here, but we run memset with small sizes.
TEST(AddressSanitizerInterface, DISABLED_StressSmallMemset) {
  size_t size = 32;
  char *x = new char[size];
  for (int i = 0; i < 100000000; i++)
    Ident(memset)(x, 0, size);
  delete [] x;
}

static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory we don't own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory we don't own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

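// Report callback that brackets the report with "ABCDEF" markers so the
// death test below can check that the callback actually ran.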
static void ErrorReportCallbackOneToZ(const char *report) {
  int report_len = strlen(report);
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  ASSERT_EQ(report_len, write(2, report, report_len));
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  _exit(1);
}

TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}

TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
#if ASAN_ALLOCATOR_VERSION == 1
  const size_t kNumMallocs =
      (SANITIZER_WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
#elif ASAN_ALLOCATOR_VERSION == 2  // too slow with asan_allocator2. :(
  const size_t kNumMallocs = 1 << 9;
#endif
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}

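// calloc() must detect overflow in the nmemb * size multiplication and
// return null rather than a too-small chunk.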
TEST(AddressSanitizerInterface, CallocOverflow) {
  size_t kArraySize = 4096;
  volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max();
  volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
  void *p = calloc(kArraySize, kArraySize2);  // Should return 0.
  EXPECT_EQ(0L, Ident(p));
}

TEST(AddressSanitizerInterface, CallocOverflow2) {
#if SANITIZER_WORDSIZE == 32
  size_t kArraySize = 112;
  volatile size_t kArraySize2 = 43878406;
  void *p = calloc(kArraySize, kArraySize2);  // Should return 0.
  EXPECT_EQ(0L, Ident(p));
#endif
}

TEST(AddressSanitizerInterface, CallocReturnsZeroMem) {
  size_t sizes[] = {16, 1000, 10000, 100000, 2100000};
  for (size_t s = 0; s < ARRAY_SIZE(sizes); s++) {
    size_t size = sizes[s];
    for (size_t iter = 0; iter < 5; iter++) {
      char *x = Ident((char*)calloc(1, size));
      EXPECT_EQ(x[0], 0);
      EXPECT_EQ(x[size - 1], 0);
      EXPECT_EQ(x[size / 2], 0);
      EXPECT_EQ(x[size / 3], 0);
      EXPECT_EQ(x[size / 4], 0);
      memset(x, 0x42, size);
      free(Ident(x));
      free(Ident(malloc(Ident(1 << 27))));  // Try to drain the quarantine.
    }
  }
}