• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//
14 
15 #include "asan_allocator.h"
16 #include "asan_internal.h"
17 #include "asan_mapping.h"
18 #include "asan_test_utils.h"
19 #include <sanitizer/allocator_interface.h>
20 
21 #include <assert.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>  // for memset()
25 #include <algorithm>
26 #include <vector>
27 #include <limits>
28 
// ATTENTION!
// Please don't call intercepted functions (including malloc() and friends)
// in this test. The static runtime library is linked explicitly (without
// -fsanitize=address), thus the interceptors do not work correctly on OS X.
33 
#if !defined(_WIN32)
extern "C" {
// Set specific ASan options for uninstrumented unittest.  The runtime picks
// this override up instead of reading ASAN_OPTIONS defaults.
const char* __asan_default_options() { return "allow_reexec=0"; }
}  // extern "C"
#endif
42 
43 // Make sure __asan_init is called before any test case is run.
44 struct AsanInitCaller {
AsanInitCallerAsanInitCaller45   AsanInitCaller() { __asan_init(); }
46 };
47 static AsanInitCaller asan_init_caller;
48 
// Sanity check that death tests work at all in this uninstrumented binary:
// exiting with a non-zero status must be observed as "death".
TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}
52 
MallocStress(size_t n)53 static void MallocStress(size_t n) {
54   u32 seed = my_rand();
55   StackTrace stack1;
56   stack1.trace[0] = 0xa123;
57   stack1.trace[1] = 0xa456;
58   stack1.size = 2;
59 
60   StackTrace stack2;
61   stack2.trace[0] = 0xb123;
62   stack2.trace[1] = 0xb456;
63   stack2.size = 2;
64 
65   StackTrace stack3;
66   stack3.trace[0] = 0xc123;
67   stack3.trace[1] = 0xc456;
68   stack3.size = 2;
69 
70   std::vector<void *> vec;
71   for (size_t i = 0; i < n; i++) {
72     if ((i % 3) == 0) {
73       if (vec.empty()) continue;
74       size_t idx = my_rand_r(&seed) % vec.size();
75       void *ptr = vec[idx];
76       vec[idx] = vec.back();
77       vec.pop_back();
78       __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
79     } else {
80       size_t size = my_rand_r(&seed) % 1000 + 1;
81       switch ((my_rand_r(&seed) % 128)) {
82         case 0: size += 1024; break;
83         case 1: size += 2048; break;
84         case 2: size += 4096; break;
85       }
86       size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
87       char *ptr = (char*)__asan::asan_memalign(alignment, size,
88                                                &stack2, __asan::FROM_MALLOC);
89       EXPECT_EQ(size, __asan::asan_malloc_usable_size(ptr, 0, 0));
90       vec.push_back(ptr);
91       ptr[0] = 0;
92       ptr[size-1] = 0;
93       ptr[size/2] = 0;
94     }
95   }
96   for (size_t i = 0; i < vec.size(); i++)
97     __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
98 }
99 
100 
// Single-threaded allocator stress; iteration count is reduced on
// low-memory configurations.
TEST(AddressSanitizer, NoInstMallocTest) {
  const size_t kIterations = ASAN_LOW_MEMORY ? 300000 : 1000000;
  MallocStress(kIterations);
}
104 
TEST(AddressSanitizer,ThreadedMallocStressTest)105 TEST(AddressSanitizer, ThreadedMallocStressTest) {
106   const int kNumThreads = 4;
107   const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
108   pthread_t t[kNumThreads];
109   for (int i = 0; i < kNumThreads; i++) {
110     PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
111         (void*)kNumIterations);
112   }
113   for (int i = 0; i < kNumThreads; i++) {
114     PTHREAD_JOIN(t[i], 0);
115   }
116 }
117 
PrintShadow(const char * tag,uptr ptr,size_t size)118 static void PrintShadow(const char *tag, uptr ptr, size_t size) {
119   fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
120   uptr prev_shadow = 0;
121   for (sptr i = -32; i < (sptr)size + 32; i++) {
122     uptr shadow = __asan::MemToShadow(ptr + i);
123     if (i == 0 || i == (sptr)size)
124       fprintf(stderr, ".");
125     if (shadow != prev_shadow) {
126       prev_shadow = shadow;
127       fprintf(stderr, "%02x", (int)*(u8*)shadow);
128     }
129   }
130   fprintf(stderr, "\n");
131 }
132 
// Disabled by default: a manual aid for inspecting shadow memory around
// heap chunks of various sizes, before ("m") and after ("f") the free.
TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t alloc_size = 1; alloc_size <= 513; alloc_size++) {
    char *ptr = new char[alloc_size];
    PrintShadow("m", (uptr)ptr, alloc_size);
    delete [] ptr;
    // ptr is dangling here; only its shadow is read, the memory itself is
    // never dereferenced.
    PrintShadow("f", (uptr)ptr, alloc_size);
  }
}
141 
// A freed chunk must sit in quarantine for a while: the same address should
// not be handed out again until many other chunks of the same size have
// cycled through malloc/free.
TEST(AddressSanitizer, QuarantineTest) {
  StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int kSize = 1024;
  void *victim = __asan::asan_malloc(kSize, &stack);
  __asan::asan_free(victim, &stack, __asan::FROM_MALLOC);
  const size_t kMaxAttempts = 1 << 30;
  size_t attempt;
  for (attempt = 0; attempt < kMaxAttempts; attempt++) {
    void *p = __asan::asan_malloc(kSize, &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
    if (p == victim) break;  // Address finally recycled.
  }
  // Recycling must happen eventually, but not too soon.
  EXPECT_GE(attempt, 10000U);
  EXPECT_LT(attempt, kMaxAttempts);
}
160 
ThreadedQuarantineTestWorker(void * unused)161 void *ThreadedQuarantineTestWorker(void *unused) {
162   (void)unused;
163   u32 seed = my_rand();
164   StackTrace stack;
165   stack.trace[0] = 0x890;
166   stack.size = 1;
167 
168   for (size_t i = 0; i < 1000; i++) {
169     void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
170     __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
171   }
172   return NULL;
173 }
174 
175 // Check that the thread local allocators are flushed when threads are
176 // destroyed.
TEST(AddressSanitizer,ThreadedQuarantineTest)177 TEST(AddressSanitizer, ThreadedQuarantineTest) {
178   const int n_threads = 3000;
179   size_t mmaped1 = __sanitizer_get_heap_size();
180   for (int i = 0; i < n_threads; i++) {
181     pthread_t t;
182     PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
183     PTHREAD_JOIN(t, 0);
184     size_t mmaped2 = __sanitizer_get_heap_size();
185     EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
186   }
187 }
188 
ThreadedOneSizeMallocStress(void * unused)189 void *ThreadedOneSizeMallocStress(void *unused) {
190   (void)unused;
191   StackTrace stack;
192   stack.trace[0] = 0x890;
193   stack.size = 1;
194   const size_t kNumMallocs = 1000;
195   for (int iter = 0; iter < 1000; iter++) {
196     void *p[kNumMallocs];
197     for (size_t i = 0; i < kNumMallocs; i++) {
198       p[i] = __asan::asan_malloc(32, &stack);
199     }
200     for (size_t i = 0; i < kNumMallocs; i++) {
201       __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
202     }
203   }
204   return NULL;
205 }
206 
// Runs the single-size-class stress from several threads concurrently.
TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (int i = 0; i < kNumThreads; i++)
    PTHREAD_CREATE(&threads[i], 0, ThreadedOneSizeMallocStress, 0);
  for (int i = 0; i < kNumThreads; i++)
    PTHREAD_JOIN(threads[i], 0);
}
217 
TEST(AddressSanitizer, ShadowRegionIsPoisonedTest) {
  using __asan::kHighMemEnd;
  // Check that __asan_region_is_poisoned works for shadow regions.
  // A region starting inside any shadow area must report its own start
  // address as the first poisoned byte.
  uptr addr = kLowShadowBeg + 200;
  EXPECT_EQ(addr, __asan_region_is_poisoned(addr, 100));
  addr = kShadowGapBeg + 200;
  EXPECT_EQ(addr, __asan_region_is_poisoned(addr, 100));
  addr = kHighShadowBeg + 200;
  EXPECT_EQ(addr, __asan_region_is_poisoned(addr, 100));
}
228 
229 // Test __asan_load1 & friends.
TEST(AddressSanitizer,LoadStoreCallbacks)230 TEST(AddressSanitizer, LoadStoreCallbacks) {
231   typedef void (*CB)(uptr p);
232   CB cb[2][5] = {
233       {
234         __asan_load1, __asan_load2, __asan_load4, __asan_load8, __asan_load16,
235       }, {
236         __asan_store1, __asan_store2, __asan_store4, __asan_store8,
237         __asan_store16,
238       }
239   };
240 
241   uptr buggy_ptr;
242 
243   __asan_test_only_reported_buggy_pointer = &buggy_ptr;
244   StackTrace stack;
245   stack.trace[0] = 0x890;
246   stack.size = 1;
247 
248   for (uptr len = 16; len <= 32; len++) {
249     char *ptr = (char*) __asan::asan_malloc(len, &stack);
250     uptr p = reinterpret_cast<uptr>(ptr);
251     for (uptr is_write = 0; is_write <= 1; is_write++) {
252       for (uptr size_log = 0; size_log <= 4; size_log++) {
253         uptr size = 1 << size_log;
254         CB call = cb[is_write][size_log];
255         // Iterate only size-aligned offsets.
256         for (uptr offset = 0; offset <= len; offset += size) {
257           buggy_ptr = 0;
258           call(p + offset);
259           if (offset + size <= len)
260             EXPECT_EQ(buggy_ptr, 0U);
261           else
262             EXPECT_EQ(buggy_ptr, p + offset);
263         }
264       }
265     }
266     __asan::asan_free(ptr, &stack, __asan::FROM_MALLOC);
267   }
268   __asan_test_only_reported_buggy_pointer = 0;
269 }
270