//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_test_utils.h"
#include <sanitizer/allocator_interface.h>

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <algorithm>
#include <vector>
#include <limits>

// ATTENTION!
// Please don't call intercepted functions (including malloc() and friends)
// in this test. The static runtime library is linked explicitly (without
// -fsanitize=address), thus the interceptors do not work correctly on OS X.

// Make sure __asan_init is called before any test case is run.
struct AsanInitCaller {
  AsanInitCaller() {
    __asan_init();
  }
};
static AsanInitCaller asan_init_caller;

TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

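// Stress the internal allocator: on each iteration either free a random
// element of the live set or allocate a chunk of random size and alignment
// and touch its first, middle and last bytes; free whatever remains at the
// end.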
static void MallocStress(size_t n) {
  u32 seed = my_rand();
  BufferedStackTrace stack1;
  stack1.trace_buffer[0] = 0xa123;
  stack1.trace_buffer[1] = 0xa456;
  stack1.size = 2;

  BufferedStackTrace stack2;
  stack2.trace_buffer[0] = 0xb123;
  stack2.trace_buffer[1] = 0xb456;
  stack2.size = 2;

  BufferedStackTrace stack3;
  stack3.trace_buffer[0] = 0xc123;
  stack3.trace_buffer[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand_r(&seed) % 1000 + 1;
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      EXPECT_EQ(size, __asan::asan_malloc_usable_size(ptr, 0, 0));
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
}


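// Single-threaded allocator stress run.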
TEST(AddressSanitizer, NoInstMallocTest) {
  MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000);
}

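// Run MallocStress concurrently from several threads.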
TEST(AddressSanitizer, ThreadedMallocStressTest) {
  const int kNumThreads = 4;
  const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
        (void*)kNumIterations);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

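// Dump the shadow bytes covering [ptr - 32, ptr + size + 32): a dot marks the
// start and end of the user region, and a shadow byte is printed whenever its
// address changes.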
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

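// A freed chunk should stay in quarantine for a while: it must take many
// malloc/free cycles of the same size before the allocator hands back the
// same address.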
TEST(AddressSanitizer, QuarantineTest) {
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  const int size = 1024;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  EXPECT_GE(i, 10000U);
  EXPECT_LT(i, max_i);
}

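// Worker for ThreadedQuarantineTest: 1000 random-sized malloc/free pairs
// through the internal allocator API.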
void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand();
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __sanitizer_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __sanitizer_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

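// Repeatedly allocate and free batches of 1000 fixed-size (32-byte) chunks.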
void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

TEST(AddressSanitizer, ShadowRegionIsPoisonedTest) {
  using __asan::kHighMemEnd;
  // Check that __asan_region_is_poisoned works for shadow regions.
  uptr ptr = kLowShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kShadowGapBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kHighShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
}

// Test __asan_load1 & friends.
TEST(AddressSanitizer, LoadStoreCallbacks) {
  typedef void (*CB)(uptr p);
  CB cb[2][5] = {
      {
        __asan_load1, __asan_load2, __asan_load4, __asan_load8, __asan_load16,
      }, {
        __asan_store1, __asan_store2, __asan_store4, __asan_store8,
        __asan_store16,
      }
  };

  uptr buggy_ptr;

  __asan_test_only_reported_buggy_pointer = &buggy_ptr;
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

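  // For each allocation length and access width, size-aligned accesses within
  // the allocation must leave buggy_ptr untouched, while an access that runs
  // past the end must record the faulting address.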
  for (uptr len = 16; len <= 32; len++) {
    char *ptr = (char*) __asan::asan_malloc(len, &stack);
    uptr p = reinterpret_cast<uptr>(ptr);
    for (uptr is_write = 0; is_write <= 1; is_write++) {
      for (uptr size_log = 0; size_log <= 4; size_log++) {
        uptr size = 1 << size_log;
        CB call = cb[is_write][size_log];
        // Iterate only size-aligned offsets.
        for (uptr offset = 0; offset <= len; offset += size) {
          buggy_ptr = 0;
          call(p + offset);
          if (offset + size <= len)
            EXPECT_EQ(buggy_ptr, 0U);
          else
            EXPECT_EQ(buggy_ptr, p + offset);
        }
      }
    }
    __asan::asan_free(ptr, &stack, __asan::FROM_MALLOC);
  }
  __asan_test_only_reported_buggy_pointer = 0;
}