//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <algorithm>
#include <vector>
#include <set>

// Too slow for debug build
#if !SANITIZER_DEBUG

#if SANITIZER_CAN_USE_ALLOCATOR64
#if SANITIZER_WINDOWS
static const uptr kAllocatorSpace = 0x10000000000ULL;
static const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 40;
#else
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;
#endif

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#elif defined(__mips64)
static const u64 kAddressSpaceSize = 1ULL << 40;
#elif defined(__aarch64__)
static const u64 kAddressSpaceSize = 1ULL << 39;
#elif defined(__s390x__)
static const u64 kAddressSpaceSize = 1ULL << 53;
#elif defined(__s390__)
static const u64 kAddressSpaceSize = 1ULL << 31;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

typedef SizeClassAllocator32<
  0, kAddressSpaceSize,
  /*kMetadataSize*/16,
  CompactSizeClassMap,
  kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize> >
  Allocator32Compact;

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}

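// Generic smoke test for a size-class allocator: allocates chunks of many
// sizes, checks GetBlockBegin/PointerIsMine/GetSizeClass and the metadata
// round-trip, then frees everything and verifies that total memory usage
// stays stable across iterations.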
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 4000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

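// Stress GetMetaData(): every chunk must keep reporting the same metadata
// pointer no matter how many times it is queried.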
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

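// Allocate more than 8GB worth of chunks from the largest size class to make
// sure GetBlockBegin() is computed without integer overflow.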
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

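// Counts OnMap/OnUnmap invocations so the tests below can verify how many
// times an allocator actually maps and unmaps memory.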
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize,
      /*kMetadataSize*/16,
      CompactSizeClassMap,
      kRegionSizeLog,
      FlatByteMap<kFlatByteMapSize>,
      TestMapUnmapCallback>
    Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack>  cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

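// Keeps allocating batches from one size class until the region is exhausted;
// used below to check that the 64-bit allocator dies with an "Out of memory"
// message instead of misbehaving.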
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}

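// Exercises the primary + secondary combination: huge and overflowing sizes
// must return null (or die once may_return_null is off), and a large mix of
// allocation sizes must round-trip through metadata and the per-thread cache.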
template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init(/* may_return_null */ true);

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  // Set may_return_null to false and check that over-large requests now die.
  a->SetMayReturnNull(false);
  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
               "allocator is terminating the process");

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}

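// Checks that allocating and freeing through a local cache, followed by
// Drain(), returns the allocator to a steady memory footprint for each
// size class.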
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

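// Worker for AllocatorLeakTest: each short-lived thread allocates one chunk
// through the shared static cache and drains it back, so repeated threads
// must not grow the allocator's total memory use.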
void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
    PTHREAD_JOIN(t, 0);
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
  PTHREAD_JOIN(t, 0);

  allocator.TestOnlyUnmap();
}
#endif

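// Sanity checks for the internal allocator (InternalAlloc/InternalFree) used
// by the sanitizer runtime itself.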
TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, LargeAlloc) {
  void *p = InternalAlloc(10 << 20);
  InternalFree(p);
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

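// ForEachChunk() iteration tests: every chunk that was allocated must be
// reported back through the callback while the allocator is locked.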
void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}

template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  a.ForceLock();
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}


#if SANITIZER_CAN_USE_ALLOCATOR64
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
                               SpecialSizeClassMap> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init();
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.
  const uptr kClassID = 107;
  const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  cache.Allocate(a, kClassID);
  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
               "The process has exhausted");
  a->TestOnlyUnmap();
  delete a;
}
#endif

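// TwoLevelByteMap: values written at sparse indices must read back exactly,
// and untouched indices must stay zero.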
TEST(SanitizerCommon, TwoLevelByteMap) {
  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
  const u64 n = kSize1 * kSize2;
  TwoLevelByteMap<kSize1, kSize2> m;
  m.TestOnlyInit();
  for (u64 i = 0; i < n; i += 7) {
    m.set(i, (i % 100) + 1);
  }
  for (u64 j = 0; j < n; j++) {
    if (j % 7)
      EXPECT_EQ(m[j], 0);
    else
      EXPECT_EQ(m[j], (j % 100) + 1);
  }

  m.TestOnlyUnmap();
}


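// Threaded TwoLevelByteMap test: each thread writes its own shard of indices,
// and the map/unmap callback counts must match the number of second-level
// arrays (m.size1()) that were mapped.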
typedef TwoLevelByteMap<1 << 12, 1 << 13, TestMapUnmapCallback> TestByteMap;

struct TestByteMapParam {
  TestByteMap *m;
  size_t shard;
  size_t num_shards;
};

void *TwoLevelByteMapUserThread(void *param) {
  TestByteMapParam *p = (TestByteMapParam*)param;
  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
    size_t val = (i % 100) + 1;
    p->m->set(i, val);
    EXPECT_EQ((*p->m)[i], val);
  }
  return 0;
}

TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
  TestByteMap m;
  m.TestOnlyInit();
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  static const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  TestByteMapParam p[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    p[i].m = &m;
    p[i].shard = i;
    p[i].num_shards = kNumThreads;
    PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
  m.TestOnlyUnmap();
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}

#endif  // #if !SANITIZER_DEBUG