//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <pthread.h>
#include <algorithm>
#include <vector>
#include <set>
// Too slow for debug build
#if TSAN_DEBUG == 0

#if SANITIZER_WORDSIZE == 64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;
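// Note: the 64-bit allocator is pinned to a fixed 1 TiB region starting at
// kAllocatorSpace, and kAddressSpaceSize (2^47) matches the usual x86_64
// user-space layout; the GetBlockBegin sweep below walks this whole range.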

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

typedef SizeClassAllocator32<
  0, kAddressSpaceSize, 16, CompactSizeClassMap> Allocator32Compact;
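// In these instantiations the third template argument (16) is the per-chunk
// metadata size in bytes: enough for the two uptr values the tests below
// store via GetMetaData() on 64-bit hosts.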

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

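// Round-trip test for a size-class allocator: allocate chunks of many sizes
// through a local cache, validate block bounds, size classes and metadata,
// free everything, and check that memory usage reaches a steady state.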
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 8000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

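// Stress GetMetaData by querying metadata of live chunks kNumAllocs^2 times;
// the volatile sink keeps the compiler from optimizing the lookups away.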
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  static volatile void *sink;

  const uptr kNumAllocs = 10000;
  void *allocated[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    sink = a->GetMetaData(allocated[i % kNumAllocs]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  (void)sink;
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

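// Counts OnMap/OnUnmap invocations so the tests below can assert exactly how
// many mappings each allocator operation creates and destroys.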
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize, 16, CompactSizeClassMap,
      TestMapUnmapCallback> Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 2);  // Allocator state + alloc.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 2);  // The whole thing + alloc.
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

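// Allocates batches from a single size class until the primary allocator
// exhausts its fixed address range; the death test below expects the
// "Out of memory" assertion to fire.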
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }
}

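// Exercise the combined (primary + secondary) allocator: oversized requests
// must fail cleanly, and per-chunk metadata must survive a shuffled
// allocate/free cycle that hits both underlying allocators.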
template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init();

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  // Requests with impossible sizes must fail and return 0.
  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}

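// Check that a local cache drains fully back to its allocator: after each
// allocate/deallocate/Drain round, TotalMemoryUsed() must not change from
// one iteration to the next.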
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

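// Each short-lived worker thread allocates once through a static cache and
// drains it, so spawning many threads must not grow the allocator's memory
// use after the first iteration.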
#if SANITIZER_WORDSIZE == 64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    EXPECT_EQ(0, pthread_create(&t, 0, AllocatorLeakTestWorker, &a));
    EXPECT_EQ(0, pthread_join(t, 0));
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads. The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread. Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(
      params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized. We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  EXPECT_EQ(0, pthread_create(&t, 0, DeallocNewThreadWorker, params));
  EXPECT_EQ(0, pthread_join(t, 0));
}
#endif

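// Sanity checks for InternalAlloc/InternalFree, the allocator the sanitizer
// runtime uses for its own internal bookkeeping.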
TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

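// Collects every chunk reported by ForEachChunk so the iteration tests below
// can verify that all live allocations are visited.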
class IterationTestCallback {
 public:
  explicit IterationTestCallback(std::set<void *> *chunks)
      : chunks_(chunks) {}
  void operator()(void *chunk) const {
    chunks_->insert(chunk);
  }
 private:
  std::set<void *> *chunks_;
};

template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<void *> reported_chunks;
  IterationTestCallback callback(&reported_chunks);
  a->ForceLock();
  a->ForEachChunk(callback);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  std::set<void *> reported_chunks;
  IterationTestCallback callback(&reported_chunks);
  a.ForceLock();
  a.ForEachChunk(callback);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
  }
}

#endif  // #if TSAN_DEBUG==0