//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "chunk.h"
#include "combined.h"
#include "mem_map.h"

#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <stdlib.h>
#include <thread>
#include <vector>

static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

// Fuchsia complains that the function is not used.
UNUSED static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android; without this we can end
  // up spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

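// Mirrors the size computation done by the combined allocator in order to
// predict whether a given (Size, Alignment) request will be serviced by the
// Primary or fall through to the Secondary.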
template <class AllocatorT>
bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUp(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}

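// Checks that out-of-bounds accesses around the chunk fault: one byte before
// the user pointer when tagging is enabled, and one byte past the rounded-up
// size when tagging is enabled (Primary) or, for the Secondary with minimal
// alignment, likely because the chunk abuts a guard region.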
template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUp(Size, MinAlignment);
  if (Allocator->useMemoryTaggingTestOnly())
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[-1] = 0xaa;
        },
        "");
  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
          ? Allocator->useMemoryTaggingTestOnly()
          : Alignment == MinAlignment) {
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[Size] = 0xaa;
        },
        "");
  }
}

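// Test wrapper around scudo::Allocator: eagerly initializes the thread-local
// structures, disables memory tagging when the system cannot deliver tag
// faults, and unmaps everything on destruction. operator new is overridden,
// likely because the allocator object requires a stricter alignment than the
// default global operator new guarantees.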
template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
  TestAllocator() {
    this->initThreadMaybe();
    if (scudo::archSupportsMemoryTagging() &&
        !scudo::systemDetectsMemoryTagFaultsTestOnly())
      this->disableMemoryTagging();
  }
  ~TestAllocator() { this->unmapTestOnly(); }

  void *operator new(size_t size) {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
    return p;
  }

  void operator delete(void *ptr) { free(ptr); }
};

template <class TypeParam> struct ScudoCombinedTest : public Test {
  ScudoCombinedTest() {
    UseQuarantine = std::is_same<TypeParam, scudo::AndroidConfig>::value;
    Allocator = std::make_unique<AllocatorT>();
  }
  ~ScudoCombinedTest() {
    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
    UseQuarantine = true;
  }

  void RunTest();

  void BasicTest(scudo::uptr SizeLog);

  using AllocatorT = TestAllocator<TypeParam>;
  std::unique_ptr<AllocatorT> Allocator;
};

template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;

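// These macros emulate gtest typed tests: for every allocator config in the
// list, SCUDO_TYPED_TEST stamps out a fixture derived from FIXTURE<Config> and
// a TEST_F that runs the body written right after the macro invocation.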
#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                             \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig)                   \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                             \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig)                   \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                         \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                            \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<scudo::TYPE>;                  \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<scudo::TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                       \
  template <class TypeParam>                                                  \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                          \
    void Run();                                                               \
  };                                                                          \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                   \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()

SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
  auto *Allocator = this->Allocator.get();
  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
  EXPECT_FALSE(
      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));

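  // isOwned() should also return false for a stack address, and must not
  // modify the bytes it inspects while doing so.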
  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    StackBuffer[I] = 0x42U;
  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    EXPECT_EQ(StackBuffer[I], 0x42U);
}

template <class Config>
void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
  auto *Allocator = this->Allocator.get();

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, with a focus on sizes that could trigger weird
  // behaviors (plus or minus a small delta of a power of two for example).
  for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
    const scudo::uptr Align = 1U << AlignLog;
    for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
      if (static_cast<scudo::sptr>(1U << SizeLog) + Delta < 0)
        continue;
      const scudo::uptr Size = (1U << SizeLog) + Delta;
      void *P = Allocator->allocate(Size, Origin, Align);
      EXPECT_NE(P, nullptr);
      EXPECT_TRUE(Allocator->isOwned(P));
      EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
      EXPECT_LE(Size, Allocator->getUsableSize(P));
      memset(P, 0xaa, Size);
      checkMemoryTaggingMaybe(Allocator, P, Size, Align);
      Allocator->deallocate(P, Origin, Size);
    }
  }

  Allocator->printStats();
}

#define SCUDO_MAKE_BASIC_TEST(SizeLog)                                        \
  SCUDO_TYPED_TEST(ScudoCombinedDeathTest, BasicCombined##SizeLog) {          \
    this->BasicTest(SizeLog);                                                 \
  }

SCUDO_MAKE_BASIC_TEST(0)
SCUDO_MAKE_BASIC_TEST(1)
SCUDO_MAKE_BASIC_TEST(2)
SCUDO_MAKE_BASIC_TEST(3)
SCUDO_MAKE_BASIC_TEST(4)
SCUDO_MAKE_BASIC_TEST(5)
SCUDO_MAKE_BASIC_TEST(6)
SCUDO_MAKE_BASIC_TEST(7)
SCUDO_MAKE_BASIC_TEST(8)
SCUDO_MAKE_BASIC_TEST(9)
SCUDO_MAKE_BASIC_TEST(10)
SCUDO_MAKE_BASIC_TEST(11)
SCUDO_MAKE_BASIC_TEST(12)
SCUDO_MAKE_BASIC_TEST(13)
SCUDO_MAKE_BASIC_TEST(14)
SCUDO_MAKE_BASIC_TEST(15)
SCUDO_MAKE_BASIC_TEST(16)
SCUDO_MAKE_BASIC_TEST(17)
SCUDO_MAKE_BASIC_TEST(18)
SCUDO_MAKE_BASIC_TEST(19)
SCUDO_MAKE_BASIC_TEST(20)

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroContents returns a zero'd out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroFill returns a zero'd out block.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying PatternOrZeroFill returns a pattern or zero filled
  // block. The primary allocator only produces pattern filled blocks if MTE
  // is disabled, so we only require pattern filled blocks in that case.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
                                                          1U << MinAlignLog) &&
            !Allocator->useMemoryTaggingTestOnly())
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, BlockReuse) {
  auto *Allocator = this->Allocator.get();

  // Verify that a chunk will end up being reused, at some point.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->getHeaderTaggedPointer(P) ==
        Allocator->getHeaderTaggedPointer(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a chunk all the way up to a secondary allocation, verifying
  // that we preserve the data in the process.
  scudo::uptr Size = 16;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, Size);
  while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
    void *NewP = Allocator->reallocate(P, Size * 2);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < Size; J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    memset(reinterpret_cast<char *>(NewP) + Size, Marker, Size);
    Size *= 2U;
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
  auto *Allocator = this->Allocator.get();

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default class size maps.
  constexpr scudo::uptr ReallocSize =
      TypeParam::Primary::SizeClassMap::MaxSize - 64;
  void *P = Allocator->allocate(ReallocSize, Origin);
  const char Marker = 0xab;
  memset(P, Marker, ReallocSize);
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    const scudo::uptr NewSize = ReallocSize + Delta;
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);
    for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
  auto *Allocator = this->Allocator.get();
  // Allocate a bunch of chunks, then iterate over all the chunks, ensuring
  // they are the ones we allocated. This requires the allocator to not have
  // any other allocated chunk at this point (e.g., it won't work with the
  // Quarantine).
  // FIXME: Make it work with UseQuarantine and tagging enabled. The internals
  // of iterateOverChunks read the header through both tagged and untagged
  // pointers, so one of them will fail.
  if (!UseQuarantine) {
    std::vector<void *> V;
    for (scudo::uptr I = 0; I < 64U; I++)
      V.push_back(Allocator->allocate(
          rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
    Allocator->disable();
    Allocator->iterateOverChunks(
        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
        [](uintptr_t Base, size_t Size, void *Arg) {
          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
          void *P = reinterpret_cast<void *>(Base);
          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
        },
        reinterpret_cast<void *>(&V));
    Allocator->enable();
    for (auto P : V)
      Allocator->deallocate(P, Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
  auto *Allocator = this->Allocator.get();

  // Check that use-after-free is detected.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!Allocator->useMemoryTaggingTestOnly())
      continue;
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[0] = 0xaa;
        },
        "");
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[Size - 1] = 0xaa;
        },
        "");
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
  auto *Allocator = this->Allocator.get();

  if (Allocator->useMemoryTaggingTestOnly()) {
    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 0xaa, "");
    scudo::ScopedDisableMemoryTagChecks NoTagChecks;
    Allocator->disableMemoryTagging();
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(scudo::untagPointer(P), P);
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
  auto *Allocator = this->Allocator.get();

  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
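  // Grow the buffer until the size reported by getStats() fits, so the full
  // statistics output is captured.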
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allows
  // us to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
  auto *Allocator = this->Allocator.get();

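  // Populate the thread's local cache by allocating and freeing a batch of
  // blocks, then check that an explicit drain() empties it.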
  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  EXPECT_TRUE(!TSD->getCache().isEmpty());
  TSD->getCache().drain();
  EXPECT_TRUE(TSD->getCache().isEmpty());
  if (UnlockRequired)
    TSD->unlock();
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  // `ForceAll` will also drain the caches.
  Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll);

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  EXPECT_TRUE(TSD->getCache().isEmpty());
  EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U);
  EXPECT_TRUE(Allocator->getQuarantine()->isEmpty());
  if (UnlockRequired)
    TSD->unlock();
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  auto *Allocator = this->Allocator.get();
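  // Spawn all the threads first and release them at once through the
  // condition variable, so that they hammer the allocator concurrently.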
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = std::rand() % 4096U;
        void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
        if (P)
          V.push_back(std::make_pair(P, Size));
      }
      while (!V.empty()) {
        auto Pair = V.back();
        Allocator->deallocate(Pair.first, Origin, Pair.second);
        V.pop_back();
      }
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
TEST(ScudoCombinedDeathTest, SKIP_ON_FUCHSIA(testSEGV)) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::ReservedMemoryT ReservedMemory;
  ASSERT_TRUE(ReservedMemory.create(/*Addr=*/0U, Size, "testSEGV"));
  void *P = reinterpret_cast<void *>(ReservedMemory.getBase());
  ASSERT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  ReservedMemory.release();
}

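// A shrunk size class map for the death tests below: with NumBits == 1,
// MinSizeLog == MidSizeLog == 10 and MaxSizeLog == 13, this should yield
// exactly four size classes (1024, 2048, 4096 and 8192 bytes).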
struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u16 MaxNumCachedHint = 8;
  static const scudo::uptr MaxBytesCachedLog = 12;
  static const scudo::uptr SizeDelta = 0;
};

static const scudo::uptr DeathRegionSizeLog = 21U;
struct DeathConfig {
  static const bool MaySupportMemoryTagging = false;

  // Tiny allocator, whose Primary only serves chunks of four sizes.
  using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
  typedef scudo::SizeClassAllocator64<DeathConfig> Primary;
  static const scudo::uptr PrimaryRegionSizeLog = DeathRegionSizeLog;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
  static const scudo::uptr PrimaryGroupSizeLog = 18;

  typedef scudo::MapAllocatorNoCache SecondaryCache;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
};

TEST(ScudoCombinedDeathTest, DeathCombined) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::SizeClassMap::LargestClassId; ClassId++) {
    const scudo::uptr Size =
        DeathConfig::SizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::SizeClassMap::LargestClassId - ClassId + 1);
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
SCUDO_TYPED_TEST(ScudoCombinedTest, ReleaseToOS) {
  auto *Allocator = this->Allocator.get();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, OddEven) {
  auto *Allocator = this->Allocator.get();

  if (!Allocator->useMemoryTaggingTestOnly())
    return;

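  // With memory tagging, adjacent chunks are expected to receive tags of
  // opposite parity, so that a linear overflow into the neighboring chunk
  // always trips a tag mismatch.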
  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
    EXPECT_NE(Tag1 % 2, Tag2 % 2);
  };

  using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);

    std::set<scudo::uptr> Ptrs;
    bool Found = false;
    for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
      if (Ptrs.count(P - Size)) {
        Found = true;
        CheckOddEven(P, P - Size);
        break;
      }
      if (Ptrs.count(P + Size)) {
        Found = true;
        CheckOddEven(P, P + Size);
        break;
      }
      Ptrs.insert(P);
    }
    EXPECT_TRUE(Found);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> Ptrs(65536);

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // Test that if mem-init is disabled on a thread, calloc should still work as
  // expected. This is tricky to ensure when MTE is enabled, so this test tries
  // to exercise the relevant code on our MTE path.
  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
    using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
    const scudo::uptr Size =
        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
    if (Size < 8)
      continue;
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin);
      memset(Ptrs[I], 0xaa, Size);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
      memset(Ptrs[I], 0xbb, Size - 8);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      for (scudo::uptr J = 0; J < Size; ++J)
        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], 0);
    }
  }

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
  auto *Allocator = this->Allocator.get();

  // Regression test: make realloc-in-place happen at the very right end of a
  // mapped region.
  constexpr int nPtrs = 10000;
  for (int i = 1; i < 32; ++i) {
    scudo::uptr Size = 16 * i - 1;
    std::vector<void *> Ptrs;
    for (int i = 0; i < nPtrs; ++i) {
      void *P = Allocator->allocate(Size, Origin);
      P = Allocator->reallocate(P, Size + 1);
      Ptrs.push_back(P);
    }

    for (int i = 0; i < nPtrs; ++i)
      Allocator->deallocate(Ptrs[i], Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
  auto *Allocator = this->Allocator.get();
  auto Size = Allocator->getRingBufferSize();
  if (Size > 0)
    EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
  auto *Allocator = this->Allocator.get();
  auto *Addr = Allocator->getRingBufferAddress();
  EXPECT_NE(Addr, nullptr);
  EXPECT_EQ(Addr, Allocator->getRingBufferAddress());
}

#if SCUDO_CAN_USE_PRIMARY64
#if SCUDO_TRUSTY

// TrustyConfig is designed for a domain-specific allocator. Add a basic test
// that covers only simple operations and ensures the configuration compiles.
TEST(ScudoCombinedTest, BasicTrustyConfig) {
  using AllocatorT = scudo::Allocator<scudo::TrustyConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  for (scudo::uptr ClassId = 1U;
       ClassId <= scudo::TrustyConfig::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        scudo::TrustyConfig::SizeClassMap::getSizeByClassId(ClassId);
    void *p = Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin);
    ASSERT_NE(p, nullptr);
    free(p);
  }

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  TSD->getCache().drain();

  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

#endif
#endif

#if SCUDO_LINUX

SCUDO_TYPED_TEST(ScudoCombinedTest, SoftRssLimit) {
  auto *Allocator = this->Allocator.get();
  Allocator->setRssLimitsTestOnly(1, 0, true);
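  // Once the soft RSS limit set above is exceeded, allocations are expected
  // to fail by returning null rather than aborting.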

  size_t Megabyte = 1024 * 1024;
  size_t ChunkSize = 16;
  size_t Error = 256;

  std::vector<void *> Ptrs;
  for (size_t index = 0; index < Megabyte + Error; index += ChunkSize) {
    void *Ptr = Allocator->allocate(ChunkSize, Origin);
    Ptrs.push_back(Ptr);
  }

  EXPECT_EQ(nullptr, Allocator->allocate(ChunkSize, Origin));

  for (void *Ptr : Ptrs)
    Allocator->deallocate(Ptr, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, HardRssLimit) {
  auto *Allocator = this->Allocator.get();
  Allocator->setRssLimitsTestOnly(0, 1, false);
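  // Exceeding the hard RSS limit set above is expected to abort the process,
  // hence the EXPECT_DEATH below.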

  size_t Megabyte = 1024 * 1024;

  EXPECT_DEATH(
      {
        disableDebuggerdMaybe();
        Allocator->allocate(Megabyte, Origin);
      },
      "");
}

#endif
