1 //===-- tsd_test.cpp --------------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "tests/scudo_unit_test.h"
10
11 #include "tsd_exclusive.h"
12 #include "tsd_shared.h"
13
#include <stdlib.h>
#include <string.h>

#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <thread>
20
21 // We mock out an allocator with a TSD registry, mostly using empty stubs. The
22 // cache contains a single volatile uptr, to be able to test that several
23 // concurrent threads will not access or modify the same cache at the same time.
// Mock allocator exposing exactly the hooks a TSD registry invokes (init,
// initCache, commitBack, callPostInitCallback, unmapTestOnly), mostly as
// empty stubs. The cache holds a single volatile canary word that the
// threaded tests below use to detect two threads sharing one cache.
template <class Config> class MockAllocator {
public:
  using ThisT = MockAllocator<Config>;
  // The registry type under test is injected through the Config parameter.
  using TSDRegistryT = typename Config::template TSDRegistryT<ThisT>;
  using CacheT = struct MockCache { volatile scudo::uptr Canary; };
  using QuarantineCacheT = struct MockQuarantine {};

  void init() {
    // This should only be called once by the registry.
    EXPECT_FALSE(Initialized);
    Initialized = true;
  }

  void unmapTestOnly() { TSDRegistry.unmapTestOnly(this); }
  // Zero-initializes the cache so a fresh TSD always starts with Canary == 0.
  void initCache(CacheT *Cache) { *Cache = {}; }
  void commitBack(scudo::TSD<MockAllocator> *TSD) {}
  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  void callPostInitCallback() {}

  bool isInitialized() { return Initialized; }

  // Allocate the mock with posix_memalign so the instance satisfies the
  // alignment the embedded registry requires.
  void *operator new(size_t Size) {
    void *P = nullptr;
    EXPECT_EQ(0, posix_memalign(&P, alignof(ThisT), Size));
    return P;
  }
  void operator delete(void *P) { free(P); }

private:
  bool Initialized = false;
  TSDRegistryT TSDRegistry;
};
56
// Registry configurations under test: a shared registry capped at a single
// TSD, a shared registry with up to 16 TSDs (8 by default — see the
// TSDRegistryTSDsCount test below), and a thread-exclusive registry.
struct OneCache {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 1U, 1U>;
};

struct SharedCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 16U, 8U>;
};

struct ExclusiveCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistryExT<Allocator>;
};
71
// Checks that the registry's init() hook initializes the backing allocator.
TEST(ScudoTSDTest, TSDRegistryInit) {
  using AllocatorT = MockAllocator<OneCache>;
  // The custom deleter unmaps the registry's memory before freeing the mock.
  std::unique_ptr<AllocatorT, void (*)(AllocatorT *)> Allocator(
      new AllocatorT, [](AllocatorT *A) {
        A->unmapTestOnly();
        delete A;
      });
  EXPECT_FALSE(Allocator->isInitialized());

  Allocator->getTSDRegistry()->init(Allocator.get());
  EXPECT_TRUE(Allocator->isInitialized());
}
86
// Exercises the basic registry lifecycle: a minimal thread init followed by a
// full one, verifying each time that a TSD can be grabbed and that its cache
// starts out zeroed.
template <class AllocatorT> static void testRegistry() {
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto *Registry = Allocator->getTSDRegistry();

  // Grabs the current TSD, checks that its cache canary is still zero, and,
  // when requested, scribbles over the cache before releasing the TSD. The
  // unlock is only needed for shared registries, which hand out locked TSDs.
  const auto CheckTSD = [Registry](bool Scribble) {
    bool NeedsUnlock;
    auto TSD = Registry->getTSDAndLock(&NeedsUnlock);
    EXPECT_NE(TSD, nullptr);
    EXPECT_EQ(TSD->Cache.Canary, 0U);
    if (Scribble)
      memset(&TSD->Cache, 0x42, sizeof(TSD->Cache));
    if (NeedsUnlock)
      TSD->unlock();
  };

  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/true);
  EXPECT_TRUE(Allocator->isInitialized());
  CheckTSD(/*Scribble=*/false);

  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
  CheckTSD(/*Scribble=*/true);
}
115
// Runs the basic registry test against each configuration; the exclusive
// registry is not exercised on Fuchsia.
TEST(ScudoTSDTest, TSDRegistryBasic) {
  testRegistry<MockAllocator<OneCache>>();
  testRegistry<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistry<MockAllocator<ExclusiveCaches>>();
#endif
}
123
// Start-line synchronization for the threaded tests: worker threads block on
// Cv until the main thread sets Ready (under Mutex) and notifies, so that all
// workers hit the registry concurrently.
static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready;
127
stressCache(AllocatorT * Allocator)128 template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
129 auto Registry = Allocator->getTSDRegistry();
130 {
131 std::unique_lock<std::mutex> Lock(Mutex);
132 while (!Ready)
133 Cv.wait(Lock);
134 }
135 Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
136 bool UnlockRequired;
137 auto TSD = Registry->getTSDAndLock(&UnlockRequired);
138 EXPECT_NE(TSD, nullptr);
139 // For an exclusive TSD, the cache should be empty. We cannot guarantee the
140 // same for a shared TSD.
141 if (!UnlockRequired)
142 EXPECT_EQ(TSD->Cache.Canary, 0U);
143 // Transform the thread id to a uptr to use it as canary.
144 const scudo::uptr Canary = static_cast<scudo::uptr>(
145 std::hash<std::thread::id>{}(std::this_thread::get_id()));
146 TSD->Cache.Canary = Canary;
147 // Loop a few times to make sure that a concurrent thread isn't modifying it.
148 for (scudo::uptr I = 0; I < 4096U; I++)
149 EXPECT_EQ(TSD->Cache.Canary, Canary);
150 if (UnlockRequired)
151 TSD->unlock();
152 }
153
testRegistryThreaded()154 template <class AllocatorT> static void testRegistryThreaded() {
155 Ready = false;
156 auto Deleter = [](AllocatorT *A) {
157 A->unmapTestOnly();
158 delete A;
159 };
160 std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
161 Deleter);
162 std::thread Threads[32];
163 for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
164 Threads[I] = std::thread(stressCache<AllocatorT>, Allocator.get());
165 {
166 std::unique_lock<std::mutex> Lock(Mutex);
167 Ready = true;
168 Cv.notify_all();
169 }
170 for (auto &T : Threads)
171 T.join();
172 }
173
// Runs the threaded stress test against each configuration; the exclusive
// registry is not exercised on Fuchsia.
TEST(ScudoTSDTest, TSDRegistryThreaded) {
  testRegistryThreaded<MockAllocator<OneCache>>();
  testRegistryThreaded<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistryThreaded<MockAllocator<ExclusiveCaches>>();
#endif
}
181
// Union of the distinct TSD pointers observed by all worker threads in the
// TSDs-count test; merged under Mutex at the end of each worker.
static std::set<void *> Pointers;
183
stressSharedRegistry(MockAllocator<SharedCaches> * Allocator)184 static void stressSharedRegistry(MockAllocator<SharedCaches> *Allocator) {
185 std::set<void *> Set;
186 auto Registry = Allocator->getTSDRegistry();
187 {
188 std::unique_lock<std::mutex> Lock(Mutex);
189 while (!Ready)
190 Cv.wait(Lock);
191 }
192 Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
193 bool UnlockRequired;
194 for (scudo::uptr I = 0; I < 4096U; I++) {
195 auto TSD = Registry->getTSDAndLock(&UnlockRequired);
196 EXPECT_NE(TSD, nullptr);
197 Set.insert(reinterpret_cast<void *>(TSD));
198 if (UnlockRequired)
199 TSD->unlock();
200 }
201 {
202 std::unique_lock<std::mutex> Lock(Mutex);
203 Pointers.insert(Set.begin(), Set.end());
204 }
205 }
206
// Ensures the shared registry hands out the expected number of distinct TSDs:
// at most the default count initially, and exactly the new maximum once it is
// raised through Option::MaxTSDsCount. The spawn/release/join round was
// duplicated verbatim; it is factored into a local RunThreads lambda.
TEST(ScudoTSDTest, TSDRegistryTSDsCount) {
  Ready = false;
  Pointers.clear();
  using AllocatorT = MockAllocator<SharedCaches>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  // Runs one full round: spawn the workers, release them simultaneously via
  // the Ready/Cv start line, and wait for completion. Each worker records the
  // TSD pointers it saw in the global Pointers set.
  const auto RunThreads = [&Allocator]() {
    std::thread Threads[32];
    for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
      Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
    {
      std::unique_lock<std::mutex> Lock(Mutex);
      Ready = true;
      Cv.notify_all();
    }
    for (auto &T : Threads)
      T.join();
  };
  // We attempt to use as many TSDs as the shared cache offers by creating a
  // decent amount of threads that will be run concurrently and attempt to get
  // and lock TSDs. We put them all in a set and count the number of entries
  // after we are done.
  RunThreads();
  // The initial number of TSDs we get will be the minimum of the default count
  // and the number of CPUs.
  EXPECT_LE(Pointers.size(), 8U);
  Pointers.clear();
  auto Registry = Allocator->getTSDRegistry();
  // Increase the number of TSDs to 16.
  Registry->setOption(scudo::Option::MaxTSDsCount, 16);
  Ready = false;
  RunThreads();
  // We should get 16 distinct TSDs back.
  EXPECT_EQ(Pointers.size(), 16U);
}
251