1 /**
2 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15 #ifndef PANDA_RUNTIME_TESTS_ALLOCATOR_TEST_BASE_H_
16 #define PANDA_RUNTIME_TESTS_ALLOCATOR_TEST_BASE_H_
17
18 #include <gtest/gtest.h>
19
20 #include <algorithm>
21 #include <array>
22 #include <cstdlib>
23 #include <cstring>
24 #include <ctime>
25 #include <tuple>
26 #include <unordered_set>
27
28 #include "libpandabase/mem/mem.h"
29 #include "libpandabase/os/thread.h"
30 #include "libpandabase/utils/utils.h"
31 #include "runtime/mem/bump-allocator.h"
32 #include "runtime/mem/mem_stats_additional_info.h"
33 #include "runtime/mem/mem_stats_default.h"
34 #include "runtime/include/object_header.h"
35
36 namespace ark::mem {
37
38 template <class Allocator>
39 class AllocatorTest : public testing::Test {
40 public:
    explicit AllocatorTest()
42 {
43 #ifdef PANDA_NIGHTLY_TEST_ON
        seed_ = std::time(nullptr);
45 #else
46 static constexpr unsigned int FIXED_SEED = 0xDEADBEEF;
47 seed_ = FIXED_SEED;
48 #endif
49 srand(seed_);
50 InitByteArray();
51 }
52
53 protected:
54 static constexpr size_t BYTE_ARRAY_SIZE = 1000;
55
56 unsigned int seed_; // NOLINT(misc-non-private-member-variables-in-classes)
57 std::array<uint8_t, BYTE_ARRAY_SIZE> byteArray_ {}; // NOLINT(misc-non-private-member-variables-in-classes)
58
    /// Fill the byte array with random bytes
    void InitByteArray()
61 {
62 for (size_t i = 0; i < BYTE_ARRAY_SIZE; ++i) {
63 byteArray_[i] = RandFromRange(0, std::numeric_limits<uint8_t>::max());
64 }
65 }
66
67 /**
     * @brief Add a memory pool to the allocator (may be a no-op for allocators that do not use pools)
     * @param allocator - allocator to add the memory pool to
70 */
71 virtual void AddMemoryPoolToAllocator([[maybe_unused]] Allocator &allocator) = 0;
72
73 /**
     * @brief Add a memory pool to the allocator and protect it (may be a no-op for allocators that do not use pools)
     * @param allocator - allocator to add and protect the memory pool for
76 */
77 virtual void AddMemoryPoolToAllocatorProtected([[maybe_unused]] Allocator &allocator) = 0;
78
79 /**
     * @brief Check whether the memory was allocated by this allocator
     * @param allocator - allocator to check against
     * @param mem - allocated memory to check
83 */
84 virtual bool AllocatedByThisAllocator([[maybe_unused]] Allocator &allocator, [[maybe_unused]] void *mem) = 0;
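
    // Illustrative sketch (an assumption, not part of this header): a concrete fixture derives from
    // AllocatorTest and fills in the three hooks above, roughly like
    //
    //     class MyAllocatorTest : public AllocatorTest<MyAllocator> {  // MyAllocator is hypothetical
    //         void AddMemoryPoolToAllocator(MyAllocator &allocator) override
    //         {
    //             // hand a freshly reserved pool to the allocator; the pool source is an assumption
    //         }
    //         void AddMemoryPoolToAllocatorProtected(MyAllocator &allocator) override
    //         {
    //             // same as above, additionally protecting the pool memory
    //         }
    //         bool AllocatedByThisAllocator(MyAllocator &allocator, void *mem) override
    //         {
    //             return allocator.ContainObject(static_cast<ObjectHeader *>(mem));  // API name is an assumption
    //         }
    //     };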
85
86 /**
87 * @brief Generate random value from [min_value, max_value]
88 * @param min_value - minimum size_t value in range
89 * @param max_value - maximum size_t value in range
90 * @return random size_t value [min_value, max_value]
91 */
    size_t RandFromRange(size_t minValue, size_t maxValue)
93 {
        // rand() is not a thread-safe function,
        // so call it under a lock
96 static os::memory::Mutex randLock;
97 os::memory::LockHolder lock(randLock);
98 // NOLINTNEXTLINE(cert-msc50-cpp)
99 return minValue + rand() % (maxValue - minValue + 1);
100 }
101
102 /**
     * @brief Write a value to memory for a death test
     * @param mem - memory to write to
     *
     * Used in the address sanitizer test to write to memory that has already been freed
107 */
    void DeathWriteUint64(void *mem)
109 {
110 static constexpr uint64_t INVALID_ADDR = 0xDEADBEEF;
111 *(static_cast<uint64_t *>(mem)) = INVALID_ADDR;
112 }
113
114 /**
     * @brief Fill memory with bytes taken from the random byte array
     * @param mem - memory to fill
     * @param size - size of the memory in bytes
     * @return start index in the byte array
119 */
    size_t SetBytesFromByteArray(void *mem, size_t size)
121 {
122 size_t startIndex = RandFromRange(0, BYTE_ARRAY_SIZE - 1);
123 size_t copied = 0;
124 size_t firstCopySize = std::min(size, BYTE_ARRAY_SIZE - startIndex);
125 // Set head of memory
126 memcpy_s(mem, firstCopySize, &byteArray_[startIndex], firstCopySize);
127 size -= firstCopySize;
128 copied += firstCopySize;
129 // Set middle part of memory
130 while (size > BYTE_ARRAY_SIZE) {
131 memcpy_s(ToVoidPtr(ToUintPtr(mem) + copied), BYTE_ARRAY_SIZE, byteArray_.data(), BYTE_ARRAY_SIZE);
132 size -= BYTE_ARRAY_SIZE;
133 copied += BYTE_ARRAY_SIZE;
134 }
135 // Set tail of memory
136 memcpy_s(ToVoidPtr(ToUintPtr(mem) + copied), size, byteArray_.data(), size);
137
138 return startIndex;
139 }
140
141 /**
142 * @brief Compare bytes in memory with byte array
     * @param mem - memory to compare with the byte array
     * @param size - size of the memory in bytes
     * @param start_index_in_byte_array - start index in the byte array for comparison with memory
     * @return true if the bytes are equal, false otherwise
147 */
    bool CompareBytesWithByteArray(void *mem, size_t size, size_t startIndexInByteArray)
149 {
150 size_t compared = 0;
151 size_t firstCompareSize = std::min(size, BYTE_ARRAY_SIZE - startIndexInByteArray);
152 // Compare head of memory
153 if (memcmp(mem, &byteArray_[startIndexInByteArray], firstCompareSize) != 0) {
154 return false;
155 }
156 compared += firstCompareSize;
157 size -= firstCompareSize;
158 // Compare middle part of memory
159 while (size >= BYTE_ARRAY_SIZE) {
160 if (memcmp(ToVoidPtr(ToUintPtr(mem) + compared), byteArray_.data(), BYTE_ARRAY_SIZE) != 0) {
161 return false;
162 }
163 size -= BYTE_ARRAY_SIZE;
164 compared += BYTE_ARRAY_SIZE;
165 }
166 // Compare tail of memory
167 return memcmp(ToVoidPtr(ToUintPtr(mem) + compared), byteArray_.data(), size) == 0;
168 }
169
170 /**
171 * @brief Allocate with one alignment
172 * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
173 * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
174 * @tparam ALIGNMENT - enum Alignment value for allocations
     * @tparam AllocatorArgs - argument types for allocator creation
176 * @param pools_count - count of pools needed by allocation
177 * @param allocator_args - arguments for allocator creation
178 *
179 * Allocate all possible sizes from [MIN_ALLOC_SIZE, MAX_ALLOC_SIZE] with ALIGNMENT alignment
180 */
181 template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment ALIGNMENT, class... AllocatorArgs>
    void OneAlignedAllocFreeTest(size_t poolsCount, AllocatorArgs &&...allocatorArgs)
183 {
184 static constexpr size_t ALLOCATIONS_COUNT = MAX_ALLOC_SIZE - MIN_ALLOC_SIZE + 1;
185
186 auto *memStats = new mem::MemStatsType();
187 Allocator allocator(memStats, std::forward<AllocatorArgs>(allocatorArgs)...);
188 for (size_t i = 0; i < poolsCount; ++i) {
189 AddMemoryPoolToAllocator(allocator);
190 }
191 std::array<std::pair<void *, size_t>, ALLOCATIONS_COUNT> allocatedElements;
192
193 // Allocations
194 for (size_t size = MIN_ALLOC_SIZE; size <= MAX_ALLOC_SIZE; ++size) {
195 void *mem = allocator.Alloc(size, Alignment(ALIGNMENT));
196 ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << size << " bytes with "
197 << static_cast<size_t>(ALIGNMENT) << " log alignment, seed: " << seed_;
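            // For a power-of-two alignment of A bytes, an address is aligned iff (addr & (A - 1)) == 0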
198 ASSERT_EQ(reinterpret_cast<uintptr_t>(mem) & (GetAlignmentInBytes(Alignment(ALIGNMENT)) - 1), 0UL)
199 << size << " bytes, " << static_cast<size_t>(ALIGNMENT) << " log alignment, seed: " << seed_;
200 allocatedElements[size - MIN_ALLOC_SIZE] = {mem, SetBytesFromByteArray(mem, size)};
201 }
202 // Check and Free
203 for (size_t size = MIN_ALLOC_SIZE; size <= MAX_ALLOC_SIZE; size++) {
204 size_t k = size - MIN_ALLOC_SIZE;
205 ASSERT_TRUE(CompareBytesWithByteArray(allocatedElements[k].first, size, allocatedElements[k].second))
206 << "address: " << std::hex << allocatedElements[k].first << ", size: " << size
207 << ", alignment: " << static_cast<size_t>(ALIGNMENT) << ", seed: " << seed_;
208 allocator.Free(allocatedElements[k].first);
209 }
210 delete memStats;
211 }
212 /**
     * @brief Allocate with all alignments
214 * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
215 * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
216 * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
217 * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
218 * @param pools_count - count of pools needed by allocation
219 *
220 * Allocate all possible sizes from [MIN_ALLOC_SIZE, MAX_ALLOC_SIZE] with all possible alignment from
221 * [LOG_ALIGN_MIN, LOG_ALIGN_MAX]
222 */
223 template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE = LOG_ALIGN_MIN,
224 Alignment LOG_ALIGN_MAX_VALUE = LOG_ALIGN_MAX>
225 void AlignedAllocFreeTest(size_t poolsCount = 1)
226 {
227 static_assert(MIN_ALLOC_SIZE <= MAX_ALLOC_SIZE);
228 static_assert(LOG_ALIGN_MIN_VALUE <= LOG_ALIGN_MAX_VALUE);
229 static constexpr size_t ALLOCATIONS_COUNT =
230 (MAX_ALLOC_SIZE - MIN_ALLOC_SIZE + 1) * (LOG_ALIGN_MAX_VALUE - LOG_ALIGN_MIN_VALUE + 1);
231
232 std::array<std::pair<void *, size_t>, ALLOCATIONS_COUNT> allocatedElements;
233 auto *memStats = new mem::MemStatsType();
234 Allocator allocator(memStats);
235 for (size_t i = 0; i < poolsCount; i++) {
236 AddMemoryPoolToAllocator(allocator);
237 }
238
239 // Allocations with alignment
240 size_t k = 0;
241 for (size_t size = MIN_ALLOC_SIZE; size <= MAX_ALLOC_SIZE; ++size) {
242 for (size_t align = LOG_ALIGN_MIN_VALUE; align <= LOG_ALIGN_MAX_VALUE; ++align, ++k) {
243 void *mem = allocator.Alloc(size, Alignment(align));
244 ASSERT_TRUE(mem != nullptr)
245 << "Didn't allocate " << size << " bytes with " << align << " log alignment, seed: " << seed_;
246 ASSERT_EQ(reinterpret_cast<uintptr_t>(mem) & (GetAlignmentInBytes(Alignment(align)) - 1), 0UL)
247 << size << " bytes, " << align << " log alignment, seed: " << seed_;
248 allocatedElements[k] = {mem, SetBytesFromByteArray(mem, size)};
249 }
250 }
251 // Check and free
252 k = 0;
253 for (size_t size = MIN_ALLOC_SIZE; size <= MAX_ALLOC_SIZE; ++size) {
254 for (size_t align = LOG_ALIGN_MIN_VALUE; align <= LOG_ALIGN_MAX_VALUE; ++align, ++k) {
255 ASSERT_TRUE(CompareBytesWithByteArray(allocatedElements[k].first, size, allocatedElements[k].second))
256 << "address: " << std::hex << allocatedElements[k].first << ", size: " << size
257 << ", alignment: " << align << ", seed: " << seed_;
258 allocator.Free(allocatedElements[k].first);
259 }
260 }
261 delete memStats;
262 }
263
264 /**
265 * @brief Simple test for allocate and free
266 * @param alloc_size - size in bytes for each allocation
267 * @param elements_count - count of elements for allocation
268 * @param pools_count - count of pools needed by allocation
269 *
270 * Allocate elements with random values setting, check and free memory
271 */
272 void AllocateAndFree(size_t allocSize, size_t elementsCount, size_t poolsCount = 1)
273 {
274 auto *memStats = new mem::MemStatsType();
275 Allocator allocator(memStats);
276 for (size_t i = 0; i < poolsCount; i++) {
277 AddMemoryPoolToAllocator(allocator);
278 }
279 std::vector<std::pair<void *, size_t>> allocatedElements(elementsCount);
280
281 // Allocations
282 for (size_t i = 0; i < elementsCount; ++i) {
283 void *mem = allocator.Alloc(allocSize);
284 ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << allocSize << " bytes in " << i
285 << " iteration, seed: " << seed_;
286 size_t index = SetBytesFromByteArray(mem, allocSize);
287 allocatedElements[i] = {mem, index};
288 }
289 // Free
290 for (auto &element : allocatedElements) {
291 ASSERT_TRUE(CompareBytesWithByteArray(element.first, allocSize, element.second))
292 << "address: " << std::hex << element.first << ", size: " << allocSize << ", seed: " << seed_;
293 allocator.Free(element.first);
294 }
295 delete memStats;
296 }
297
298 /**
299 * @brief Simple test for checking iteration over free pools method.
300 * @tparam pools_count - count of pools needed by allocation, must be bigger than 3
301 * @param alloc_size - size in bytes for each allocation
302 *
303 * Allocate and use memory pools; free all elements from first, last
304 * and one in the middle; call iteration over free pools
     * and allocate something again.
306 */
307 template <size_t POOLS_COUNT = 5>
    void VisitAndRemoveFreePools(size_t allocSize)
309 {
310 static constexpr size_t POOLS_TO_FREE = 3;
311 static_assert(POOLS_COUNT > POOLS_TO_FREE);
312 std::array<std::vector<void *>, POOLS_COUNT> allocatedElements;
313 auto *memStats = new mem::MemStatsType();
314 Allocator allocator(memStats);
315
316 for (size_t i = 0; i < POOLS_COUNT; i++) {
317 AddMemoryPoolToAllocator(allocator);
318 while (true) {
319 void *mem = allocator.Alloc(allocSize);
320 if (mem == nullptr) {
321 break;
322 }
323 allocatedElements[i].push_back(mem);
324 }
325 }
326 std::array<size_t, POOLS_TO_FREE> freedPoolsIndexes = {0, POOLS_COUNT / 2, POOLS_COUNT - 1};
327 // free all elements in pools
328 for (auto i : freedPoolsIndexes) {
329 FreeAllocatedElements(allocatedElements, allocator, i);
330 }
331 size_t freedPools = 0;
332 allocator.VisitAndRemoveFreePools([&freedPools](void *mem, size_t size) {
333 (void)mem;
334 (void)size;
335 freedPools++;
336 });
337 ASSERT_TRUE(freedPools == POOLS_TO_FREE) << ", seed: " << seed_;
338 ASSERT_TRUE(allocator.Alloc(allocSize) == nullptr) << ", seed: " << seed_;
339 // allocate again
340 for (auto i : freedPoolsIndexes) {
341 AddMemoryPoolToAllocator(allocator);
342 while (true) {
343 void *mem = allocator.Alloc(allocSize);
344 if (mem == nullptr) {
345 break;
346 }
347 allocatedElements[i].push_back(mem);
348 }
349 }
350 // free everything:
351 for (size_t i = 0; i < POOLS_COUNT; i++) {
352 FreeAllocatedElements(allocatedElements, allocator, i);
353 }
354 freedPools = 0;
355 allocator.VisitAndRemoveFreePools([&freedPools](void *mem, size_t size) {
356 (void)mem;
357 (void)size;
358 freedPools++;
359 });
360 delete memStats;
361 ASSERT_TRUE(freedPools == POOLS_COUNT) << ", seed: " << seed_;
362 }
363
364 template <size_t POOLS_COUNT = 5>
    static void FreeAllocatedElements(std::array<std::vector<void *>, POOLS_COUNT> &allocatedElements,
                                      Allocator &allocator, size_t i)
367 {
368 for (auto j : allocatedElements[i]) {
369 allocator.Free(j);
370 }
371 allocatedElements[i].clear();
372 }
373
374 /**
375 * @brief Allocate with different sizes and free in random order
376 * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
377 * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam AllocatorArgs - argument types for allocator creation
379 * @param elements_count - count of elements for allocation
380 * @param pools_count - count of pools needed by allocation
381 * @param allocator_args - arguments for allocator creation
     * Allocate elements of random sizes in random order and fill them with random values; check and free the
     * memory in random order as well
384 */
385 template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, class... AllocatorArgs>
    void AllocateFreeDifferentSizesTest(size_t elementsCount, size_t poolsCount, AllocatorArgs &&...allocatorArgs)
387 {
388 std::unordered_set<size_t> usedIndexes;
389 // {memory, size, start_index_in_byte_array}
390 std::vector<std::tuple<void *, size_t, size_t>> allocatedElements(elementsCount);
391 auto *memStats = new mem::MemStatsType();
392 Allocator allocator(memStats, std::forward<AllocatorArgs>(allocatorArgs)...);
393 for (size_t i = 0; i < poolsCount; i++) {
394 AddMemoryPoolToAllocator(allocator);
395 }
396
397 size_t fullSizeAllocated = 0;
398 for (size_t i = 0; i < elementsCount; ++i) {
399 size_t size = RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE);
400 // Allocation
401 void *mem = allocator.Alloc(size);
402 ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << size << " bytes, full allocated: " << fullSizeAllocated
403 << ", seed: " << seed_;
404 fullSizeAllocated += size;
405 // Write random bytes
406 allocatedElements[i] = {mem, size, SetBytesFromByteArray(mem, size)};
407 usedIndexes.insert(i);
408 }
409 // Compare and free
410 while (!usedIndexes.empty()) {
411 size_t i = RandFromRange(0, elementsCount - 1);
412 auto it = usedIndexes.find(i);
413 if (it != usedIndexes.end()) {
414 usedIndexes.erase(it);
415 } else {
416 i = *usedIndexes.begin();
417 usedIndexes.erase(usedIndexes.begin());
418 }
419 // Compare
420 ASSERT_TRUE(CompareBytesWithByteArray(std::get<0>(allocatedElements[i]), std::get<1>(allocatedElements[i]),
421 std::get<2U>(allocatedElements[i])))
422 << "Address: " << std::hex << std::get<0>(allocatedElements[i])
423 << ", size: " << std::get<1>(allocatedElements[i])
424 << ", start index in byte array: " << std::get<2U>(allocatedElements[i]) << ", seed: " << seed_;
425 allocator.Free(std::get<0>(allocatedElements[i]));
426 }
427 delete memStats;
428 }
429
430 /**
     * @brief Try to allocate an object that is too big; the allocation must fail
432 * @tparam MAX_ALLOC_SIZE - maximum possible size for allocation by this allocator
433 */
434 template <size_t MAX_ALLOC_SIZE>
    void AllocateTooBigObjectTest()
436 {
437 auto *memStats = new mem::MemStatsType();
438 Allocator allocator(memStats);
439 AddMemoryPoolToAllocator(allocator);
440
441 size_t sizeObj = MAX_ALLOC_SIZE + 1;
442 void *mem = allocator.Alloc(sizeObj);
443 ASSERT_TRUE(mem == nullptr) << "Allocate too big object with " << sizeObj << " size at address " << std::hex
444 << mem;
445 delete memStats;
446 }
447
448 /**
     * @brief Try to allocate too many objects; not all allocations can succeed
     * @param alloc_size - size in bytes for one allocation
     * @param elements_count - count of elements for allocation
     *
     * Allocate more elements than the allocator can hold, so at least one allocation must fail
454 */
    void AllocateTooMuchTest(size_t allocSize, size_t elementsCount)
456 {
457 auto *memStats = new mem::MemStatsType();
458 Allocator allocator(memStats);
459 AddMemoryPoolToAllocatorProtected(allocator);
460
461 bool isNotAll = false;
462 for (size_t i = 0; i < elementsCount; i++) {
463 void *mem = allocator.Alloc(allocSize);
464 if (mem == nullptr) {
465 isNotAll = true;
466 break;
467 }
468 SetBytesFromByteArray(mem, allocSize);
469 }
470 ASSERT_TRUE(isNotAll) << "elements count: " << elementsCount << ", element size: " << allocSize
471 << ", seed: " << seed_;
472 delete memStats;
473 }
474
475 /**
476 * @brief Use allocator in std::vector
477 * @param elements_count - count of elements for allocation
478 *
     * Check that the allocator's adapter works as the allocator of a std::vector
480 */
481 // NOLINTNEXTLINE(readability-magic-numbers)
482 void AllocateVectorTest(size_t elementsCount = 32)
483 {
484 using ElementType = size_t;
485 static constexpr size_t MAGIC_CONST = 3;
486 auto *memStats = new mem::MemStatsType();
487 Allocator allocator(memStats);
488 AddMemoryPoolToAllocatorProtected(allocator);
489 using AdapterType = typename decltype(allocator.Adapter())::template Rebind<ElementType>::other;
490 std::vector<ElementType, AdapterType> vec(allocator.Adapter());
491
492 for (size_t i = 0; i < elementsCount; i++) {
493 vec.push_back(i * MAGIC_CONST);
494 }
495 for (size_t i = 0; i < elementsCount; i++) {
496 ASSERT_EQ(vec[i], i * MAGIC_CONST) << "iteration: " << i;
497 }
498
499 vec.clear();
500
501 for (size_t i = 0; i < elementsCount; i++) {
502 vec.push_back(i * (MAGIC_CONST + 1));
503 }
504 for (size_t i = 0; i < elementsCount; i++) {
505 ASSERT_EQ(vec[i], i * (MAGIC_CONST + 1)) << "iteration: " << i;
506 }
507 delete memStats;
508 }
509
510 /**
511 * @brief Allocate and reuse
512 * @tparam element_type - type of elements for allocations
513 * @param alignment_mask - mask for alignment of two addresses
514 * @param elements_count - count of elements for allocation
515 *
     * Allocate and free memory, then reuse it. Check that both allocation runs start at the same (masked) address
517 */
518 template <class ElementType = uint64_t>
519 void AllocateReuseTest(size_t alignmentMask, size_t elementsCount = 100) // NOLINT(readability-magic-numbers)
520 {
521 static constexpr size_t SIZE_1 = sizeof(ElementType);
522 static constexpr size_t SIZE_2 = SIZE_1 * 3;
523
524 auto *memStats = new mem::MemStatsType();
525 Allocator allocator(memStats);
526 AddMemoryPoolToAllocator(allocator);
527 std::vector<std::pair<void *, size_t>> allocatedElements(elementsCount);
528
529 // First allocations
530 for (size_t i = 0; i < elementsCount; ++i) {
531 void *mem = allocator.Alloc(SIZE_1);
532 ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << SIZE_1 << " bytes in " << i << " iteration";
533 size_t index = SetBytesFromByteArray(mem, SIZE_1);
534 allocatedElements[i] = {mem, index};
535 }
536 auto firstAllocatedMem = reinterpret_cast<uintptr_t>(allocatedElements[0].first);
537 // Free
538 for (size_t i = 0; i < elementsCount; i++) {
539 ASSERT_TRUE(CompareBytesWithByteArray(allocatedElements[i].first, SIZE_1, allocatedElements[i].second))
540 << "address: " << std::hex << allocatedElements[i].first << ", size: " << SIZE_1 << ", seed: " << seed_;
541 allocator.Free(allocatedElements[i].first);
542 }
543 // Second allocations
544 for (size_t i = 0; i < elementsCount; ++i) {
545 void *mem = allocator.Alloc(SIZE_2);
546 ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << SIZE_2 << " bytes in " << i << " iteration";
547 size_t index = SetBytesFromByteArray(mem, SIZE_2);
548 allocatedElements[i] = {mem, index};
549 }
550 auto secondAllocatedMem = reinterpret_cast<uintptr_t>(allocatedElements[0].first);
551 // Free
552 for (size_t i = 0; i < elementsCount; i++) {
553 ASSERT_TRUE(CompareBytesWithByteArray(allocatedElements[i].first, SIZE_2, allocatedElements[i].second))
554 << "address: " << std::hex << allocatedElements[i].first << ", size: " << SIZE_2 << ", seed: " << seed_;
555 allocator.Free(allocatedElements[i].first);
556 }
557 delete memStats;
558 ASSERT_EQ(firstAllocatedMem & ~alignmentMask, secondAllocatedMem & ~alignmentMask)
559 << "first address = " << std::hex << firstAllocatedMem << ", second address = " << std::hex
560 << secondAllocatedMem << std::endl
561 << "alignment mask: " << alignmentMask << ", seed: " << seed_;
562 }
563 /**
564 * @brief Allocate and free objects, collect via allocator method
565 * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
566 * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
567 * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
568 * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @tparam ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR - 0 if the allocator uses pools; otherwise the count of elements
     * to allocate
571 * @param free_granularity - granularity for objects free before collection
572 * @param pools_count - count of pools needed by allocation
573 *
     * Allocate objects, free a part of them and collect the rest via the allocator's method, which frees objects
     * during the collection. Check the result of the collection.
576 */
577 template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE = LOG_ALIGN_MIN,
578 Alignment LOG_ALIGN_MAX_VALUE = LOG_ALIGN_MAX, size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR = 0>
579 void ObjectCollectionTest(size_t freeGranularity = 4, size_t poolsCount = 2)
580 {
581 size_t elementsCount = 0;
582 std::vector<void *> allocatedElements;
583 auto *memStats = new mem::MemStatsType();
584 Allocator allocator(memStats);
585 std::unordered_set<size_t> usedIndexes;
586 ObjectIteratingSetUp<MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE,
587 ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>(freeGranularity, poolsCount, allocator,
588 elementsCount, allocatedElements, usedIndexes);
589
590 // Collect all objects into unordered_set via allocator's method
591 allocator.Collect(&AllocatorTest<Allocator>::ReturnDeadAndPutInSet);
592 // Check in unordered_set
593 for (size_t i = 0; i < elementsCount; i++) {
594 auto it = usedIndexes.find(i);
595 if (it != usedIndexes.end()) {
596 void *mem = allocatedElements[i];
597 ASSERT_TRUE(EraseFromSet(mem))
598 << "Object at address " << std::hex << mem << " isn't in collected objects, seed: " << seed_;
599 }
600 }
601
602 delete memStats;
603 ASSERT_TRUE(IsEmptySet()) << "seed: " << seed_;
604 }
605
606 /**
607 * @brief Allocate and free objects, collect via allocator method
608 * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
609 * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
610 * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
611 * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @tparam ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR - 0 if the allocator uses pools; otherwise the count of elements
     * to allocate
614 * @param free_granularity - granularity for objects free before collection
615 * @param pools_count - count of pools needed by allocation
616 *
     * Allocate objects, free a part of them and iterate over the rest via the allocator's method.
     * Check the iterated elements and free them afterwards.
619 */
620 template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE = LOG_ALIGN_MIN,
621 Alignment LOG_ALIGN_MAX_VALUE = LOG_ALIGN_MAX, size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR = 0>
622 void ObjectIteratorTest(size_t freeGranularity = 4, size_t poolsCount = 2)
623 {
624 size_t elementsCount = 0;
625 std::vector<void *> allocatedElements;
626 auto *memStats = new mem::MemStatsType();
627 Allocator allocator(memStats);
628 std::unordered_set<size_t> usedIndexes;
629 ObjectIteratingSetUp<MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE,
630 ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>(freeGranularity, poolsCount, allocator,
631 elementsCount, allocatedElements, usedIndexes);
632
633 // Collect all objects into unordered_set via allocator's method
634 allocator.IterateOverObjects(&AllocatorTest<Allocator>::VisitAndPutInSet);
635 // Free all and check in unordered_set
636 for (size_t i = 0; i < elementsCount; i++) {
637 auto it = usedIndexes.find(i);
638 if (it != usedIndexes.end()) {
639 void *mem = allocatedElements[i];
640 allocator.Free(mem);
641 ASSERT_TRUE(EraseFromSet(mem))
642 << "Object at address " << std::hex << mem << " isn't in collected objects, seed: " << seed_;
643 }
644 }
645
646 delete memStats;
647 ASSERT_TRUE(IsEmptySet()) << "seed: " << seed_;
648 }
649
650 /**
651 * @brief Allocate and free objects, iterate via allocator method iterating in range
652 * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
653 * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
654 * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
655 * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @tparam ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR - 0 if the allocator uses pools; otherwise the count of elements
     * to allocate
     * @param range_iteration_size - size of an iteration range during the test. Must be a power of two
659 * @param free_granularity - granularity for objects free before collection
660 * @param pools_count - count of pools needed by allocation
661 *
     * Allocate objects, free a part of them and iterate over the rest via the allocator's range-iteration method.
     * Check the iterated elements and free them afterwards.
664 */
665 template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE = LOG_ALIGN_MIN,
666 Alignment LOG_ALIGN_MAX_VALUE = LOG_ALIGN_MAX, size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR = 0>
667 void ObjectIteratorInRangeTest(size_t rangeIterationSize, size_t freeGranularity = 4, size_t poolsCount = 2)
668 {
669 ASSERT((rangeIterationSize & (rangeIterationSize - 1U)) == 0U);
670 size_t elementsCount = 0;
671 std::vector<void *> allocatedElements;
672 std::unordered_set<size_t> usedIndexes;
673 auto *memStats = new mem::MemStatsType();
674 Allocator allocator(memStats);
675 ObjectIteratingSetUp<MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE,
676 ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>(freeGranularity, poolsCount, allocator,
677 elementsCount, allocatedElements, usedIndexes);
678
679 void *minObjPointer = *std::min_element(allocatedElements.begin(), allocatedElements.end());
680 void *maxObjPointer = *std::max_element(allocatedElements.begin(), allocatedElements.end());
681 // Collect all objects into unordered_set via allocator's method
682 uintptr_t curPointer = ToUintPtr(minObjPointer);
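        // Round the start address down to a rangeIterationSize boundary (rangeIterationSize is a power of two)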
683 curPointer = curPointer & (~(rangeIterationSize - 1));
684 while (curPointer <= ToUintPtr(maxObjPointer)) {
685 allocator.IterateOverObjectsInRange(&AllocatorTest<Allocator>::VisitAndPutInSet, ToVoidPtr(curPointer),
686 ToVoidPtr(curPointer + rangeIterationSize - 1U));
687 curPointer = curPointer + rangeIterationSize;
688 }
689
690 // Free all and check in unordered_set
691 for (size_t i = 0; i < elementsCount; i++) {
692 auto it = usedIndexes.find(i);
693 if (it != usedIndexes.end()) {
694 void *mem = allocatedElements[i];
695 allocator.Free(mem);
696 ASSERT_TRUE(EraseFromSet(mem))
697 << "Object at address " << std::hex << mem << " isn't in collected objects, seed: " << seed_;
698 }
699 }
700 delete memStats;
701 ASSERT_TRUE(IsEmptySet()) << "seed: " << seed_;
702 }
703
704 /**
705 * @brief Address sanitizer test for allocator
706 * @tparam elements_count - count of elements for allocation
707 * @param free_granularity - granularity for freed elements
708 * @param pools_count - count of pools needed by allocation
709 *
     * Test for the address sanitizer. Free some elements and try to write a value to the freed memory.
711 */
712 // NOLINTNEXTLINE(readability-magic-numbers)
713 template <size_t ELEMENTS_COUNT = 100>
714 void AsanTest(size_t freeGranularity = 3, size_t poolsCount = 1) // NOLINT(readability-magic-numbers)
715 {
716 using ElementType = uint64_t;
717 static constexpr size_t ALLOC_SIZE = sizeof(ElementType);
718 static constexpr size_t ALLOCATIONS_COUNT = ELEMENTS_COUNT;
719
720 if (freeGranularity == 0) {
721 freeGranularity = 1;
722 }
723
724 auto *memStats = new mem::MemStatsType();
725 Allocator allocator(memStats);
726 for (size_t i = 0; i < poolsCount; i++) {
727 AddMemoryPoolToAllocatorProtected(allocator);
728 }
729 std::array<void *, ALLOCATIONS_COUNT> allocatedElements {};
730 // Allocations
731 for (size_t i = 0; i < ALLOCATIONS_COUNT; ++i) {
732 void *mem = allocator.Alloc(ALLOC_SIZE);
733 ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << ALLOC_SIZE << " bytes on " << i << " iteration";
734 allocatedElements[i] = mem;
735 }
736 // Free some elements
737 for (size_t i = 0; i < ALLOCATIONS_COUNT; i += freeGranularity) {
738 allocator.Free(allocatedElements[i]);
739 }
740 // Asan check
741 for (size_t i = 0; i < ALLOCATIONS_COUNT; ++i) {
742 if (i % freeGranularity == 0) {
743 #ifdef PANDA_ASAN_ON
744 EXPECT_DEATH(DeathWriteUint64(allocatedElements[i]), "")
745 << "Write " << sizeof(ElementType) << " bytes at address " << std::hex << allocatedElements[i];
746 #else
747 continue;
748 #endif // PANDA_ASAN_ON
749 } else {
750 allocator.Free(allocatedElements[i]);
751 }
752 }
753 delete memStats;
754 }
755 /**
     * @brief Test the "allocated by this allocator" check
     *
     * Test the allocator function that checks whether memory was allocated by this allocator
759 */
    void AllocatedByThisAllocatorTest()
761 {
762 mem::MemStatsType memStats;
763 Allocator allocator(&memStats);
764 AllocatedByThisAllocatorTest(allocator);
765 }
766
767 /**
     * @brief Test the "allocated by this allocator" check
     *
     * Test the allocator function that checks whether memory was allocated by this allocator
771 */
    void AllocatedByThisAllocatorTest(Allocator &allocator)
773 {
774 static constexpr size_t ALLOC_SIZE = sizeof(uint64_t);
775 AddMemoryPoolToAllocatorProtected(allocator);
776 void *allocatedByThis = allocator.Alloc(ALLOC_SIZE);
777 // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
778 std::unique_ptr<void, void (*)(void *)> allocatedByMalloc(std::malloc(ALLOC_SIZE), free);
779 std::array<uint8_t, ALLOC_SIZE> allocatedOnStack {};
780 void *allocatedByMallocAddr = allocatedByMalloc.get();
781
782 ASSERT_TRUE(AllocatedByThisAllocator(allocator, allocatedByThis)) << "address: " << std::hex << allocatedByThis;
783 ASSERT_FALSE(AllocatedByThisAllocator(allocator, allocatedByMallocAddr))
784 << "address: " << allocatedByMallocAddr;
785 ASSERT_FALSE(AllocatedByThisAllocator(allocator, static_cast<void *>(allocatedOnStack.data())))
786 << "address on stack: " << std::hex << static_cast<void *>(allocatedOnStack.data());
787
788 allocator.Free(allocatedByThis);
789 allocatedByMalloc.reset();
790
791 // NOLINTNEXTLINE(clang-analyzer-unix.Malloc)
792 ASSERT_FALSE(AllocatedByThisAllocator(allocator, allocatedByMallocAddr))
793 << "after free, address: " << allocatedByMallocAddr;
794 }
795
796 /**
797 * @brief Simultaneously allocate/free objects in different threads
     * @param allocator - target allocator for the test
799 * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
800 * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
801 * @tparam THREADS_COUNT - the number of threads used in this test
802 * @param min_elements_count - minimum elements which will be allocated during test for each thread
803 * @param max_elements_count - maximum elements which will be allocated during test for each thread
804 */
805 template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
    void MtAllocTest(Allocator *allocator, size_t minElementsCount, size_t maxElementsCount)
807 {
808 #if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
809 // We have an issue with QEMU during MT tests. Issue 2852
810 static_assert(THREADS_COUNT == 1);
811 #endif
812 std::atomic<size_t> numFinished = 0;
813 for (size_t i = 0; i < THREADS_COUNT; i++) {
814 auto tid = os::thread::ThreadStart(&MtAllocRun, this, allocator, &numFinished, MIN_ALLOC_SIZE,
815 MAX_ALLOC_SIZE, minElementsCount, maxElementsCount);
816 os::thread::ThreadDetach(tid);
817 }
818
819 while (true) {
820 // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially
821 // consistent order where threads observe all modifications in the same order
822 if (numFinished.load(std::memory_order_seq_cst) == THREADS_COUNT) {
823 break;
824 }
825 os::thread::Yield();
826 }
827 }
828
829 /**
830 * @brief Simultaneously allocate/free objects in different threads
831 * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
832 * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
833 * @tparam THREADS_COUNT - the number of threads used in this test
834 * @param min_elements_count - minimum elements which will be allocated during test for each thread
835 * @param max_elements_count - maximum elements which will be allocated during test for each thread
836 * @param free_granularity - granularity for objects free before total free
837 */
838 template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
839 void MtAllocFreeTest(size_t minElementsCount, size_t maxElementsCount, size_t freeGranularity = 4)
840 {
841 #if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
842 // We have an issue with QEMU during MT tests. Issue 2852
843 static_assert(THREADS_COUNT == 1);
844 #endif
845 auto *memStats = new mem::MemStatsType();
846 Allocator allocator(memStats);
847 std::atomic<size_t> numFinished = 0;
848
849 // Prepare an allocator
850 MTTestPrologue(allocator, RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE));
851
852 for (size_t i = 0; i < THREADS_COUNT; i++) {
853 (void)freeGranularity;
854 auto tid = os::thread::ThreadStart(&MtAllocFreeRun, this, &allocator, &numFinished, freeGranularity,
855 MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, minElementsCount, maxElementsCount);
856 os::thread::ThreadDetach(tid);
857 }
858
859 while (true) {
860 // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially
861 // consistent order where threads observe all modifications in the same order
862 if (numFinished.load(std::memory_order_seq_cst) == THREADS_COUNT) {
863 break;
864 }
865 os::thread::Yield();
866 }
867 delete memStats;
868 }
869
870 /**
871 * @brief Simultaneously allocate objects and iterate over objects (in range too) in different threads
872 * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
873 * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
874 * @tparam THREADS_COUNT - the number of threads used in this test
875 * @param min_elements_count - minimum elements which will be allocated during test for each thread
876 * @param max_elements_count - maximum elements which will be allocated during test for each thread
     * @param range_iteration_size - size of an iteration range during the test. Must be a power of two
878 */
879 template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
    void MtAllocIterateTest(size_t minElementsCount, size_t maxElementsCount, size_t rangeIterationSize)
881 {
882 #if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
883 // We have an issue with QEMU during MT tests. Issue 2852
884 static_assert(THREADS_COUNT == 1);
885 #endif
886 auto *memStats = new mem::MemStatsType();
887 Allocator allocator(memStats);
888 std::atomic<size_t> numFinished = 0;
889
890 // Prepare an allocator
891 MTTestPrologue(allocator, RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE));
892
893 for (size_t i = 0; i < THREADS_COUNT; i++) {
894 (void)rangeIterationSize;
895 auto tid = os::thread::ThreadStart(&MtAllocIterateRun, this, &allocator, &numFinished, rangeIterationSize,
896 MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, minElementsCount, maxElementsCount);
897 os::thread::ThreadDetach(tid);
898 }
899
900 while (true) {
901 // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially
902 // consistent order where threads observe all modifications in the same order
903 if (numFinished.load(std::memory_order_seq_cst) == THREADS_COUNT) {
904 break;
905 }
906 os::thread::Yield();
907 }
908
909 // Delete all objects in allocator
910 allocator.Collect([&](ObjectHeader *object) {
911 (void)object;
912 return ObjectStatus::DEAD_OBJECT;
913 });
914 delete memStats;
915 }
916
917 /**
918 * @brief Simultaneously allocate and collect objects in different threads
919 * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
920 * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
921 * @tparam THREADS_COUNT - the number of threads used in this test
922 * @param min_elements_count - minimum elements which will be allocated during test for each thread
923 * @param max_elements_count - maximum elements which will be allocated during test for each thread
924 * @param max_thread_with_collect - maximum threads which will call collect simultaneously
925 */
926 template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
927 void MtAllocCollectTest(size_t minElementsCount, size_t maxElementsCount, size_t maxThreadWithCollect = 1)
928 {
929 #if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
930 // We have an issue with QEMU during MT tests. Issue 2852
931 static_assert(THREADS_COUNT == 1);
932 #endif
933 auto *memStats = new mem::MemStatsType();
934 Allocator allocator(memStats);
935 std::atomic<size_t> numFinished = 0;
936 std::atomic<uint32_t> threadWithCollect {0U};
937
938 // Prepare an allocator
939 MTTestPrologue(allocator, RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE));
940
941 for (size_t i = 0; i < THREADS_COUNT; i++) {
942 auto tid = os::thread::ThreadStart(&MtAllocCollectRun, this, &allocator, &numFinished, MIN_ALLOC_SIZE,
943 MAX_ALLOC_SIZE, minElementsCount, maxElementsCount, maxThreadWithCollect,
944 &threadWithCollect);
945 os::thread::ThreadDetach(tid);
946 }
947
948 while (true) {
949 // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially
950 // consistent order where threads observe all modifications in the same order
951 if (numFinished.load(std::memory_order_seq_cst) == THREADS_COUNT) {
952 break;
953 }
954 os::thread::Yield();
955 }
956
957 // Delete all objects in allocator
958 allocator.Collect([&](ObjectHeader *object) {
959 (void)object;
960 return ObjectStatus::DEAD_OBJECT;
961 });
962 delete memStats;
963 }
964
965 private:
966 /**
967 * @brief Allocate and free objects in allocator for future collecting/iterating checks
968 * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
969 * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
970 * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
971 * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @tparam ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR - 0 if the allocator uses pools; otherwise the count of elements
     * to allocate
974 * @param free_granularity - granularity for objects free before collection
975 * @param pools_count - count of pools needed by allocation
976 *
977 * Allocate objects and free part of objects.
978 */
979 template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE,
980 Alignment LOG_ALIGN_MAX_VALUE, size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>
    void ObjectIteratingSetUp(size_t freeGranularity, size_t poolsCount, Allocator &allocator, size_t &elementsCount,
                              std::vector<void *> &allocatedElements, std::unordered_set<size_t> &usedIndexes)
983 {
984 AddMemoryPoolToAllocator(allocator);
985 size_t allocatedPools = 1;
986 auto doAllocations = [poolsCount]([[maybe_unused]] size_t allocatedPoolsCount,
987 [[maybe_unused]] size_t count) -> bool {
988 if constexpr (ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR == 0) {
989 return allocatedPoolsCount < poolsCount;
990 } else {
991 (void)poolsCount;
992 return count < ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR;
993 }
994 };
995
996 // Allocations
997 while (doAllocations(allocatedPools, elementsCount)) {
998 size_t size = RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE);
999 size_t align = RandFromRange(LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE);
1000 void *mem = allocator.Alloc(size, Alignment(align));
1001 if constexpr (ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR == 0) {
1002 if (mem == nullptr) {
1003 AddMemoryPoolToAllocator(allocator);
1004 allocatedPools++;
                    mem = allocator.Alloc(size, Alignment(align));
1006 }
1007 }
1008 ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << size << " bytes in " << elementsCount
1009 << " iteration, seed : " << seed_;
1010 allocatedElements.push_back(mem);
1011 usedIndexes.insert(elementsCount++);
1012 }
1013 // Free some elements
1014 for (size_t i = 0; i < elementsCount; i += freeGranularity) {
1015 size_t index = RandFromRange(0, elementsCount - 1);
1016 auto it = usedIndexes.find(index);
1017 if (it == usedIndexes.end()) {
1018 it = usedIndexes.begin();
1019 index = *it;
1020 }
1021 allocator.Free(allocatedElements[index]);
1022 usedIndexes.erase(it);
1023 }
1024 }
1025
1026 /**
1027 * @brief Prepare Allocator for the MT work. Allocate and free everything except one element
1028 * It will generate a common allocator state before specific tests.
1029 */
1030 void MTTestPrologue(Allocator &allocator, size_t allocSize);
1031
1032 static void MtAllocRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
1033 std::atomic<size_t> *numFinished, size_t minAllocSize, size_t maxAllocSize,
1034 size_t minElementsCount, size_t maxElementsCount);
1035
1036 static void MtAllocFreeRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
1037 std::atomic<size_t> *numFinished, size_t freeGranularity, size_t minAllocSize,
1038 size_t maxAllocSize, size_t minElementsCount, size_t maxElementsCount);
1039
1040 static void MtAllocIterateRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
1041 std::atomic<size_t> *numFinished, size_t rangeIterationSize, size_t minAllocSize,
1042 size_t maxAllocSize, size_t minElementsCount, size_t maxElementsCount);
1043
1044 static void MtAllocCollectRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
1045 std::atomic<size_t> *numFinished, size_t minAllocSize, size_t maxAllocSize,
1046 size_t minElementsCount, size_t maxElementsCount, uint32_t maxThreadWithCollect,
1047 std::atomic<uint32_t> *threadWithCollect);
1048
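    // Set shared by the static visitors below; Collect/IterateOverObjects callbacks record visited objects here
    // so the tests can verify that every expected object was reported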
1049 static std::unordered_set<void *> objectsSet_;
1050
    static void VisitAndPutInSet(void *objMem)
1052 {
1053 objectsSet_.insert(objMem);
1054 }
1055
    static ObjectStatus ReturnDeadAndPutInSet(ObjectHeader *objMem)
1057 {
1058 objectsSet_.insert(objMem);
1059 return ObjectStatus::DEAD_OBJECT;
1060 }
1061
    static bool EraseFromSet(void *objMem)
1063 {
1064 auto it = objectsSet_.find(objMem);
1065 if (it != objectsSet_.end()) {
1066 objectsSet_.erase(it);
1067 return true;
1068 }
1069 return false;
1070 }
1071
    static bool IsEmptySet() noexcept
1073 {
1074 return objectsSet_.empty();
1075 }
1076
    static std::string BuildInfoForMtAllocTests(std::tuple<void *, size_t, size_t> allocatedElement, unsigned int seed)
1078 {
1079 std::stringstream stream;
1080 stream << "Address: " << std::hex << std::get<0>(allocatedElement)
1081 << ", size: " << std::get<1>(allocatedElement)
1082 << ", start index in byte array: " << std::get<2U>(allocatedElement) << ", seed: " << seed;
1083 return stream.str();
1084 }
1085 };
1086
1087 // NOLINTBEGIN(fuchsia-statically-constructed-objects)
1088 template <class Allocator>
1089 std::unordered_set<void *> AllocatorTest<Allocator>::objectsSet_;
1090 // NOLINTEND(fuchsia-statically-constructed-objects)
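
// Usage sketch (hypothetical, names and sizes are illustrative assumptions): a derived fixture drives the
// generic scenarios from ordinary gtest bodies, e.g.
//
//     TEST_F(MyAllocatorTest, AlignedAllocFree)
//     {
//         AlignedAllocFreeTest<1U, 512U>(2U);  // all sizes in [1, 512], default alignment range, 2 pools
//     }
//
//     TEST_F(MyAllocatorTest, AllocateTooMuch)
//     {
//         AllocateTooMuchTest(64U, 1000000U);  // 64-byte objects, far more than the protected pool can hold
//     }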
1091
1092 template <class Allocator>
void AllocatorTest<Allocator>::MtAllocRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
                                          std::atomic<size_t> *numFinished, size_t minAllocSize, size_t maxAllocSize,
                                          size_t minElementsCount, size_t maxElementsCount)
1096 {
1097 size_t elementsCount = allocatorTestInstance->RandFromRange(minElementsCount, maxElementsCount);
1098 std::unordered_set<size_t> usedIndexes;
1099 // {memory, size, start_index_in_byte_array}
1100 std::vector<std::tuple<void *, size_t, size_t>> allocatedElements(elementsCount);
1101
1102 for (size_t i = 0; i < elementsCount; ++i) {
1103 size_t size = allocatorTestInstance->RandFromRange(minAllocSize, maxAllocSize);
1104 // Allocation
1105 void *mem = allocator->Alloc(size);
        // Loop because other threads can exhaust the whole pool before we manage to allocate something in it
1107 while (mem == nullptr) {
1108 allocatorTestInstance->AddMemoryPoolToAllocator(*allocator);
1109 mem = allocator->Alloc(size);
1110 }
1111 ASSERT_TRUE(mem != nullptr);
1112 // Write random bytes
1113 allocatedElements[i] = {mem, size, allocatorTestInstance->SetBytesFromByteArray(mem, size)};
1114 usedIndexes.insert(i);
1115 }
1116
1117 // Compare
1118 while (!usedIndexes.empty()) {
1119 size_t i = allocatorTestInstance->RandFromRange(0, elementsCount - 1);
1120 auto it = usedIndexes.find(i);
1121 if (it != usedIndexes.end()) {
1122 usedIndexes.erase(it);
1123 } else {
1124 i = *usedIndexes.begin();
1125 usedIndexes.erase(usedIndexes.begin());
1126 }
1127 ASSERT_TRUE(allocatorTestInstance->AllocatedByThisAllocator(*allocator, std::get<0>(allocatedElements[i])));
1128 ASSERT_TRUE(allocatorTestInstance->CompareBytesWithByteArray(
1129 std::get<0>(allocatedElements[i]), std::get<1>(allocatedElements[i]), std::get<2U>(allocatedElements[i])))
1130 << BuildInfoForMtAllocTests(allocatedElements[i], allocatorTestInstance->seed_);
1131 }
1132 // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially consistent order
1133 // where threads observe all modifications in the same order
1134 numFinished->fetch_add(1, std::memory_order_seq_cst);
1135 }
1136
1137 template <class Allocator>
void AllocatorTest<Allocator>::MtAllocFreeRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
                                              std::atomic<size_t> *numFinished, size_t freeGranularity,
                                              size_t minAllocSize, size_t maxAllocSize, size_t minElementsCount,
                                              size_t maxElementsCount)
1142 {
1143 size_t elementsCount = allocatorTestInstance->RandFromRange(minElementsCount, maxElementsCount);
1144 std::unordered_set<size_t> usedIndexes;
1145 // {memory, size, start_index_in_byte_array}
1146 std::vector<std::tuple<void *, size_t, size_t>> allocatedElements(elementsCount);
1147
1148 for (size_t i = 0; i < elementsCount; ++i) {
1149 size_t size = allocatorTestInstance->RandFromRange(minAllocSize, maxAllocSize);
1150 // Allocation
1151 void *mem = allocator->Alloc(size);
        // Loop because other threads can exhaust the whole pool before we manage to allocate something in it
1153 while (mem == nullptr) {
1154 allocatorTestInstance->AddMemoryPoolToAllocator(*allocator);
1155 mem = allocator->Alloc(size);
1156 }
1157 ASSERT_TRUE(mem != nullptr);
1158 // Write random bytes
1159 allocatedElements[i] = {mem, size, allocatorTestInstance->SetBytesFromByteArray(mem, size)};
1160 usedIndexes.insert(i);
1161 }
1162
1163 // Free some elements
1164 for (size_t i = 0; i < elementsCount; i += freeGranularity) {
1165 size_t index = allocatorTestInstance->RandFromRange(0, elementsCount - 1);
1166 auto it = usedIndexes.find(index);
1167 if (it != usedIndexes.end()) {
1168 usedIndexes.erase(it);
1169 } else {
1170 index = *usedIndexes.begin();
1171 usedIndexes.erase(usedIndexes.begin());
1172 }
1173 ASSERT_TRUE(allocatorTestInstance->AllocatedByThisAllocator(*allocator, std::get<0>(allocatedElements[index])));
1174 // Compare
1175 ASSERT_TRUE(allocatorTestInstance->CompareBytesWithByteArray(std::get<0>(allocatedElements[index]),
1176 std::get<1>(allocatedElements[index]),
1177 std::get<2U>(allocatedElements[index])))
            << BuildInfoForMtAllocTests(allocatedElements[index], allocatorTestInstance->seed_);
1179 allocator->Free(std::get<0>(allocatedElements[index]));
1180 }
1181
1182 // Compare and free
1183 while (!usedIndexes.empty()) {
1184 size_t i = allocatorTestInstance->RandFromRange(0, elementsCount - 1);
1185 auto it = usedIndexes.find(i);
1186 if (it != usedIndexes.end()) {
1187 usedIndexes.erase(it);
1188 } else {
1189 i = *usedIndexes.begin();
1190 usedIndexes.erase(usedIndexes.begin());
1191 }
1192 // Compare
1193 ASSERT_TRUE(allocatorTestInstance->CompareBytesWithByteArray(
1194 std::get<0>(allocatedElements[i]), std::get<1>(allocatedElements[i]), std::get<2U>(allocatedElements[i])))
1195 << BuildInfoForMtAllocTests(allocatedElements[i], allocatorTestInstance->seed_);
1196 allocator->Free(std::get<0>(allocatedElements[i]));
1197 }
1198 // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially consistent order
1199 // where threads observe all modifications in the same order
1200 numFinished->fetch_add(1, std::memory_order_seq_cst);
1201 }
1202
1203 template <class Allocator>
void AllocatorTest<Allocator>::MtAllocIterateRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
                                                 std::atomic<size_t> *numFinished, size_t rangeIterationSize,
                                                 size_t minAllocSize, size_t maxAllocSize, size_t minElementsCount,
                                                 size_t maxElementsCount)
1208 {
1209 static constexpr size_t ITERATION_IN_RANGE_COUNT = 100;
1210 size_t elementsCount = allocatorTestInstance->RandFromRange(minElementsCount, maxElementsCount);
1211 // {memory, size, start_index_in_byte_array}
1212 std::vector<std::tuple<void *, size_t, size_t>> allocatedElements(elementsCount);
1213
    // Iterate over all objects
1215 allocator->IterateOverObjects([&](void *mem) { (void)mem; });
1216
1217 // Allocate objects
1218 for (size_t i = 0; i < elementsCount; ++i) {
1219 size_t size = allocatorTestInstance->RandFromRange(minAllocSize, maxAllocSize);
1220 // Allocation
1221 void *mem = allocator->Alloc(size);
        // Loop because other threads can exhaust the whole pool before we manage to allocate something in it
1223 while (mem == nullptr) {
1224 allocatorTestInstance->AddMemoryPoolToAllocator(*allocator);
1225 mem = allocator->Alloc(size);
1226 }
1227 ASSERT_TRUE(mem != nullptr);
1228 // Write random bytes
1229 allocatedElements[i] = {mem, size, allocatorTestInstance->SetBytesFromByteArray(mem, size)};
1230 }
1231
    // Iterate over all objects
1233 allocator->IterateOverObjects([&](void *mem) { (void)mem; });
1234
1235 size_t iteratedOverObjects = 0;
1236 // Compare values inside the objects
1237 for (size_t i = 0; i < elementsCount; ++i) {
1238 // do a lot of iterate over range calls to check possible races
1239 if (iteratedOverObjects < ITERATION_IN_RANGE_COUNT) {
1240 void *leftBorder = ToVoidPtr(ToUintPtr(std::get<0>(allocatedElements[i])) & ~(rangeIterationSize - 1U));
1241 void *rightBorder = ToVoidPtr(ToUintPtr(leftBorder) + rangeIterationSize - 1U);
1242 allocator->IterateOverObjectsInRange([&](void *mem) { (void)mem; }, leftBorder, rightBorder);
1243 iteratedOverObjects++;
1244 }
1245 ASSERT_TRUE(allocatorTestInstance->AllocatedByThisAllocator(*allocator, std::get<0>(allocatedElements[i])));
1246 // Compare
1247 ASSERT_TRUE(allocatorTestInstance->CompareBytesWithByteArray(
1248 std::get<0>(allocatedElements[i]), std::get<1>(allocatedElements[i]), std::get<2U>(allocatedElements[i])))
1249 << BuildInfoForMtAllocTests(allocatedElements[i], allocatorTestInstance->seed_);
1250 }
1251 // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially consistent order
1252 // where threads observe all modifications in the same order
1253 numFinished->fetch_add(1, std::memory_order_seq_cst);
1254 }
1255
1256 template <class Allocator>
void AllocatorTest<Allocator>::MtAllocCollectRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
                                                 std::atomic<size_t> *numFinished, size_t minAllocSize,
                                                 size_t maxAllocSize, size_t minElementsCount, size_t maxElementsCount,
                                                 uint32_t maxThreadWithCollect,
                                                 std::atomic<uint32_t> *threadWithCollect)
1262 {
1263 size_t elementsCount = allocatorTestInstance->RandFromRange(minElementsCount, maxElementsCount);
1264
1265 // Allocate objects
1266 for (size_t i = 0; i < elementsCount; ++i) {
1267 size_t size = allocatorTestInstance->RandFromRange(minAllocSize, maxAllocSize);
1268 // Allocation
1269 void *mem = allocator->Alloc(size);
        // Loop because other threads can exhaust the whole pool before we manage to allocate something in it
1271 while (mem == nullptr) {
1272 allocatorTestInstance->AddMemoryPoolToAllocator(*allocator);
1273 mem = allocator->Alloc(size);
1274 }
1275 ASSERT_TRUE(mem != nullptr);
1276 auto object = static_cast<ObjectHeader *>(mem);
1277 object->SetMarkedForGC();
1278 }
1279
1280 // Collect objects
1281 // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially consistent order
1282 // where threads observe all modifications in the same order
1283 if (threadWithCollect->fetch_add(1U, std::memory_order_seq_cst) < maxThreadWithCollect) {
1284 allocator->Collect([&](ObjectHeader *object) {
1285 ObjectStatus objectStatus =
1286 object->IsMarkedForGC() ? ObjectStatus::DEAD_OBJECT : ObjectStatus::ALIVE_OBJECT;
1287 return objectStatus;
1288 });
1289 }
1290 // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially consistent order
1291 // where threads observe all modifications in the same order
1292 numFinished->fetch_add(1, std::memory_order_seq_cst);
1293 }
1294
1295 template <class Allocator>
void AllocatorTest<Allocator>::MTTestPrologue(Allocator &allocator, size_t allocSize)
1297 {
1298 // Allocator preparing:
1299 std::vector<void *> allocatedElements;
1300 AddMemoryPoolToAllocator(allocator);
1301 // Allocate objects
1302 while (true) {
1303 // Allocation
1304 void *mem = allocator.Alloc(allocSize);
1305 if (mem == nullptr) {
1306 break;
1307 }
1308 allocatedElements.push_back(mem);
1309 }
1310 // Free everything except one element:
1311 for (size_t i = 1; i < allocatedElements.size(); ++i) {
1312 allocator.Free(allocatedElements[i]);
1313 }
1314
1315 allocator.VisitAndRemoveFreePools([&](void *mem, size_t size) {
1316 (void)mem;
1317 (void)size;
1318 });
1319 }
1320
1321 } // namespace ark::mem
1322
1323 #endif // PANDA_RUNTIME_TESTS_ALLOCATOR_TEST_BASE_H_
1324