/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_TESTS_ALLOCATOR_TEST_BASE_H
#define PANDA_RUNTIME_TESTS_ALLOCATOR_TEST_BASE_H

#include <gtest/gtest.h>

#include <algorithm>
#include <array>
#include <atomic>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <limits>
#include <memory>
#include <tuple>
#include <unordered_set>
#include <utility>
#include <vector>

#include "libpandabase/mem/mem.h"
#include "libpandabase/os/thread.h"
#include "libpandabase/utils/utils.h"
#include "runtime/mem/bump-allocator.h"
#include "runtime/mem/mem_stats_additional_info.h"
#include "runtime/mem/mem_stats_default.h"
#include "runtime/include/object_header.h"

namespace panda::mem {

template <class Allocator>
class AllocatorTest : public testing::Test {
public:
    explicit AllocatorTest()
    {
#ifdef PANDA_NIGHTLY_TEST_ON
        seed_ = std::time(nullptr);
#else
        static constexpr unsigned int FIXED_SEED = 0xDEADBEEF;
        seed_ = FIXED_SEED;
#endif
        srand(seed_);
        InitByteArray();
    }

protected:
    static constexpr size_t BYTE_ARRAY_SIZE = 1000;

    unsigned int seed_;                                  // NOLINT(misc-non-private-member-variables-in-classes)
    std::array<uint8_t, BYTE_ARRAY_SIZE> byteArray_ {};  // NOLINT(misc-non-private-member-variables-in-classes)

    /// Fill the byte array with random bytes
    void InitByteArray()
    {
        for (size_t i = 0; i < BYTE_ARRAY_SIZE; ++i) {
            byteArray_[i] = RandFromRange(0, std::numeric_limits<uint8_t>::max());
        }
    }

    /**
     * @brief Add a memory pool to the allocator (may be a no-op for some allocators)
     * @param allocator - allocator to add the pool to
     */
    virtual void AddMemoryPoolToAllocator([[maybe_unused]] Allocator &allocator) = 0;

    /**
     * @brief Add a memory pool to the allocator and protect it (may be a no-op for some allocators)
     * @param allocator - allocator to add the pool to and protect
     */
    virtual void AddMemoryPoolToAllocatorProtected([[maybe_unused]] Allocator &allocator) = 0;

    /**
     * @brief Check whether the memory was allocated by this allocator
     * @param allocator - allocator to check against
     * @param mem - allocated memory
     */
    virtual bool AllocatedByThisAllocator([[maybe_unused]] Allocator &allocator, [[maybe_unused]] void *mem) = 0;

    /**
     * @brief Generate a random value from [minValue, maxValue]
     * @param minValue - minimum size_t value in the range
     * @param maxValue - maximum size_t value in the range
     * @return random size_t value from [minValue, maxValue]
     */
    size_t RandFromRange(size_t minValue, size_t maxValue)
    {
        // rand() is not a thread-safe function, so call it under a lock
        static os::memory::Mutex randLock;
        os::memory::LockHolder lock(randLock);
        // NOLINTNEXTLINE(cert-msc50-cpp)
        return minValue + rand() % (maxValue - minValue + 1);
    }

    /**
     * @brief Write a value to memory for a death test
     * @param mem - memory to write to
     *
     * Writes a value to memory for the address sanitizer test
     */
    void DeathWriteUint64(void *mem)
    {
        static constexpr uint64_t INVALID_ADDR = 0xDEADBEEF;
        *(static_cast<uint64_t *>(mem)) = INVALID_ADDR;
    }
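
    // Usage sketch (illustrative; freedMem is a placeholder for a pointer the
    // allocator has already freed): used together with gtest death assertions,
    // as in AsanTest below:
    //
    //     EXPECT_DEATH(DeathWriteUint64(freedMem), "");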

    /**
     * @brief Fill memory with random bytes from the byte array
     * @param mem - memory to write the random bytes from the byte array to
     * @param size - size of the memory in bytes
     * @return start index in the byte array
     */
    size_t SetBytesFromByteArray(void *mem, size_t size)
    {
        size_t startIndex = RandFromRange(0, BYTE_ARRAY_SIZE - 1);
        size_t copied = 0;
        size_t firstCopySize = std::min(size, BYTE_ARRAY_SIZE - startIndex);
        // Set head of memory
        std::copy_n(&byteArray_[startIndex], firstCopySize, reinterpret_cast<uint8_t *>(mem));
        size -= firstCopySize;
        copied += firstCopySize;
        // Set middle part of memory
        while (size > BYTE_ARRAY_SIZE) {
            std::copy_n(byteArray_.data(), BYTE_ARRAY_SIZE,
                        reinterpret_cast<uint8_t *>(ToVoidPtr(ToUintPtr(mem) + copied)));
            size -= BYTE_ARRAY_SIZE;
            copied += BYTE_ARRAY_SIZE;
        }
        // Set tail of memory
        std::copy_n(byteArray_.data(), size, reinterpret_cast<uint8_t *>(ToVoidPtr(ToUintPtr(mem) + copied)));

        return startIndex;
    }

    /**
     * @brief Compare bytes in memory with the byte array
     * @param mem - memory previously filled with random bytes from the byte array
     * @param size - size of the memory in bytes
     * @param startIndexInByteArray - start index in the byte array for comparison with the memory
     * @return true if the bytes are equal, false otherwise
     */
    bool CompareBytesWithByteArray(void *mem, size_t size, size_t startIndexInByteArray)
    {
        size_t compared = 0;
        size_t firstCompareSize = std::min(size, BYTE_ARRAY_SIZE - startIndexInByteArray);
        // Compare head of memory
        if (memcmp(mem, &byteArray_[startIndexInByteArray], firstCompareSize) != 0) {
            return false;
        }
        compared += firstCompareSize;
        size -= firstCompareSize;
        // Compare middle part of memory
        while (size >= BYTE_ARRAY_SIZE) {
            if (memcmp(ToVoidPtr(ToUintPtr(mem) + compared), byteArray_.data(), BYTE_ARRAY_SIZE) != 0) {
                return false;
            }
            size -= BYTE_ARRAY_SIZE;
            compared += BYTE_ARRAY_SIZE;
        }
        // Compare tail of memory
        return memcmp(ToVoidPtr(ToUintPtr(mem) + compared), byteArray_.data(), size) == 0;
    }
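
    // Usage sketch (illustrative only): the two helpers above are used as a
    // pair. A test writes a random pattern into freshly allocated memory,
    // remembers the returned start index, and later verifies the memory is
    // still intact before freeing it:
    //
    //     void *mem = allocator.Alloc(size);
    //     size_t idx = SetBytesFromByteArray(mem, size);
    //     // ... exercise the allocator ...
    //     ASSERT_TRUE(CompareBytesWithByteArray(mem, size, idx));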

    /**
     * @brief Allocate with one alignment
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam ALIGNMENT - enum Alignment value for allocations
     * @tparam AllocatorArgs - argument types for allocator creation
     * @param poolsCount - count of pools needed for allocation
     * @param allocatorArgs - arguments for allocator creation
     *
     * Allocates all possible sizes from [MIN_ALLOC_SIZE, MAX_ALLOC_SIZE] with ALIGNMENT alignment
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment ALIGNMENT, class... AllocatorArgs>
    void OneAlignedAllocFreeTest(size_t poolsCount, AllocatorArgs &&...allocatorArgs);

    /**
     * @brief Allocate with all alignments
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
     * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @param poolsCount - count of pools needed for allocation
     *
     * Allocates all possible sizes from [MIN_ALLOC_SIZE, MAX_ALLOC_SIZE] with all possible alignments from
     * [LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE]
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE = LOG_ALIGN_MIN,
              Alignment LOG_ALIGN_MAX_VALUE = LOG_ALIGN_MAX>
    void AlignedAllocFreeTest(size_t poolsCount = 1);

    /**
     * @brief Simple allocate-and-free test
     * @param allocSize - size in bytes of each allocation
     * @param elementsCount - count of elements to allocate
     * @param poolsCount - count of pools needed for allocation
     *
     * Allocates elements, fills them with random values, then checks and frees the memory
     */
    void AllocateAndFree(size_t allocSize, size_t elementsCount, size_t poolsCount = 1);

    /**
     * @brief Simple test of the iteration over free pools method.
     * @tparam POOLS_COUNT - count of pools needed for allocation, must be bigger than 3
     * @param allocSize - size in bytes of each allocation
     *
     * Allocates and uses memory pools; frees all elements from the first pool, the last one
     * and one in the middle; calls iteration over free pools
     * and allocates again.
     */
    template <size_t POOLS_COUNT = 5>
    void VisitAndRemoveFreePools(size_t allocSize);

    /**
     * @brief Allocate with different sizes and free in random order
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam AllocatorArgs - argument types for allocator creation
     * @param elementsCount - count of elements to allocate
     * @param poolsCount - count of pools needed for allocation
     * @param allocatorArgs - arguments for allocator creation
     *
     * Allocates elements of random sizes, fills them with random values, then checks and frees the memory
     * in random order too
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, class... AllocatorArgs>
    void AllocateFreeDifferentSizesTest(size_t elementsCount, size_t poolsCount, AllocatorArgs &&...allocatorArgs);

    /**
     * @brief Try to allocate a too-big object; the allocation must fail
     * @tparam MAX_ALLOC_SIZE - maximum possible size for allocation by this allocator
     */
    template <size_t MAX_ALLOC_SIZE>
    void AllocateTooBigObjectTest();

    /**
     * @brief Try to allocate too many objects; the allocator must fail to allocate all of them
     * @param allocSize - size in bytes of one allocation
     * @param elementsCount - count of elements to allocate
     *
     * Allocates too many elements, so not all objects must be allocated
     */
    void AllocateTooMuchTest(size_t allocSize, size_t elementsCount);

    /**
     * @brief Use the allocator in std::vector
     * @param elementsCount - count of elements to allocate
     *
     * Checks that the adapter of this allocator works, with std::vector as an example
     */
    // NOLINTNEXTLINE(readability-magic-numbers)
    void AllocateVectorTest(size_t elementsCount = 32);

    /**
     * @brief Allocate and reuse
     * @tparam ElementType - type of elements to allocate
     * @param alignmentMask - mask for alignment comparison of the two start addresses
     * @param elementsCount - count of elements to allocate
     *
     * Allocates and frees memory, then reuses it. Checks the two start addresses
     */
    template <class ElementType = uint64_t>
    void AllocateReuseTest(size_t alignmentMask, size_t elementsCount = 100);  // NOLINT(readability-magic-numbers)

    /**
     * @brief Allocate and free objects, then collect via the allocator's method
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
     * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @tparam ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR - 0 if the allocator uses pools; otherwise the count of elements
     * to allocate
     * @param freeGranularity - granularity of object freeing before the collection
     * @param poolsCount - count of pools needed for allocation
     *
     * Allocates objects, frees a part of them and collects via the allocator's method, with free calls during the
     * collection. Checks the collection.
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE = LOG_ALIGN_MIN,
              Alignment LOG_ALIGN_MAX_VALUE = LOG_ALIGN_MAX, size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR = 0>
    void ObjectCollectionTest(size_t freeGranularity = 4, size_t poolsCount = 2);

    /**
     * @brief Allocate and free objects, then iterate via the allocator's method
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
     * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @tparam ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR - 0 if the allocator uses pools; otherwise the count of elements
     * to allocate
     * @param freeGranularity - granularity of object freeing before the iteration
     * @param poolsCount - count of pools needed for allocation
     *
     * Allocates objects, frees a part of them and iterates via the allocator's method.
     * Checks the iterated elements and frees them later.
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE = LOG_ALIGN_MIN,
              Alignment LOG_ALIGN_MAX_VALUE = LOG_ALIGN_MAX, size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR = 0>
    void ObjectIteratorTest(size_t freeGranularity = 4, size_t poolsCount = 2);

    /**
     * @brief Allocate and free objects, then iterate in ranges via the allocator's method
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
     * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @tparam ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR - 0 if the allocator uses pools; otherwise the count of elements
     * to allocate
     * @param rangeIterationSize - size of an iteration range during the test. Must be a power of two
     * @param freeGranularity - granularity of object freeing before the iteration
     * @param poolsCount - count of pools needed for allocation
     *
     * Allocates objects, frees a part of them and iterates in ranges via the allocator's method. Checks the iteration
     * and frees the objects later.
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE = LOG_ALIGN_MIN,
              Alignment LOG_ALIGN_MAX_VALUE = LOG_ALIGN_MAX, size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR = 0>
    void ObjectIteratorInRangeTest(size_t rangeIterationSize, size_t freeGranularity = 4, size_t poolsCount = 2);

    /**
     * @brief Address sanitizer test for the allocator
     * @tparam ELEMENTS_COUNT - count of elements to allocate
     * @param freeGranularity - granularity of freed elements
     * @param poolsCount - count of pools needed for allocation
     *
     * Test for the address sanitizer. Frees some elements and tries to write a value to the freed elements.
     */
    // NOLINTNEXTLINE(readability-magic-numbers)
    template <size_t ELEMENTS_COUNT = 100>
    void AsanTest(size_t freeGranularity = 3, size_t poolsCount = 1);  // NOLINT(readability-magic-numbers)

    /**
     * @brief Test the allocated-by-this-allocator check
     *
     * Test for the allocator function which checks whether memory was allocated by this allocator
     */
    void AllocatedByThisAllocatorTest();

    /**
     * @brief Test the allocated-by-this-allocator check on a given allocator
     *
     * Test for the allocator function which checks whether memory was allocated by this allocator
     */
    void AllocatedByThisAllocatorTest(Allocator &allocator);

    /**
     * @brief Simultaneously allocate/free objects in different threads
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam THREADS_COUNT - the number of threads used in this test
     * @param allocator - target allocator for the test
     * @param minElementsCount - minimum number of elements allocated per thread during the test
     * @param maxElementsCount - maximum number of elements allocated per thread during the test
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
    void MtAllocTest(Allocator *allocator, size_t minElementsCount, size_t maxElementsCount);

    /**
     * @brief Simultaneously allocate/free objects in different threads
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam THREADS_COUNT - the number of threads used in this test
     * @param minElementsCount - minimum number of elements allocated per thread during the test
     * @param maxElementsCount - maximum number of elements allocated per thread during the test
     * @param freeGranularity - granularity of object freeing before the total free
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
    void MtAllocFreeTest(size_t minElementsCount, size_t maxElementsCount, size_t freeGranularity = 4);

    /**
     * @brief Simultaneously allocate objects and iterate over them (in ranges too) in different threads
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam THREADS_COUNT - the number of threads used in this test
     * @param minElementsCount - minimum number of elements allocated per thread during the test
     * @param maxElementsCount - maximum number of elements allocated per thread during the test
     * @param rangeIterationSize - size of an iteration range during the test. Must be a power of two
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
    void MtAllocIterateTest(size_t minElementsCount, size_t maxElementsCount, size_t rangeIterationSize);

    /**
     * @brief Simultaneously allocate and collect objects in different threads
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam THREADS_COUNT - the number of threads used in this test
     * @param minElementsCount - minimum number of elements allocated per thread during the test
     * @param maxElementsCount - maximum number of elements allocated per thread during the test
     * @param maxThreadWithCollect - maximum number of threads which will call collect simultaneously
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
    void MtAllocCollectTest(size_t minElementsCount, size_t maxElementsCount, size_t maxThreadWithCollect = 1);

private:
    /**
     * @brief Allocate and free objects in the allocator for future collecting/iterating checks
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
     * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @tparam ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR - 0 if the allocator uses pools; otherwise the count of elements
     * to allocate
     * @param freeGranularity - granularity of object freeing before the collection
     * @param poolsCount - count of pools needed for allocation
     *
     * Allocates objects and frees a part of them.
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE,
              Alignment LOG_ALIGN_MAX_VALUE, size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>
    void ObjectIteratingSetUp(size_t freeGranularity, size_t poolsCount, Allocator &allocator, size_t &elementsCount,
                              std::vector<void *> &allocatedElements, std::unordered_set<size_t> &usedIndexes);

    /**
     * @brief Prepare the allocator for MT work. Allocate and free everything except one element.
     * It generates a common allocator state before the specific tests.
     */
    void MTTestPrologue(Allocator &allocator, size_t allocSize);

    static void MtAllocRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
                           std::atomic<size_t> *numFinished, size_t minAllocSize, size_t maxAllocSize,
                           size_t minElementsCount, size_t maxElementsCount);

    static void MtAllocFreeRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
                               std::atomic<size_t> *numFinished, size_t freeGranularity, size_t minAllocSize,
                               size_t maxAllocSize, size_t minElementsCount, size_t maxElementsCount);

    static void MtAllocIterateRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
                                  std::atomic<size_t> *numFinished, size_t rangeIterationSize, size_t minAllocSize,
                                  size_t maxAllocSize, size_t minElementsCount, size_t maxElementsCount);

    static void MtAllocCollectRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
                                  std::atomic<size_t> *numFinished, size_t minAllocSize, size_t maxAllocSize,
                                  size_t minElementsCount, size_t maxElementsCount, uint32_t maxThreadWithCollect,
                                  std::atomic<uint32_t> *threadWithCollect);

    static std::unordered_set<void *> objectsSet_;

    static void VisitAndPutInSet(void *objMem)
    {
        objectsSet_.insert(objMem);
    }

    static ObjectStatus ReturnDeadAndPutInSet(ObjectHeader *objMem)
    {
        objectsSet_.insert(objMem);
        return ObjectStatus::DEAD_OBJECT;
    }

    static bool EraseFromSet(void *objMem)
    {
        auto it = objectsSet_.find(objMem);
        if (it != objectsSet_.end()) {
            objectsSet_.erase(it);
            return true;
        }
        return false;
    }

    static bool IsEmptySet() noexcept
    {
        return objectsSet_.empty();
    }
};

// NOLINTBEGIN(fuchsia-statically-constructed-objects)
template <class Allocator>
std::unordered_set<void *> AllocatorTest<Allocator>::objectsSet_;
// NOLINTEND(fuchsia-statically-constructed-objects)
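
// A minimal sketch of a concrete fixture (hypothetical: MyAllocator,
// MY_POOL_SIZE and the pool/containment calls are assumptions, not part of
// this header). A derived test only has to implement the three pure virtual
// hooks and can then reuse any of the scenarios above:
//
//     class MyAllocatorTest : public AllocatorTest<MyAllocator> {
//     protected:
//         void AddMemoryPoolToAllocator(MyAllocator &allocator) override
//         {
//             void *pool = /* acquire MY_POOL_SIZE bytes from the pool manager */;
//             allocator.AddMemoryPool(pool, MY_POOL_SIZE);
//         }
//         void AddMemoryPoolToAllocatorProtected(MyAllocator &allocator) override
//         {
//             AddMemoryPoolToAllocator(allocator);  // plus protection, if the allocator supports it
//         }
//         bool AllocatedByThisAllocator(MyAllocator &allocator, void *mem) override
//         {
//             return allocator.ContainObject(static_cast<ObjectHeader *>(mem));
//         }
//     };
//
//     TEST_F(MyAllocatorTest, AllocateAndFree)
//     {
//         AllocateAndFree(sizeof(uint64_t), 1000U);
//     }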

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment ALIGNMENT, class... AllocatorArgs>
inline void AllocatorTest<Allocator>::OneAlignedAllocFreeTest(size_t poolsCount, AllocatorArgs &&...allocatorArgs)
{
    static constexpr size_t ALLOCATIONS_COUNT = MAX_ALLOC_SIZE - MIN_ALLOC_SIZE + 1;

    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats, std::forward<AllocatorArgs>(allocatorArgs)...);
    for (size_t i = 0; i < poolsCount; ++i) {
        AddMemoryPoolToAllocator(allocator);
    }
    std::array<std::pair<void *, size_t>, ALLOCATIONS_COUNT> allocatedElements;

    // Allocations
    for (size_t size = MIN_ALLOC_SIZE; size <= MAX_ALLOC_SIZE; ++size) {
        void *mem = allocator.Alloc(size, Alignment(ALIGNMENT));
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << size << " bytes with " << static_cast<size_t>(ALIGNMENT)
                                    << " log alignment, seed: " << seed_;
        ASSERT_EQ(reinterpret_cast<uintptr_t>(mem) & (GetAlignmentInBytes(Alignment(ALIGNMENT)) - 1), 0UL)
            << size << " bytes, " << static_cast<size_t>(ALIGNMENT) << " log alignment, seed: " << seed_;
        allocatedElements[size - MIN_ALLOC_SIZE] = {mem, SetBytesFromByteArray(mem, size)};
    }
    // Check and free
    for (size_t size = MIN_ALLOC_SIZE; size <= MAX_ALLOC_SIZE; size++) {
        size_t k = size - MIN_ALLOC_SIZE;
        ASSERT_TRUE(CompareBytesWithByteArray(allocatedElements[k].first, size, allocatedElements[k].second))
            << "address: " << std::hex << allocatedElements[k].first << ", size: " << size
            << ", alignment: " << static_cast<size_t>(ALIGNMENT) << ", seed: " << seed_;
        allocator.Free(allocatedElements[k].first);
    }
    delete memStats;
}

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE, Alignment LOG_ALIGN_MAX_VALUE>
inline void AllocatorTest<Allocator>::AlignedAllocFreeTest(size_t poolsCount)
{
    static_assert(MIN_ALLOC_SIZE <= MAX_ALLOC_SIZE);
    static_assert(LOG_ALIGN_MIN_VALUE <= LOG_ALIGN_MAX_VALUE);
    static constexpr size_t ALLOCATIONS_COUNT =
        (MAX_ALLOC_SIZE - MIN_ALLOC_SIZE + 1) * (LOG_ALIGN_MAX_VALUE - LOG_ALIGN_MIN_VALUE + 1);

    std::array<std::pair<void *, size_t>, ALLOCATIONS_COUNT> allocatedElements;
    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);
    for (size_t i = 0; i < poolsCount; i++) {
        AddMemoryPoolToAllocator(allocator);
    }

    // Allocations with alignment
    size_t k = 0;
    for (size_t size = MIN_ALLOC_SIZE; size <= MAX_ALLOC_SIZE; ++size) {
        for (size_t align = LOG_ALIGN_MIN_VALUE; align <= LOG_ALIGN_MAX_VALUE; ++align, ++k) {
            void *mem = allocator.Alloc(size, Alignment(align));
            ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << size << " bytes with " << align
                                        << " log alignment, seed: " << seed_;
            ASSERT_EQ(reinterpret_cast<uintptr_t>(mem) & (GetAlignmentInBytes(Alignment(align)) - 1), 0UL)
                << size << " bytes, " << align << " log alignment, seed: " << seed_;
            allocatedElements[k] = {mem, SetBytesFromByteArray(mem, size)};
        }
    }
    // Check and free
    k = 0;
    for (size_t size = MIN_ALLOC_SIZE; size <= MAX_ALLOC_SIZE; ++size) {
        for (size_t align = LOG_ALIGN_MIN_VALUE; align <= LOG_ALIGN_MAX_VALUE; ++align, ++k) {
            ASSERT_TRUE(CompareBytesWithByteArray(allocatedElements[k].first, size, allocatedElements[k].second))
                << "address: " << std::hex << allocatedElements[k].first << ", size: " << size
                << ", alignment: " << align << ", seed: " << seed_;
            allocator.Free(allocatedElements[k].first);
        }
    }
    delete memStats;
}

template <class Allocator>
inline void AllocatorTest<Allocator>::AllocateAndFree(size_t allocSize, size_t elementsCount, size_t poolsCount)
{
    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);
    for (size_t i = 0; i < poolsCount; i++) {
        AddMemoryPoolToAllocator(allocator);
    }
    std::vector<std::pair<void *, size_t>> allocatedElements(elementsCount);

    // Allocations
    for (size_t i = 0; i < elementsCount; ++i) {
        void *mem = allocator.Alloc(allocSize);
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << allocSize << " bytes in " << i
                                    << " iteration, seed: " << seed_;
        size_t index = SetBytesFromByteArray(mem, allocSize);
        allocatedElements[i] = {mem, index};
    }
    // Free
    for (auto &element : allocatedElements) {
        ASSERT_TRUE(CompareBytesWithByteArray(element.first, allocSize, element.second))
            << "address: " << std::hex << element.first << ", size: " << allocSize << ", seed: " << seed_;
        allocator.Free(element.first);
    }
    delete memStats;
}

template <class Allocator>
template <size_t POOLS_COUNT>
inline void AllocatorTest<Allocator>::VisitAndRemoveFreePools(size_t allocSize)
{
    static constexpr size_t POOLS_TO_FREE = 3;
    static_assert(POOLS_COUNT > POOLS_TO_FREE);
    std::array<std::vector<void *>, POOLS_COUNT> allocatedElements;
    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);

    for (size_t i = 0; i < POOLS_COUNT; i++) {
        AddMemoryPoolToAllocator(allocator);
        while (true) {
            void *mem = allocator.Alloc(allocSize);
            if (mem == nullptr) {
                break;
            }
            allocatedElements[i].push_back(mem);
        }
    }
    std::array<size_t, POOLS_TO_FREE> freedPoolsIndexes = {0, POOLS_COUNT / 2, POOLS_COUNT - 1};
    // Free all elements in the chosen pools
    for (auto i : freedPoolsIndexes) {
        for (auto j : allocatedElements[i]) {
            allocator.Free(j);
        }
        allocatedElements[i].clear();
    }
    size_t freedPools = 0;
    allocator.VisitAndRemoveFreePools([&freedPools](void *mem, size_t size) {
        (void)mem;
        (void)size;
        freedPools++;
    });
    ASSERT_TRUE(freedPools == POOLS_TO_FREE) << ", seed: " << seed_;
    ASSERT_TRUE(allocator.Alloc(allocSize) == nullptr) << ", seed: " << seed_;
    // Allocate again
    for (auto i : freedPoolsIndexes) {
        AddMemoryPoolToAllocator(allocator);
        while (true) {
            void *mem = allocator.Alloc(allocSize);
            if (mem == nullptr) {
                break;
            }
            allocatedElements[i].push_back(mem);
        }
    }
    // Free everything:
    for (size_t i = 0; i < POOLS_COUNT; i++) {
        for (auto j : allocatedElements[i]) {
            allocator.Free(j);
        }
        allocatedElements[i].clear();
    }
    freedPools = 0;
    allocator.VisitAndRemoveFreePools([&freedPools](void *mem, size_t size) {
        (void)mem;
        (void)size;
        freedPools++;
    });
    delete memStats;
    ASSERT_TRUE(freedPools == POOLS_COUNT) << ", seed: " << seed_;
}

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, class... AllocatorArgs>
inline void AllocatorTest<Allocator>::AllocateFreeDifferentSizesTest(size_t elementsCount, size_t poolsCount,
                                                                     AllocatorArgs &&...allocatorArgs)
{
    std::unordered_set<size_t> usedIndexes;
    // {memory, size, start_index_in_byte_array}
    std::vector<std::tuple<void *, size_t, size_t>> allocatedElements(elementsCount);
    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats, std::forward<AllocatorArgs>(allocatorArgs)...);
    for (size_t i = 0; i < poolsCount; i++) {
        AddMemoryPoolToAllocator(allocator);
    }

    size_t fullSizeAllocated = 0;
    for (size_t i = 0; i < elementsCount; ++i) {
        size_t size = RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE);
        // Allocation
        void *mem = allocator.Alloc(size);
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << size << " bytes, full allocated: " << fullSizeAllocated
                                    << ", seed: " << seed_;
        fullSizeAllocated += size;
        // Write random bytes
        allocatedElements[i] = {mem, size, SetBytesFromByteArray(mem, size)};
        usedIndexes.insert(i);
    }
    // Compare and free
    while (!usedIndexes.empty()) {
        size_t i = RandFromRange(0, elementsCount - 1);
        auto it = usedIndexes.find(i);
        if (it != usedIndexes.end()) {
            usedIndexes.erase(it);
        } else {
            i = *usedIndexes.begin();
            usedIndexes.erase(usedIndexes.begin());
        }
        // Compare
        ASSERT_TRUE(CompareBytesWithByteArray(std::get<0>(allocatedElements[i]), std::get<1>(allocatedElements[i]),
                                              std::get<2U>(allocatedElements[i])))
            << "Address: " << std::hex << std::get<0>(allocatedElements[i])
            << ", size: " << std::get<1>(allocatedElements[i])
            << ", start index in byte array: " << std::get<2U>(allocatedElements[i]) << ", seed: " << seed_;
        allocator.Free(std::get<0>(allocatedElements[i]));
    }
    delete memStats;
}

template <class Allocator>
template <size_t MAX_ALLOC_SIZE>
inline void AllocatorTest<Allocator>::AllocateTooBigObjectTest()
{
    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);
    AddMemoryPoolToAllocator(allocator);

    // NOLINTNEXTLINE(cert-msc50-cpp)
    size_t sizeObj = MAX_ALLOC_SIZE + 1 + static_cast<size_t>(rand());
    void *mem = allocator.Alloc(sizeObj);
    ASSERT_TRUE(mem == nullptr) << "Allocated a too-big object of " << sizeObj << " bytes at address " << std::hex
                                << mem;
    delete memStats;
}

template <class Allocator>
inline void AllocatorTest<Allocator>::AllocateTooMuchTest(size_t allocSize, size_t elementsCount)
{
    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);
    AddMemoryPoolToAllocatorProtected(allocator);

    bool isNotAll = false;
    for (size_t i = 0; i < elementsCount; i++) {
        void *mem = allocator.Alloc(allocSize);
        if (mem == nullptr) {
            isNotAll = true;
            break;
        }
        SetBytesFromByteArray(mem, allocSize);
    }
    ASSERT_TRUE(isNotAll) << "elements count: " << elementsCount << ", element size: " << allocSize
                          << ", seed: " << seed_;
    delete memStats;
}

template <class Allocator>
inline void AllocatorTest<Allocator>::AllocateVectorTest(size_t elementsCount)
{
    using ElementType = size_t;
    static constexpr size_t MAGIC_CONST = 3;
    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);
    AddMemoryPoolToAllocatorProtected(allocator);
    using AdapterType = typename decltype(allocator.Adapter())::template Rebind<ElementType>::other;
    std::vector<ElementType, AdapterType> vec(allocator.Adapter());

    for (size_t i = 0; i < elementsCount; i++) {
        vec.push_back(i * MAGIC_CONST);
    }
    for (size_t i = 0; i < elementsCount; i++) {
        ASSERT_EQ(vec[i], i * MAGIC_CONST) << "iteration: " << i;
    }

    vec.clear();

    for (size_t i = 0; i < elementsCount; i++) {
        vec.push_back(i * (MAGIC_CONST + 1));
    }
    for (size_t i = 0; i < elementsCount; i++) {
        ASSERT_EQ(vec[i], i * (MAGIC_CONST + 1)) << "iteration: " << i;
    }
    delete memStats;
}

template <class Allocator>
template <class ElementType>
inline void AllocatorTest<Allocator>::AllocateReuseTest(size_t alignmentMask, size_t elementsCount)
{
    static constexpr size_t SIZE_1 = sizeof(ElementType);
    static constexpr size_t SIZE_2 = SIZE_1 * 3;

    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);
    AddMemoryPoolToAllocator(allocator);
    std::vector<std::pair<void *, size_t>> allocatedElements(elementsCount);

    // First allocations
    for (size_t i = 0; i < elementsCount; ++i) {
        void *mem = allocator.Alloc(SIZE_1);
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << SIZE_1 << " bytes in " << i << " iteration";
        size_t index = SetBytesFromByteArray(mem, SIZE_1);
        allocatedElements[i] = {mem, index};
    }
    auto firstAllocatedMem = reinterpret_cast<uintptr_t>(allocatedElements[0].first);
    // Free
    for (size_t i = 0; i < elementsCount; i++) {
        ASSERT_TRUE(CompareBytesWithByteArray(allocatedElements[i].first, SIZE_1, allocatedElements[i].second))
            << "address: " << std::hex << allocatedElements[i].first << ", size: " << SIZE_1 << ", seed: " << seed_;
        allocator.Free(allocatedElements[i].first);
    }
    // Second allocations
    for (size_t i = 0; i < elementsCount; ++i) {
        void *mem = allocator.Alloc(SIZE_2);
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << SIZE_2 << " bytes in " << i << " iteration";
        size_t index = SetBytesFromByteArray(mem, SIZE_2);
        allocatedElements[i] = {mem, index};
    }
    auto secondAllocatedMem = reinterpret_cast<uintptr_t>(allocatedElements[0].first);
    // Free
    for (size_t i = 0; i < elementsCount; i++) {
        ASSERT_TRUE(CompareBytesWithByteArray(allocatedElements[i].first, SIZE_2, allocatedElements[i].second))
            << "address: " << std::hex << allocatedElements[i].first << ", size: " << SIZE_2 << ", seed: " << seed_;
        allocator.Free(allocatedElements[i].first);
    }
    delete memStats;
    ASSERT_EQ(firstAllocatedMem & ~alignmentMask, secondAllocatedMem & ~alignmentMask)
        << "first address = " << std::hex << firstAllocatedMem << ", second address = " << std::hex
        << secondAllocatedMem << std::endl
        << "alignment mask: " << alignmentMask << ", seed: " << seed_;
}

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE, Alignment LOG_ALIGN_MAX_VALUE,
          size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>
inline void AllocatorTest<Allocator>::ObjectIteratingSetUp(size_t freeGranularity, size_t poolsCount,
                                                           Allocator &allocator, size_t &elementsCount,
                                                           std::vector<void *> &allocatedElements,
                                                           std::unordered_set<size_t> &usedIndexes)
{
    AddMemoryPoolToAllocator(allocator);
    size_t allocatedPools = 1;
    auto doAllocations = [poolsCount]([[maybe_unused]] size_t allocatedPoolsCount,
                                      [[maybe_unused]] size_t count) -> bool {
        if constexpr (ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR == 0) {
            return allocatedPoolsCount < poolsCount;
        } else {
            (void)poolsCount;
            return count < ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR;
        }
    };

    // Allocations
    while (doAllocations(allocatedPools, elementsCount)) {
        size_t size = RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE);
        size_t align = RandFromRange(LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE);
        void *mem = allocator.Alloc(size, Alignment(align));
        if constexpr (ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR == 0) {
            if (mem == nullptr) {
                AddMemoryPoolToAllocator(allocator);
                allocatedPools++;
                // Retry with the same size and the originally requested alignment
                mem = allocator.Alloc(size, Alignment(align));
            }
        }
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << size << " bytes in " << elementsCount
                                    << " iteration, seed: " << seed_;
        allocatedElements.push_back(mem);
        usedIndexes.insert(elementsCount++);
    }
    // Free some elements
    for (size_t i = 0; i < elementsCount; i += freeGranularity) {
        size_t index = RandFromRange(0, elementsCount - 1);
        auto it = usedIndexes.find(index);
        if (it == usedIndexes.end()) {
            it = usedIndexes.begin();
            index = *it;
        }
        allocator.Free(allocatedElements[index]);
        usedIndexes.erase(it);
    }
}

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE, Alignment LOG_ALIGN_MAX_VALUE,
          size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>
inline void AllocatorTest<Allocator>::ObjectCollectionTest(size_t freeGranularity, size_t poolsCount)
{
    size_t elementsCount = 0;
    std::vector<void *> allocatedElements;
    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);
    std::unordered_set<size_t> usedIndexes;
    ObjectIteratingSetUp<MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE,
                         ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>(freeGranularity, poolsCount, allocator, elementsCount,
                                                                allocatedElements, usedIndexes);

    // Collect all objects into the unordered_set via the allocator's method
    allocator.Collect(&AllocatorTest<Allocator>::ReturnDeadAndPutInSet);
    // Check in the unordered_set
    for (size_t i = 0; i < elementsCount; i++) {
        auto it = usedIndexes.find(i);
        if (it != usedIndexes.end()) {
            void *mem = allocatedElements[i];
            ASSERT_TRUE(EraseFromSet(mem))
                << "Object at address " << std::hex << mem << " isn't in collected objects, seed: " << seed_;
        }
    }

    delete memStats;
    ASSERT_TRUE(IsEmptySet()) << "seed: " << seed_;
}

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE, Alignment LOG_ALIGN_MAX_VALUE,
          size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>
inline void AllocatorTest<Allocator>::ObjectIteratorTest(size_t freeGranularity, size_t poolsCount)
{
    size_t elementsCount = 0;
    std::vector<void *> allocatedElements;
    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);
    std::unordered_set<size_t> usedIndexes;
    ObjectIteratingSetUp<MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE,
                         ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>(freeGranularity, poolsCount, allocator, elementsCount,
                                                                allocatedElements, usedIndexes);

    // Collect all objects into the unordered_set via the allocator's method
    allocator.IterateOverObjects(&AllocatorTest<Allocator>::VisitAndPutInSet);
    // Free all and check in the unordered_set
    for (size_t i = 0; i < elementsCount; i++) {
        auto it = usedIndexes.find(i);
        if (it != usedIndexes.end()) {
            void *mem = allocatedElements[i];
            allocator.Free(mem);
            ASSERT_TRUE(EraseFromSet(mem))
                << "Object at address " << std::hex << mem << " isn't in collected objects, seed: " << seed_;
        }
    }

    delete memStats;
    ASSERT_TRUE(IsEmptySet()) << "seed: " << seed_;
}

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE, Alignment LOG_ALIGN_MAX_VALUE,
          size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>
inline void AllocatorTest<Allocator>::ObjectIteratorInRangeTest(size_t rangeIterationSize, size_t freeGranularity,
                                                                size_t poolsCount)
{
    ASSERT((rangeIterationSize & (rangeIterationSize - 1U)) == 0U);
    size_t elementsCount = 0;
    std::vector<void *> allocatedElements;
    std::unordered_set<size_t> usedIndexes;
    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);
    ObjectIteratingSetUp<MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE,
                         ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>(freeGranularity, poolsCount, allocator, elementsCount,
                                                                allocatedElements, usedIndexes);

    void *minObjPointer = *std::min_element(allocatedElements.begin(), allocatedElements.end());
    void *maxObjPointer = *std::max_element(allocatedElements.begin(), allocatedElements.end());
    // Collect all objects into the unordered_set via the allocator's method
    uintptr_t curPointer = ToUintPtr(minObjPointer);
    curPointer = curPointer & (~(rangeIterationSize - 1));
    while (curPointer <= ToUintPtr(maxObjPointer)) {
        allocator.IterateOverObjectsInRange(&AllocatorTest<Allocator>::VisitAndPutInSet, ToVoidPtr(curPointer),
                                            ToVoidPtr(curPointer + rangeIterationSize - 1U));
        curPointer = curPointer + rangeIterationSize;
    }

    // Free all and check in the unordered_set
    for (size_t i = 0; i < elementsCount; i++) {
        auto it = usedIndexes.find(i);
        if (it != usedIndexes.end()) {
            void *mem = allocatedElements[i];
            allocator.Free(mem);
            ASSERT_TRUE(EraseFromSet(mem))
                << "Object at address " << std::hex << mem << " isn't in collected objects, seed: " << seed_;
        }
    }
    delete memStats;
    ASSERT_TRUE(IsEmptySet()) << "seed: " << seed_;
}

template <class Allocator>
template <size_t ELEMENTS_COUNT>
inline void AllocatorTest<Allocator>::AsanTest(size_t freeGranularity, size_t poolsCount)
{
    using ElementType = uint64_t;
    static constexpr size_t ALLOC_SIZE = sizeof(ElementType);
    static constexpr size_t ALLOCATIONS_COUNT = ELEMENTS_COUNT;

    if (freeGranularity == 0) {
        freeGranularity = 1;
    }

    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);
    for (size_t i = 0; i < poolsCount; i++) {
        AddMemoryPoolToAllocatorProtected(allocator);
    }
    std::array<void *, ALLOCATIONS_COUNT> allocatedElements {};
    // Allocations
    for (size_t i = 0; i < ALLOCATIONS_COUNT; ++i) {
        void *mem = allocator.Alloc(ALLOC_SIZE);
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << ALLOC_SIZE << " bytes on " << i << " iteration";
        allocatedElements[i] = mem;
    }
    // Free some elements
    for (size_t i = 0; i < ALLOCATIONS_COUNT; i += freeGranularity) {
        allocator.Free(allocatedElements[i]);
    }
    // ASan check
    for (size_t i = 0; i < ALLOCATIONS_COUNT; ++i) {
        if (i % freeGranularity == 0) {
#ifdef PANDA_ASAN_ON
            EXPECT_DEATH(DeathWriteUint64(allocatedElements[i]), "")
                << "Write " << sizeof(ElementType) << " bytes at address " << std::hex << allocatedElements[i];
#else
            continue;
#endif  // PANDA_ASAN_ON
        } else {
            allocator.Free(allocatedElements[i]);
        }
    }
    delete memStats;
}
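
// Note: AsanTest only exercises the EXPECT_DEATH path when the build defines
// PANDA_ASAN_ON (i.e. an AddressSanitizer-instrumented build); otherwise the
// freed elements are simply skipped. A typical invocation from a derived
// fixture (hypothetical test name) might be:
//
//     TEST_F(MyAllocatorTest, Asan)
//     {
//         AsanTest<100U>(/* freeGranularity */ 3U, /* poolsCount */ 1U);
//     }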

template <class Allocator>
inline void AllocatorTest<Allocator>::AllocatedByThisAllocatorTest()
{
    mem::MemStatsType memStats;
    Allocator allocator(&memStats);
    AllocatedByThisAllocatorTest(allocator);
}

template <class Allocator>
inline void AllocatorTest<Allocator>::AllocatedByThisAllocatorTest(Allocator &allocator)
{
    static constexpr size_t ALLOC_SIZE = sizeof(uint64_t);
    AddMemoryPoolToAllocatorProtected(allocator);
    void *allocatedByThis = allocator.Alloc(ALLOC_SIZE);
    // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
    std::unique_ptr<void, void (*)(void *)> allocatedByMalloc(std::malloc(ALLOC_SIZE), free);
    std::array<uint8_t, ALLOC_SIZE> allocatedOnStack {};
    void *allocatedByMallocAddr = allocatedByMalloc.get();

    ASSERT_TRUE(AllocatedByThisAllocator(allocator, allocatedByThis)) << "address: " << std::hex << allocatedByThis;
    ASSERT_FALSE(AllocatedByThisAllocator(allocator, allocatedByMallocAddr)) << "address: " << allocatedByMallocAddr;
    ASSERT_FALSE(AllocatedByThisAllocator(allocator, static_cast<void *>(allocatedOnStack.data())))
        << "address on stack: " << std::hex << static_cast<void *>(allocatedOnStack.data());

    allocator.Free(allocatedByThis);
    allocatedByMalloc.reset();

    // NOLINTNEXTLINE(clang-analyzer-unix.Malloc)
    ASSERT_FALSE(AllocatedByThisAllocator(allocator, allocatedByMallocAddr))
        << "after free, address: " << allocatedByMallocAddr;
}

template <class Allocator>
void AllocatorTest<Allocator>::MtAllocRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
                                          std::atomic<size_t> *numFinished, size_t minAllocSize, size_t maxAllocSize,
                                          size_t minElementsCount, size_t maxElementsCount)
{
    size_t elementsCount = allocatorTestInstance->RandFromRange(minElementsCount, maxElementsCount);
    std::unordered_set<size_t> usedIndexes;
    // {memory, size, start_index_in_byte_array}
    std::vector<std::tuple<void *, size_t, size_t>> allocatedElements(elementsCount);

    for (size_t i = 0; i < elementsCount; ++i) {
        size_t size = allocatorTestInstance->RandFromRange(minAllocSize, maxAllocSize);
        // Allocation
        void *mem = allocator->Alloc(size);
        // Loop because other threads can use up the whole pool before we try to allocate something in it
        while (mem == nullptr) {
            allocatorTestInstance->AddMemoryPoolToAllocator(*allocator);
            mem = allocator->Alloc(size);
        }
        ASSERT_TRUE(mem != nullptr);
        // Write random bytes
        allocatedElements[i] = {mem, size, allocatorTestInstance->SetBytesFromByteArray(mem, size)};
        usedIndexes.insert(i);
    }

    // Compare
    while (!usedIndexes.empty()) {
        size_t i = allocatorTestInstance->RandFromRange(0, elementsCount - 1);
        auto it = usedIndexes.find(i);
        if (it != usedIndexes.end()) {
            usedIndexes.erase(it);
        } else {
            i = *usedIndexes.begin();
            usedIndexes.erase(usedIndexes.begin());
        }
        ASSERT_TRUE(allocatorTestInstance->AllocatedByThisAllocator(*allocator, std::get<0>(allocatedElements[i])));
        ASSERT_TRUE(allocatorTestInstance->CompareBytesWithByteArray(
            std::get<0>(allocatedElements[i]), std::get<1>(allocatedElements[i]), std::get<2U>(allocatedElements[i])))
            << "Address: " << std::hex << std::get<0>(allocatedElements[i])
            << ", size: " << std::get<1>(allocatedElements[i])
            << ", start index in byte array: " << std::get<2U>(allocatedElements[i])
            << ", seed: " << allocatorTestInstance->seed_;
    }
    // Atomic with seq_cst order reason: data race on numFinished with a requirement for sequentially consistent order
    // where threads observe all modifications in the same order
    numFinished->fetch_add(1, std::memory_order_seq_cst);
}

template <class Allocator>
void AllocatorTest<Allocator>::MtAllocFreeRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
                                              std::atomic<size_t> *numFinished, size_t freeGranularity,
                                              size_t minAllocSize, size_t maxAllocSize, size_t minElementsCount,
                                              size_t maxElementsCount)
{
    size_t elementsCount = allocatorTestInstance->RandFromRange(minElementsCount, maxElementsCount);
    std::unordered_set<size_t> usedIndexes;
    // {memory, size, start_index_in_byte_array}
    std::vector<std::tuple<void *, size_t, size_t>> allocatedElements(elementsCount);

    for (size_t i = 0; i < elementsCount; ++i) {
        size_t size = allocatorTestInstance->RandFromRange(minAllocSize, maxAllocSize);
        // Allocation
        void *mem = allocator->Alloc(size);
        // Loop because other threads can use up the whole pool before we try to allocate something in it
        while (mem == nullptr) {
            allocatorTestInstance->AddMemoryPoolToAllocator(*allocator);
            mem = allocator->Alloc(size);
        }
        ASSERT_TRUE(mem != nullptr);
        // Write random bytes
        allocatedElements[i] = {mem, size, allocatorTestInstance->SetBytesFromByteArray(mem, size)};
        usedIndexes.insert(i);
    }

    // Free some elements
    for (size_t i = 0; i < elementsCount; i += freeGranularity) {
        size_t index = allocatorTestInstance->RandFromRange(0, elementsCount - 1);
        auto it = usedIndexes.find(index);
        if (it != usedIndexes.end()) {
            usedIndexes.erase(it);
        } else {
            index = *usedIndexes.begin();
            usedIndexes.erase(usedIndexes.begin());
        }
        ASSERT_TRUE(allocatorTestInstance->AllocatedByThisAllocator(*allocator, std::get<0>(allocatedElements[index])));
        // Compare
        ASSERT_TRUE(allocatorTestInstance->CompareBytesWithByteArray(std::get<0>(allocatedElements[index]),
                                                                     std::get<1>(allocatedElements[index]),
                                                                     std::get<2U>(allocatedElements[index])))
            << "Address: " << std::hex << std::get<0>(allocatedElements[index])
            << ", size: " << std::get<1>(allocatedElements[index])
            << ", start index in byte array: " << std::get<2U>(allocatedElements[index])
            << ", seed: " << allocatorTestInstance->seed_;
        allocator->Free(std::get<0>(allocatedElements[index]));
    }

    // Compare and free
    while (!usedIndexes.empty()) {
        size_t i = allocatorTestInstance->RandFromRange(0, elementsCount - 1);
        auto it = usedIndexes.find(i);
        if (it != usedIndexes.end()) {
            usedIndexes.erase(it);
        } else {
            i = *usedIndexes.begin();
            usedIndexes.erase(usedIndexes.begin());
        }
        // Compare
        ASSERT_TRUE(allocatorTestInstance->CompareBytesWithByteArray(
            std::get<0>(allocatedElements[i]), std::get<1>(allocatedElements[i]), std::get<2U>(allocatedElements[i])))
            << "Address: " << std::hex << std::get<0>(allocatedElements[i])
            << ", size: " << std::get<1>(allocatedElements[i])
            << ", start index in byte array: " << std::get<2U>(allocatedElements[i])
            << ", seed: " << allocatorTestInstance->seed_;
        allocator->Free(std::get<0>(allocatedElements[i]));
    }
    // Atomic with seq_cst order reason: data race on numFinished with a requirement for sequentially consistent order
    // where threads observe all modifications in the same order
    numFinished->fetch_add(1, std::memory_order_seq_cst);
}
1148 
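/**
 * @brief Thread body for MtAllocIterateTest: allocates elements of random size while concurrently calling
 * IterateOverObjects and IterateOverObjectsInRange, then verifies the contents of every element
 * @param rangeIterationSize - size of the address range used for IterateOverObjectsInRange calls
 * @param numFinished - counter of finished threads, incremented on completion
 */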
template <class Allocator>
void AllocatorTest<Allocator>::MtAllocIterateRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
                                                 std::atomic<size_t> *numFinished, size_t rangeIterationSize,
                                                 size_t minAllocSize, size_t maxAllocSize, size_t minElementsCount,
                                                 size_t maxElementsCount)
{
    static constexpr size_t ITERATION_IN_RANGE_COUNT = 100;
    size_t elementsCount = allocatorTestInstance->RandFromRange(minElementsCount, maxElementsCount);
    // {memory, size, start_index_in_byte_array}
    std::vector<std::tuple<void *, size_t, size_t>> allocatedElements(elementsCount);

    // Iterate over all objects
    allocator->IterateOverObjects([&](void *mem) { (void)mem; });

    // Allocate objects
    for (size_t i = 0; i < elementsCount; ++i) {
        size_t size = allocatorTestInstance->RandFromRange(minAllocSize, maxAllocSize);
        // Allocation
        void *mem = allocator->Alloc(size);
        // Loop because other threads can exhaust the whole pool before we manage to allocate something from it
        while (mem == nullptr) {
            allocatorTestInstance->AddMemoryPoolToAllocator(*allocator);
            mem = allocator->Alloc(size);
        }
        ASSERT_TRUE(mem != nullptr);
        // Write random bytes
        allocatedElements[i] = {mem, size, allocatorTestInstance->SetBytesFromByteArray(mem, size)};
    }

    // Iterate over all objects
    allocator->IterateOverObjects([&](void *mem) { (void)mem; });

    size_t iteratedOverObjects = 0;
    // Compare values inside the objects
    for (size_t i = 0; i < elementsCount; ++i) {
        // Do many iterate-over-range calls to check for possible races
        if (iteratedOverObjects < ITERATION_IN_RANGE_COUNT) {
            // rangeIterationSize is expected to be a power of two, so this mask yields the range start
            void *leftBorder = ToVoidPtr(ToUintPtr(std::get<0>(allocatedElements[i])) & ~(rangeIterationSize - 1U));
            void *rightBorder = ToVoidPtr(ToUintPtr(leftBorder) + rangeIterationSize - 1U);
            allocator->IterateOverObjectsInRange([&](void *mem) { (void)mem; }, leftBorder, rightBorder);
            iteratedOverObjects++;
        }
        ASSERT_TRUE(allocatorTestInstance->AllocatedByThisAllocator(*allocator, std::get<0>(allocatedElements[i])));
        // Compare
        ASSERT_TRUE(allocatorTestInstance->CompareBytesWithByteArray(
            std::get<0>(allocatedElements[i]), std::get<1>(allocatedElements[i]), std::get<2U>(allocatedElements[i])))
            << "Address: " << std::hex << std::get<0>(allocatedElements[i])
            << ", size: " << std::get<1>(allocatedElements[i])
            << ", start index in byte array: " << std::get<2U>(allocatedElements[i])
            << ", seed: " << allocatorTestInstance->seed_;
    }
    // Atomic with seq_cst order reason: data race with numFinished; sequentially consistent order is required
    // so that threads observe all modifications in the same order
    numFinished->fetch_add(1, std::memory_order_seq_cst);
}

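/**
 * @brief Thread body for MtAllocCollectTest: allocates objects of random size and marks them for GC;
 * the first maxThreadWithCollect threads that reach the collection phase also run Collect()
 * @param threadWithCollect - counter used to select the threads that call Collect()
 * @param numFinished - counter of finished threads, incremented on completion
 */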
template <class Allocator>
void AllocatorTest<Allocator>::MtAllocCollectRun(AllocatorTest<Allocator> *allocatorTestInstance, Allocator *allocator,
                                                 std::atomic<size_t> *numFinished, size_t minAllocSize,
                                                 size_t maxAllocSize, size_t minElementsCount, size_t maxElementsCount,
                                                 uint32_t maxThreadWithCollect,
                                                 std::atomic<uint32_t> *threadWithCollect)
{
    size_t elementsCount = allocatorTestInstance->RandFromRange(minElementsCount, maxElementsCount);

    // Allocate objects
    for (size_t i = 0; i < elementsCount; ++i) {
        size_t size = allocatorTestInstance->RandFromRange(minAllocSize, maxAllocSize);
        // Allocation
        void *mem = allocator->Alloc(size);
        // Loop because other threads can exhaust the whole pool before we manage to allocate something from it
        while (mem == nullptr) {
            allocatorTestInstance->AddMemoryPoolToAllocator(*allocator);
            mem = allocator->Alloc(size);
        }
        ASSERT_TRUE(mem != nullptr);
        auto object = static_cast<ObjectHeader *>(mem);
        object->SetMarkedForGC();
    }

    // Collect objects
    // Atomic with seq_cst order reason: data race with threadWithCollect; sequentially consistent order is required
    // so that threads observe all modifications in the same order
    if (threadWithCollect->fetch_add(1U, std::memory_order_seq_cst) < maxThreadWithCollect) {
        allocator->Collect([&](ObjectHeader *object) {
            ObjectStatus objectStatus =
                object->IsMarkedForGC() ? ObjectStatus::DEAD_OBJECT : ObjectStatus::ALIVE_OBJECT;
            return objectStatus;
        });
    }
    // Atomic with seq_cst order reason: data race with numFinished; sequentially consistent order is required
    // so that threads observe all modifications in the same order
    numFinished->fetch_add(1, std::memory_order_seq_cst);
}

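/**
 * @brief Prepare the allocator for a multithreaded test: exhaust a freshly added memory pool with
 * allocations of allocSize bytes, free all of them except one and drop the pools that became free
 * @param allocator - allocator under test
 * @param allocSize - size of each preparatory allocation
 */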
template <class Allocator>
void AllocatorTest<Allocator>::MTTestPrologue(Allocator &allocator, size_t allocSize)
{
    // Prepare the allocator:
    std::vector<void *> allocatedElements;
    AddMemoryPoolToAllocator(allocator);
    // Allocate objects until the pool is exhausted
    while (true) {
        // Allocation
        void *mem = allocator.Alloc(allocSize);
        if (mem == nullptr) {
            break;
        }
        allocatedElements.push_back(mem);
    }
    // Free everything except one element:
    for (size_t i = 1; i < allocatedElements.size(); ++i) {
        allocator.Free(allocatedElements[i]);
    }

    allocator.VisitAndRemoveFreePools([&](void *mem, size_t size) {
        (void)mem;
        (void)size;
    });
}

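/**
 * @brief Run MtAllocRun in THREADS_COUNT detached threads over the given allocator and wait,
 * yielding, until all of them have finished
 * @param allocator - allocator under test
 */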
template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
inline void AllocatorTest<Allocator>::MtAllocTest(Allocator *allocator, size_t minElementsCount,
                                                  size_t maxElementsCount)
{
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static_assert(THREADS_COUNT == 1);
#endif
    std::atomic<size_t> numFinished = 0;
    for (size_t i = 0; i < THREADS_COUNT; i++) {
        auto tid = os::thread::ThreadStart(&MtAllocRun, this, allocator, &numFinished, MIN_ALLOC_SIZE, MAX_ALLOC_SIZE,
                                           minElementsCount, maxElementsCount);
        os::thread::ThreadDetach(tid);
    }

    while (true) {
        // Atomic with seq_cst order reason: data race with numFinished; sequentially consistent order is
        // required so that threads observe all modifications in the same order
        if (numFinished.load(std::memory_order_seq_cst) == THREADS_COUNT) {
            break;
        }
        os::thread::Yield();
    }
}

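/**
 * @brief Create an allocator, prepare it with MTTestPrologue and run MtAllocFreeRun in THREADS_COUNT
 * detached threads, waiting until all of them have finished
 * @param freeGranularity - step of the early-free loop inside MtAllocFreeRun
 */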
template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
inline void AllocatorTest<Allocator>::MtAllocFreeTest(size_t minElementsCount, size_t maxElementsCount,
                                                      size_t freeGranularity)
{
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static_assert(THREADS_COUNT == 1);
#endif
    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);
    std::atomic<size_t> numFinished = 0;

    // Prepare an allocator
    MTTestPrologue(allocator, RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE));

    for (size_t i = 0; i < THREADS_COUNT; i++) {
        auto tid = os::thread::ThreadStart(&MtAllocFreeRun, this, &allocator, &numFinished, freeGranularity,
                                           MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, minElementsCount, maxElementsCount);
        os::thread::ThreadDetach(tid);
    }

    while (true) {
        // Atomic with seq_cst order reason: data race with numFinished; sequentially consistent order is
        // required so that threads observe all modifications in the same order
        if (numFinished.load(std::memory_order_seq_cst) == THREADS_COUNT) {
            break;
        }
        os::thread::Yield();
    }
    delete memStats;
}

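/**
 * @brief Create an allocator, prepare it with MTTestPrologue and run MtAllocIterateRun in THREADS_COUNT
 * detached threads; after all threads have finished, collect every object as dead
 * @param rangeIterationSize - size of the address range used for IterateOverObjectsInRange calls
 */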
template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
inline void AllocatorTest<Allocator>::MtAllocIterateTest(size_t minElementsCount, size_t maxElementsCount,
                                                         size_t rangeIterationSize)
{
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static_assert(THREADS_COUNT == 1);
#endif
    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);
    std::atomic<size_t> numFinished = 0;

    // Prepare an allocator
    MTTestPrologue(allocator, RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE));

    for (size_t i = 0; i < THREADS_COUNT; i++) {
        auto tid = os::thread::ThreadStart(&MtAllocIterateRun, this, &allocator, &numFinished, rangeIterationSize,
                                           MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, minElementsCount, maxElementsCount);
        os::thread::ThreadDetach(tid);
    }

    while (true) {
        // Atomic with seq_cst order reason: data race with numFinished; sequentially consistent order is
        // required so that threads observe all modifications in the same order
        if (numFinished.load(std::memory_order_seq_cst) == THREADS_COUNT) {
            break;
        }
        os::thread::Yield();
    }

    // Delete all objects in the allocator
    allocator.Collect([&](ObjectHeader *object) {
        (void)object;
        return ObjectStatus::DEAD_OBJECT;
    });
    delete memStats;
}

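/**
 * @brief Create an allocator, prepare it with MTTestPrologue and run MtAllocCollectRun in THREADS_COUNT
 * detached threads; after all threads have finished, collect every remaining object as dead
 * @param maxThreadWithCollect - maximum number of threads that run Collect()
 */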
template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
inline void AllocatorTest<Allocator>::MtAllocCollectTest(size_t minElementsCount, size_t maxElementsCount,
                                                         size_t maxThreadWithCollect)
{
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static_assert(THREADS_COUNT == 1);
#endif
    auto *memStats = new mem::MemStatsType();
    Allocator allocator(memStats);
    std::atomic<size_t> numFinished = 0;
    std::atomic<uint32_t> threadWithCollect {0U};

    // Prepare an allocator
    MTTestPrologue(allocator, RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE));

    for (size_t i = 0; i < THREADS_COUNT; i++) {
        auto tid =
            os::thread::ThreadStart(&MtAllocCollectRun, this, &allocator, &numFinished, MIN_ALLOC_SIZE, MAX_ALLOC_SIZE,
                                    minElementsCount, maxElementsCount, maxThreadWithCollect, &threadWithCollect);
        os::thread::ThreadDetach(tid);
    }

    while (true) {
        // Atomic with seq_cst order reason: data race with numFinished; sequentially consistent order is
        // required so that threads observe all modifications in the same order
        if (numFinished.load(std::memory_order_seq_cst) == THREADS_COUNT) {
            break;
        }
        os::thread::Yield();
    }

    // Delete all objects in the allocator
    allocator.Collect([&](ObjectHeader *object) {
        (void)object;
        return ObjectStatus::DEAD_OBJECT;
    });
    delete memStats;
}

}  // namespace panda::mem

#endif  // PANDA_RUNTIME_TESTS_ALLOCATOR_TEST_BASE_H