/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_TESTS_ALLOCATOR_TEST_BASE_H_
#define PANDA_RUNTIME_TESTS_ALLOCATOR_TEST_BASE_H_

#include <gtest/gtest.h>

#include <algorithm>
#include <array>
#include <atomic>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <tuple>
#include <unordered_set>
#include <vector>

#include "libpandabase/mem/mem.h"
#include "libpandabase/os/thread.h"
#include "runtime/mem/bump-allocator.h"
#include "runtime/mem/mem_stats_additional_info.h"
#include "runtime/mem/mem_stats_default.h"
#include "runtime/include/object_header.h"

namespace panda::mem {

template <class Allocator>
class AllocatorTest : public testing::Test {
public:
    explicit AllocatorTest()
    {
#ifdef PANDA_NIGHTLY_TEST_ON
        seed_ = std::time(nullptr);
#else
        seed_ = 0xDEADBEEF;
#endif
        srand(seed_);
        InitByteArray();
    }

    ~AllocatorTest() {}

protected:
    static constexpr size_t BYTE_ARRAY_SIZE = 1000;

    unsigned int seed_;
    std::array<uint8_t, BYTE_ARRAY_SIZE> byte_array_;

    /**
     * \brief Initialize the byte array with random bytes
     */
    void InitByteArray()
    {
        for (size_t i = 0; i < BYTE_ARRAY_SIZE; ++i) {
            byte_array_[i] = RandFromRange(0, 255);
        }
    }

    /**
     * \brief Add a memory pool to the allocator (may be a no-op for some allocators)
     * @param allocator - allocator to add the pool to
     */
    virtual void AddMemoryPoolToAllocator([[maybe_unused]] Allocator &allocator) = 0;

    /**
     * \brief Add a memory pool to the allocator and protect it (may be a no-op for some allocators)
     * @param allocator - allocator to add the protected pool to
     */
    virtual void AddMemoryPoolToAllocatorProtected([[maybe_unused]] Allocator &allocator) = 0;

    /**
     * \brief Check whether memory was allocated by this allocator
     * @param allocator - allocator
     * @param mem - allocated memory
     * @return true if mem was allocated by this allocator
     */
    virtual bool AllocatedByThisAllocator([[maybe_unused]] Allocator &allocator, [[maybe_unused]] void *mem) = 0;

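    /*
     * Example (illustrative sketch only): a concrete test fixture derives from
     * this class and implements the three hooks above. "MyAllocator" and the
     * bodies below are placeholders, not actual Panda APIs:
     *
     *     class MyAllocatorTest : public AllocatorTest<MyAllocator> {
     *     protected:
     *         void AddMemoryPoolToAllocator(MyAllocator &allocator) override
     *         {
     *             // hand the allocator a fresh pool here (or leave empty)
     *         }
     *         void AddMemoryPoolToAllocatorProtected(MyAllocator &allocator) override
     *         {
     *             // same as above, plus memory protection if supported
     *         }
     *         bool AllocatedByThisAllocator(MyAllocator &allocator, void *mem) override
     *         {
     *             return false;  // delegate to the allocator's own ownership check
     *         }
     *     };
     */
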
    /**
     * \brief Generate a random value in [min_value, max_value]
     * @param min_value - minimum size_t value in range
     * @param max_value - maximum size_t value in range
     * @return random size_t value in [min_value, max_value]
     */
    size_t RandFromRange(size_t min_value, size_t max_value)
    {
        // rand() is not thread-safe, so call it under a lock
        static os::memory::Mutex rand_lock;
        os::memory::LockHolder lock(rand_lock);
        return min_value + rand() % (max_value - min_value + 1);
    }

    /**
     * \brief Write a value to memory for death tests
     * @param mem - memory to write to
     *
     * Used in address sanitizer tests: writing to freed memory should abort
     */
    void DeathWriteUint64(void *mem)
    {
        *(static_cast<uint64_t *>(mem)) = 0xDEADBEEF;
    }

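    // Typical use (sketch): AsanTest() below frees an element and then expects
    // the write to abort under the address sanitizer, e.g.:
    //     EXPECT_DEATH(DeathWriteUint64(freed_mem), "");  // freed_mem: illustrative name
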
    /**
     * \brief Fill memory with bytes from the byte array
     * @param mem - memory to fill with bytes from the byte array
     * @param size - memory size in bytes
     * @return start index in byte_array_
     */
    size_t SetBytesFromByteArray(void *mem, size_t size)
    {
        size_t start_index = RandFromRange(0, BYTE_ARRAY_SIZE - 1);
        size_t copied = 0;
        size_t first_copy_size = std::min(size, BYTE_ARRAY_SIZE - start_index);
        // Set head of memory
        memcpy_s(mem, first_copy_size, &byte_array_[start_index], first_copy_size);
        size -= first_copy_size;
        copied += first_copy_size;
        // Set middle part of memory
        while (size > BYTE_ARRAY_SIZE) {
            memcpy_s(ToVoidPtr(ToUintPtr(mem) + copied), BYTE_ARRAY_SIZE, byte_array_.data(), BYTE_ARRAY_SIZE);
            size -= BYTE_ARRAY_SIZE;
            copied += BYTE_ARRAY_SIZE;
        }
        // Set tail of memory
        memcpy_s(ToVoidPtr(ToUintPtr(mem) + copied), size, byte_array_.data(), size);

        return start_index;
    }

    /**
     * \brief Compare bytes in memory with the byte array
     * @param mem - memory to compare with the byte array
     * @param size - memory size in bytes
     * @param start_index_in_byte_array - start index in the byte array for comparison with memory
     * @return true if the bytes are equal, false otherwise
     */
    bool CompareBytesWithByteArray(void *mem, size_t size, size_t start_index_in_byte_array)
    {
        size_t compared = 0;
        size_t first_compare_size = std::min(size, BYTE_ARRAY_SIZE - start_index_in_byte_array);
        // Compare head of memory
        if (memcmp(mem, &byte_array_[start_index_in_byte_array], first_compare_size) != 0) {
            return false;
        }
        compared += first_compare_size;
        size -= first_compare_size;
        // Compare middle part of memory
        while (size >= BYTE_ARRAY_SIZE) {
            if (memcmp(ToVoidPtr(ToUintPtr(mem) + compared), byte_array_.data(), BYTE_ARRAY_SIZE) != 0) {
                return false;
            }
            size -= BYTE_ARRAY_SIZE;
            compared += BYTE_ARRAY_SIZE;
        }
        // Compare tail of memory
        if (memcmp(ToVoidPtr(ToUintPtr(mem) + compared), byte_array_.data(), size) != 0) {
            return false;
        }

        return true;
    }

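    /*
     * Example (sketch): the two helpers above are used as a pair. The start
     * index returned by SetBytesFromByteArray is later fed back to
     * CompareBytesWithByteArray to verify that the memory was not corrupted:
     *
     *     void *mem = allocator.Alloc(size);
     *     size_t start_index = SetBytesFromByteArray(mem, size);
     *     // ... exercise the allocator ...
     *     ASSERT_TRUE(CompareBytesWithByteArray(mem, size, start_index));
     */
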
    /**
     * \brief Allocate with one alignment
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam ALIGNMENT - enum Alignment value for allocations
     * @tparam AllocatorArgs - argument types for allocator creation
     * @param pools_count - number of pools needed for allocations
     * @param allocator_args - arguments for allocator creation
     *
     * Allocate all possible sizes from [MIN_ALLOC_SIZE, MAX_ALLOC_SIZE] with ALIGNMENT alignment
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment ALIGNMENT, class... AllocatorArgs>
    void OneAlignedAllocFreeTest(size_t pools_count, AllocatorArgs &&... allocator_args);

    /**
     * \brief Allocate with all alignments
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
     * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @param pools_count - number of pools needed for allocations
     *
     * Allocate all possible sizes from [MIN_ALLOC_SIZE, MAX_ALLOC_SIZE] with all possible alignments from
     * [LOG_ALIGN_MIN, LOG_ALIGN_MAX]
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE = LOG_ALIGN_MIN,
              Alignment LOG_ALIGN_MAX_VALUE = LOG_ALIGN_MAX>
    void AlignedAllocFreeTest(size_t pools_count = 1);

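    /*
     * Usage (sketch, assuming a fixture "MyAllocatorTest" derived from this
     * class as in the example above):
     *
     *     TEST_F(MyAllocatorTest, AlignedAllocFree)
     *     {
     *         AlignedAllocFreeTest<1, 512>(2);  // sizes 1..512 bytes, 2 pools
     *     }
     */
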
    /**
     * \brief Simple test for allocate and free
     * @param alloc_size - size in bytes for each allocation
     * @param elements_count - number of elements to allocate
     * @param pools_count - number of pools needed for allocations
     *
     * Allocate elements, fill them with random values, then check and free the memory
     */
    void AllocateAndFree(size_t alloc_size, size_t elements_count, size_t pools_count = 1);

    /**
     * \brief Simple test for the iteration over free pools method
     * @tparam POOLS_COUNT - number of pools needed for allocations, must be bigger than 3
     * @param alloc_size - size in bytes for each allocation
     *
     * Allocate and use memory pools; free all elements from the first pool, the last
     * and one in the middle; call the iteration over free pools
     * and allocate something again.
     */
    template <size_t POOLS_COUNT = 5>
    void VisitAndRemoveFreePools(size_t alloc_size);

    /**
     * \brief Allocate with different sizes and free in random order
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam AllocatorArgs - argument types for allocator creation
     * @param elements_count - number of elements to allocate
     * @param pools_count - number of pools needed for allocations
     * @param allocator_args - arguments for allocator creation
     *
     * Allocate elements of random sizes in random order and fill them with random values,
     * then check and free the memory, also in random order
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, class... AllocatorArgs>
    void AllocateFreeDifferentSizesTest(size_t elements_count, size_t pools_count, AllocatorArgs &&... allocator_args);

    /**
     * \brief Try to allocate a too-big object; the allocation must fail
     * @tparam MAX_ALLOC_SIZE - maximum possible size for an allocation by this allocator
     */
    template <size_t MAX_ALLOC_SIZE>
    void AllocateTooBigObjectTest();

    /**
     * \brief Try to allocate too many objects; not all allocations can succeed
     * @param alloc_size - size in bytes for one allocation
     * @param elements_count - number of elements to allocate
     *
     * Allocate more elements than the allocator can hold, so at least one allocation must fail
     */
    void AllocateTooMuchTest(size_t alloc_size, size_t elements_count);

    /**
     * \brief Use the allocator in std::vector
     * @param elements_count - number of elements to allocate
     *
     * Check that this allocator's adapter works, using std::vector as an example
     */
    void AllocateVectorTest(size_t elements_count = 32);

    /**
     * \brief Allocate and reuse
     * @tparam element_type - type of elements to allocate
     * @param alignment_mask - mask applied to both start addresses before comparison
     * @param elements_count - number of elements to allocate
     *
     * Allocate and free memory, then allocate again so the memory is reused; check the two start addresses
     */
    template <class element_type = uint64_t>
    void AllocateReuseTest(size_t alignment_mask, size_t elements_count = 100);

    /**
     * \brief Allocate and free objects, collect via allocator method
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
     * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @tparam ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR - 0 if the allocator uses pools, otherwise the number of
     * elements to allocate
     * @param free_granularity - granularity of object frees before collection
     * @param pools_count - number of pools needed for allocations
     *
     * Allocate objects, free a part of them, and collect via the allocator method with free calls during the
     * collection. Check the collection results.
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE = LOG_ALIGN_MIN,
              Alignment LOG_ALIGN_MAX_VALUE = LOG_ALIGN_MAX, size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR = 0>
    void ObjectCollectionTest(size_t free_granularity = 4, size_t pools_count = 2);

    /**
     * \brief Allocate and free objects, iterate via allocator method
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
     * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @tparam ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR - 0 if the allocator uses pools, otherwise the number of
     * elements to allocate
     * @param free_granularity - granularity of object frees before iteration
     * @param pools_count - number of pools needed for allocations
     *
     * Allocate objects, free a part of them, and iterate via the allocator method.
     * Check the iterated elements and free them later.
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE = LOG_ALIGN_MIN,
              Alignment LOG_ALIGN_MAX_VALUE = LOG_ALIGN_MAX, size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR = 0>
    void ObjectIteratorTest(size_t free_granularity = 4, size_t pools_count = 2);

    /**
     * \brief Allocate and free objects, iterate via allocator method iterating in range
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
     * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @tparam ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR - 0 if the allocator uses pools, otherwise the number of
     * elements to allocate
     * @param range_iteration_size - size of an iteration range during the test. Must be a power of two
     * @param free_granularity - granularity of object frees before iteration
     * @param pools_count - number of pools needed for allocations
     *
     * Allocate objects, free a part of them, and iterate via the allocator method iterating in range.
     * Check the iteration results and free the objects later.
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE = LOG_ALIGN_MIN,
              Alignment LOG_ALIGN_MAX_VALUE = LOG_ALIGN_MAX, size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR = 0>
    void ObjectIteratorInRangeTest(size_t range_iteration_size, size_t free_granularity = 4, size_t pools_count = 2);

    /**
     * \brief Address sanitizer test for the allocator
     * @tparam ELEMENTS_COUNT - number of elements to allocate
     * @param free_granularity - granularity of element frees
     * @param pools_count - number of pools needed for allocations
     *
     * Test for the address sanitizer. Free some elements and try to write a value into the freed elements.
     */
    template <size_t ELEMENTS_COUNT = 100>
    void AsanTest(size_t free_granularity = 3, size_t pools_count = 1);

    /**
     * \brief Test the allocated-by-this-allocator check
     *
     * Test the allocator function that checks whether memory was allocated by this allocator
     */
    void AllocatedByThisAllocatorTest();

    /**
     * \brief Test the allocated-by-this-allocator check on a given allocator
     *
     * Test the allocator function that checks whether memory was allocated by this allocator
     */
    void AllocatedByThisAllocatorTest(Allocator &allocator);

    /**
     * \brief Simultaneously allocate/free objects in different threads
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam THREADS_COUNT - the number of threads used in this test
     * @param allocator - target allocator for the test
     * @param min_elements_count - minimum number of elements allocated per thread during the test
     * @param max_elements_count - maximum number of elements allocated per thread during the test
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
    void MT_AllocTest(Allocator *allocator, size_t min_elements_count, size_t max_elements_count);

    /**
     * \brief Simultaneously allocate/free objects in different threads
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam THREADS_COUNT - the number of threads used in this test
     * @param min_elements_count - minimum number of elements allocated per thread during the test
     * @param max_elements_count - maximum number of elements allocated per thread during the test
     * @param free_granularity - granularity of object frees before the total free
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
    void MT_AllocFreeTest(size_t min_elements_count, size_t max_elements_count, size_t free_granularity = 4);

    /**
     * \brief Simultaneously allocate objects and iterate over them (in range too) in different threads
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam THREADS_COUNT - the number of threads used in this test
     * @param min_elements_count - minimum number of elements allocated per thread during the test
     * @param max_elements_count - maximum number of elements allocated per thread during the test
     * @param range_iteration_size - size of an iteration range during the test. Must be a power of two
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
    void MT_AllocIterateTest(size_t min_elements_count, size_t max_elements_count, size_t range_iteration_size);

    /**
     * \brief Simultaneously allocate and collect objects in different threads
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam THREADS_COUNT - the number of threads used in this test
     * @param min_elements_count - minimum number of elements allocated per thread during the test
     * @param max_elements_count - maximum number of elements allocated per thread during the test
     * @param max_thread_with_collect - maximum number of threads that call collect simultaneously
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
    void MT_AllocCollectTest(size_t min_elements_count, size_t max_elements_count, size_t max_thread_with_collect = 1);

private:
    /**
     * \brief Allocate and free objects in the allocator for future collecting/iterating checks
     * @tparam MIN_ALLOC_SIZE - minimum possible size for one allocation
     * @tparam MAX_ALLOC_SIZE - maximum possible size for one allocation
     * @tparam LOG_ALIGN_MIN_VALUE - minimum possible alignment for one allocation
     * @tparam LOG_ALIGN_MAX_VALUE - maximum possible alignment for one allocation
     * @tparam ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR - 0 if the allocator uses pools, otherwise the number of
     * elements to allocate
     * @param free_granularity - granularity of object frees before collection
     * @param pools_count - number of pools needed for allocations
     *
     * Allocate objects and free a part of them.
     */
    template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE,
              Alignment LOG_ALIGN_MAX_VALUE, size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>
    void ObjectIteratingSetUp(size_t free_granularity, size_t pools_count, Allocator &allocator, size_t &elements_count,
                              std::vector<void *> &allocated_elements, std::unordered_set<size_t> &used_indexes);

    /**
     * \brief Prepare the allocator for MT work: allocate and free everything except one element.
     * This generates a common allocator state before the specific tests.
     */
    void MTTestPrologue(Allocator &allocator, size_t alloc_size);

    static void MT_AllocRun(AllocatorTest<Allocator> *allocator_test_instance, Allocator *allocator,
                            std::atomic<size_t> *num_finished, size_t min_alloc_size, size_t max_alloc_size,
                            size_t min_elements_count, size_t max_elements_count);

    static void MT_AllocFreeRun(AllocatorTest<Allocator> *allocator_test_instance, Allocator *allocator,
                                std::atomic<size_t> *num_finished, size_t free_granularity, size_t min_alloc_size,
                                size_t max_alloc_size, size_t min_elements_count, size_t max_elements_count);

    static void MT_AllocIterateRun(AllocatorTest<Allocator> *allocator_test_instance, Allocator *allocator,
                                   std::atomic<size_t> *num_finished, size_t range_iteration_size,
                                   size_t min_alloc_size, size_t max_alloc_size, size_t min_elements_count,
                                   size_t max_elements_count);

    static void MT_AllocCollectRun(AllocatorTest<Allocator> *allocator_test_instance, Allocator *allocator,
                                   std::atomic<size_t> *num_finished, size_t min_alloc_size, size_t max_alloc_size,
                                   size_t min_elements_count, size_t max_elements_count,
                                   uint32_t max_thread_with_collect, std::atomic<uint32_t> *thread_with_collect);

    static std::unordered_set<void *> objects_set_;

    static void VisitAndPutInSet(void *obj_mem)
    {
        objects_set_.insert(obj_mem);
    }

    static ObjectStatus ReturnDeadAndPutInSet(ObjectHeader *obj_mem)
    {
        objects_set_.insert(obj_mem);
        return ObjectStatus::DEAD_OBJECT;
    }

    static bool EraseFromSet(void *obj_mem)
    {
        auto it = objects_set_.find(obj_mem);
        if (it != objects_set_.end()) {
            objects_set_.erase(it);
            return true;
        }
        return false;
    }

    static bool IsEmptySet() noexcept
    {
        return objects_set_.empty();
    }
};

template <class Allocator>
std::unordered_set<void *> AllocatorTest<Allocator>::objects_set_;

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment ALIGNMENT, class... AllocatorArgs>
inline void AllocatorTest<Allocator>::OneAlignedAllocFreeTest(size_t pools_count, AllocatorArgs &&... allocator_args)
{
    static constexpr size_t ALLOCATIONS_COUNT = MAX_ALLOC_SIZE - MIN_ALLOC_SIZE + 1;

    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats, std::forward<AllocatorArgs>(allocator_args)...);
    for (size_t i = 0; i < pools_count; ++i) {
        AddMemoryPoolToAllocator(allocator);
    }
    std::array<std::pair<void *, size_t>, ALLOCATIONS_COUNT> allocated_elements;

    // Allocations
    for (size_t size = MIN_ALLOC_SIZE; size <= MAX_ALLOC_SIZE; ++size) {
        void *mem = allocator.Alloc(size, Alignment(ALIGNMENT));
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << size << " bytes with " << static_cast<size_t>(ALIGNMENT)
                                    << " log alignment, seed: " << seed_;
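        // For a power-of-two alignment A in bytes, an address is A-aligned iff (addr & (A - 1)) == 0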
        ASSERT_EQ(reinterpret_cast<uintptr_t>(mem) & (GetAlignmentInBytes(Alignment(ALIGNMENT)) - 1), 0UL)
            << size << " bytes, " << static_cast<size_t>(ALIGNMENT) << " log alignment, seed: " << seed_;
        allocated_elements[size - MIN_ALLOC_SIZE] = {mem, SetBytesFromByteArray(mem, size)};
    }
    // Check and free
    for (size_t size = MIN_ALLOC_SIZE; size <= MAX_ALLOC_SIZE; size++) {
        size_t k = size - MIN_ALLOC_SIZE;
        ASSERT_TRUE(CompareBytesWithByteArray(allocated_elements[k].first, size, allocated_elements[k].second))
            << "address: " << std::hex << allocated_elements[k].first << ", size: " << size
            << ", alignment: " << static_cast<size_t>(ALIGNMENT) << ", seed: " << seed_;
        allocator.Free(allocated_elements[k].first);
    }
    delete mem_stats;
}

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE, Alignment LOG_ALIGN_MAX_VALUE>
inline void AllocatorTest<Allocator>::AlignedAllocFreeTest(size_t pools_count)
{
    static_assert(MIN_ALLOC_SIZE <= MAX_ALLOC_SIZE);
    static_assert(LOG_ALIGN_MIN_VALUE <= LOG_ALIGN_MAX_VALUE);
    static constexpr size_t ALLOCATIONS_COUNT =
        (MAX_ALLOC_SIZE - MIN_ALLOC_SIZE + 1) * (LOG_ALIGN_MAX_VALUE - LOG_ALIGN_MIN_VALUE + 1);

    std::array<std::pair<void *, size_t>, ALLOCATIONS_COUNT> allocated_elements;
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);
    for (size_t i = 0; i < pools_count; i++) {
        AddMemoryPoolToAllocator(allocator);
    }

    // Allocations with alignment
    size_t k = 0;
    for (size_t size = MIN_ALLOC_SIZE; size <= MAX_ALLOC_SIZE; ++size) {
        for (size_t align = LOG_ALIGN_MIN_VALUE; align <= LOG_ALIGN_MAX_VALUE; ++align, ++k) {
            void *mem = allocator.Alloc(size, Alignment(align));
            ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << size << " bytes with " << align
                                        << " log alignment, seed: " << seed_;
            ASSERT_EQ(reinterpret_cast<uintptr_t>(mem) & (GetAlignmentInBytes(Alignment(align)) - 1), 0UL)
                << size << " bytes, " << align << " log alignment, seed: " << seed_;
            allocated_elements[k] = {mem, SetBytesFromByteArray(mem, size)};
        }
    }
    // Check and free
    k = 0;
    for (size_t size = MIN_ALLOC_SIZE; size <= MAX_ALLOC_SIZE; ++size) {
        for (size_t align = LOG_ALIGN_MIN_VALUE; align <= LOG_ALIGN_MAX_VALUE; ++align, ++k) {
            ASSERT_TRUE(CompareBytesWithByteArray(allocated_elements[k].first, size, allocated_elements[k].second))
                << "address: " << std::hex << allocated_elements[k].first << ", size: " << size
                << ", alignment: " << align << ", seed: " << seed_;
            allocator.Free(allocated_elements[k].first);
        }
    }
    delete mem_stats;
}

template <class Allocator>
inline void AllocatorTest<Allocator>::AllocateAndFree(size_t alloc_size, size_t elements_count, size_t pools_count)
{
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);
    for (size_t i = 0; i < pools_count; i++) {
        AddMemoryPoolToAllocator(allocator);
    }
    std::vector<std::pair<void *, size_t>> allocated_elements(elements_count);

    // Allocations
    for (size_t i = 0; i < elements_count; ++i) {
        void *mem = allocator.Alloc(alloc_size);
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << alloc_size << " bytes in " << i
                                    << " iteration, seed: " << seed_;
        size_t index = SetBytesFromByteArray(mem, alloc_size);
        allocated_elements[i] = {mem, index};
    }
    // Free
    for (auto &element : allocated_elements) {
        ASSERT_TRUE(CompareBytesWithByteArray(element.first, alloc_size, element.second))
            << "address: " << std::hex << element.first << ", size: " << alloc_size << ", seed: " << seed_;
        allocator.Free(element.first);
    }
    delete mem_stats;
}

template <class Allocator>
template <size_t POOLS_COUNT>
inline void AllocatorTest<Allocator>::VisitAndRemoveFreePools(size_t alloc_size)
{
    static constexpr size_t POOLS_TO_FREE = 3;
    static_assert(POOLS_COUNT > POOLS_TO_FREE);
    std::array<std::vector<void *>, POOLS_COUNT> allocated_elements;
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);

    for (size_t i = 0; i < POOLS_COUNT; i++) {
        AddMemoryPoolToAllocator(allocator);
        while (true) {
            void *mem = allocator.Alloc(alloc_size);
            if (mem == nullptr) {
                break;
            }
            allocated_elements[i].push_back(mem);
        }
    }
    std::array<size_t, POOLS_TO_FREE> freed_pools_indexes = {0, POOLS_COUNT / 2, POOLS_COUNT - 1};
    // Free all elements in the chosen pools
    for (auto i : freed_pools_indexes) {
        for (auto j : allocated_elements[i]) {
            allocator.Free(j);
        }
        allocated_elements[i].clear();
    }
    size_t freed_pools = 0;
    allocator.VisitAndRemoveFreePools([&](void *mem, size_t size) {
        (void)mem;
        (void)size;
        freed_pools++;
    });
    ASSERT_TRUE(freed_pools == POOLS_TO_FREE) << ", seed: " << seed_;
    ASSERT_TRUE(allocator.Alloc(alloc_size) == nullptr) << ", seed: " << seed_;
    // Allocate again
    for (auto i : freed_pools_indexes) {
        AddMemoryPoolToAllocator(allocator);
        while (true) {
            void *mem = allocator.Alloc(alloc_size);
            if (mem == nullptr) {
                break;
            }
            allocated_elements[i].push_back(mem);
        }
    }
    // Free everything:
    for (size_t i = 0; i < POOLS_COUNT; i++) {
        for (auto j : allocated_elements[i]) {
            allocator.Free(j);
        }
        allocated_elements[i].clear();
    }
    freed_pools = 0;
    allocator.VisitAndRemoveFreePools([&](void *mem, size_t size) {
        (void)mem;
        (void)size;
        freed_pools++;
    });
    delete mem_stats;
    ASSERT_TRUE(freed_pools == POOLS_COUNT) << ", seed: " << seed_;
}

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, class... AllocatorArgs>
inline void AllocatorTest<Allocator>::AllocateFreeDifferentSizesTest(size_t elements_count, size_t pools_count,
                                                                     AllocatorArgs &&... allocator_args)
{
    std::unordered_set<size_t> used_indexes;
    // {memory, size, start_index_in_byte_array}
    std::vector<std::tuple<void *, size_t, size_t>> allocated_elements(elements_count);
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats, std::forward<AllocatorArgs>(allocator_args)...);
    for (size_t i = 0; i < pools_count; i++) {
        AddMemoryPoolToAllocator(allocator);
    }

    size_t full_size_allocated = 0;
    for (size_t i = 0; i < elements_count; ++i) {
        size_t size = RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE);
        // Allocation
        void *mem = allocator.Alloc(size);
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << size << " bytes, full allocated: " << full_size_allocated
                                    << ", seed: " << seed_;
        full_size_allocated += size;
        // Write random bytes
        allocated_elements[i] = {mem, size, SetBytesFromByteArray(mem, size)};
        used_indexes.insert(i);
    }
    // Compare and free
    while (!used_indexes.empty()) {
        size_t i = RandFromRange(0, elements_count - 1);
        auto it = used_indexes.find(i);
        if (it != used_indexes.end()) {
            used_indexes.erase(it);
        } else {
            i = *used_indexes.begin();
            used_indexes.erase(used_indexes.begin());
        }
        // Compare
        ASSERT_TRUE(CompareBytesWithByteArray(std::get<0>(allocated_elements[i]), std::get<1>(allocated_elements[i]),
                                              std::get<2>(allocated_elements[i])))
            << "Address: " << std::hex << std::get<0>(allocated_elements[i])
            << ", size: " << std::get<1>(allocated_elements[i])
            << ", start index in byte array: " << std::get<2>(allocated_elements[i]) << ", seed: " << seed_;
        allocator.Free(std::get<0>(allocated_elements[i]));
    }
    delete mem_stats;
}

template <class Allocator>
template <size_t MAX_ALLOC_SIZE>
inline void AllocatorTest<Allocator>::AllocateTooBigObjectTest()
{
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);
    AddMemoryPoolToAllocator(allocator);

    size_t size_obj = MAX_ALLOC_SIZE + 1 + static_cast<size_t>(rand());
    void *mem = allocator.Alloc(size_obj);
    ASSERT_TRUE(mem == nullptr) << "Allocated a too-big object of size " << size_obj << " at address " << std::hex
                                << mem;
    delete mem_stats;
}

template <class Allocator>
inline void AllocatorTest<Allocator>::AllocateTooMuchTest(size_t alloc_size, size_t elements_count)
{
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);
    AddMemoryPoolToAllocatorProtected(allocator);

    bool is_not_all = false;
    for (size_t i = 0; i < elements_count; i++) {
        void *mem = allocator.Alloc(alloc_size);
        if (mem == nullptr) {
            is_not_all = true;
            break;
        }
        SetBytesFromByteArray(mem, alloc_size);
    }
    ASSERT_TRUE(is_not_all) << "elements count: " << elements_count << ", element size: " << alloc_size
                            << ", seed: " << seed_;
    delete mem_stats;
}

template <class Allocator>
inline void AllocatorTest<Allocator>::AllocateVectorTest(size_t elements_count)
{
    using element_type = size_t;
    static constexpr size_t MAGIC_CONST = 3;
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);
    AddMemoryPoolToAllocatorProtected(allocator);
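    // Rebind the allocator's adapter from its default element type to element_type,
    // so std::vector can allocate its storage through this allocator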
    using adapter_type = typename decltype(allocator.Adapter())::template rebind<element_type>::other;
    std::vector<element_type, adapter_type> vec(allocator.Adapter());

    for (size_t i = 0; i < elements_count; i++) {
        vec.push_back(i * MAGIC_CONST);
    }
    for (size_t i = 0; i < elements_count; i++) {
        ASSERT_EQ(vec[i], i * MAGIC_CONST) << "iteration: " << i;
    }

    vec.clear();

    for (size_t i = 0; i < elements_count; i++) {
        vec.push_back(i * (MAGIC_CONST + 1));
    }
    for (size_t i = 0; i < elements_count; i++) {
        ASSERT_EQ(vec[i], i * (MAGIC_CONST + 1)) << "iteration: " << i;
    }
    delete mem_stats;
}

template <class Allocator>
template <class element_type>
inline void AllocatorTest<Allocator>::AllocateReuseTest(size_t alignment_mask, size_t elements_count)
{
    static constexpr size_t SIZE_1 = sizeof(element_type);
    static constexpr size_t SIZE_2 = SIZE_1 * 3;

    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);
    AddMemoryPoolToAllocator(allocator);
    std::vector<std::pair<void *, size_t>> allocated_elements(elements_count);

    // First allocations
    for (size_t i = 0; i < elements_count; ++i) {
        void *mem = allocator.Alloc(SIZE_1);
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << SIZE_1 << " bytes in " << i << " iteration";
        size_t index = SetBytesFromByteArray(mem, SIZE_1);
        allocated_elements[i] = {mem, index};
    }
    uintptr_t first_allocated_mem = reinterpret_cast<uintptr_t>(allocated_elements[0].first);
    // Free
    for (size_t i = 0; i < elements_count; i++) {
        ASSERT_TRUE(CompareBytesWithByteArray(allocated_elements[i].first, SIZE_1, allocated_elements[i].second))
            << "address: " << std::hex << allocated_elements[i].first << ", size: " << SIZE_1 << ", seed: " << seed_;
        allocator.Free(allocated_elements[i].first);
    }
    // Second allocations
    for (size_t i = 0; i < elements_count; ++i) {
        void *mem = allocator.Alloc(SIZE_2);
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << SIZE_2 << " bytes in " << i << " iteration";
        size_t index = SetBytesFromByteArray(mem, SIZE_2);
        allocated_elements[i] = {mem, index};
    }
    uintptr_t second_allocated_mem = reinterpret_cast<uintptr_t>(allocated_elements[0].first);
    // Free
    for (size_t i = 0; i < elements_count; i++) {
        ASSERT_TRUE(CompareBytesWithByteArray(allocated_elements[i].first, SIZE_2, allocated_elements[i].second))
            << "address: " << std::hex << allocated_elements[i].first << ", size: " << SIZE_2 << ", seed: " << seed_;
        allocator.Free(allocated_elements[i].first);
    }
    delete mem_stats;
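    // Illustration: with alignment_mask == 0xFFF the assert below only requires the two
    // start addresses to match above the low 12 bits, i.e. to fall in the same 4 KB block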
    ASSERT_EQ(first_allocated_mem & ~alignment_mask, second_allocated_mem & ~alignment_mask)
        << "first address = " << std::hex << first_allocated_mem << ", second address = " << std::hex
        << second_allocated_mem << std::endl
        << "alignment mask: " << alignment_mask << ", seed: " << seed_;
}

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE, Alignment LOG_ALIGN_MAX_VALUE,
          size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>
inline void AllocatorTest<Allocator>::ObjectIteratingSetUp(size_t free_granularity, size_t pools_count,
                                                           Allocator &allocator, size_t &elements_count,
                                                           std::vector<void *> &allocated_elements,
                                                           std::unordered_set<size_t> &used_indexes)
{
    AddMemoryPoolToAllocator(allocator);
    size_t allocated_pools = 1;
    auto doAllocations = [pools_count]([[maybe_unused]] size_t allocated_pools_count,
                                       [[maybe_unused]] size_t count) -> bool {
        if constexpr (ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR == 0) {
            return allocated_pools_count < pools_count;
        } else {
            (void)pools_count;
            return count < ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR;
        }
    };

    // Allocations
    while (doAllocations(allocated_pools, elements_count)) {
        size_t size = RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE);
        size_t align = RandFromRange(LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE);
        void *mem = allocator.Alloc(size, Alignment(align));
        if constexpr (ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR == 0) {
            if (mem == nullptr) {
                AddMemoryPoolToAllocator(allocator);
                allocated_pools++;
                mem = allocator.Alloc(size, Alignment(align));
            }
        }
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << size << " bytes in " << elements_count
                                    << " iteration, seed: " << seed_;
        allocated_elements.push_back(mem);
        used_indexes.insert(elements_count++);
    }
    // Free some elements
    for (size_t i = 0; i < elements_count; i += free_granularity) {
        size_t index = RandFromRange(0, elements_count - 1);
        auto it = used_indexes.find(index);
        if (it == used_indexes.end()) {
            it = used_indexes.begin();
            index = *it;
        }
        allocator.Free(allocated_elements[index]);
        used_indexes.erase(it);
    }
}

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE, Alignment LOG_ALIGN_MAX_VALUE,
          size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>
inline void AllocatorTest<Allocator>::ObjectCollectionTest(size_t free_granularity, size_t pools_count)
{
    size_t elements_count = 0;
    std::vector<void *> allocated_elements;
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);
    std::unordered_set<size_t> used_indexes;
    ObjectIteratingSetUp<MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE,
                         ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>(free_granularity, pools_count, allocator,
                                                                elements_count, allocated_elements, used_indexes);

    // Collect all objects into the unordered_set via the allocator's method
    allocator.Collect(&AllocatorTest<Allocator>::ReturnDeadAndPutInSet);
    // Check the unordered_set
    for (size_t i = 0; i < elements_count; i++) {
        auto it = used_indexes.find(i);
        if (it != used_indexes.end()) {
            void *mem = allocated_elements[i];
            ASSERT_TRUE(EraseFromSet(mem))
                << "Object at address " << std::hex << mem << " isn't in collected objects, seed: " << seed_;
        }
    }

    delete mem_stats;
    ASSERT_TRUE(IsEmptySet()) << "seed: " << seed_;
}

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE, Alignment LOG_ALIGN_MAX_VALUE,
          size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>
inline void AllocatorTest<Allocator>::ObjectIteratorTest(size_t free_granularity, size_t pools_count)
{
    size_t elements_count = 0;
    std::vector<void *> allocated_elements;
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);
    std::unordered_set<size_t> used_indexes;
    ObjectIteratingSetUp<MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE,
                         ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>(free_granularity, pools_count, allocator,
                                                                elements_count, allocated_elements, used_indexes);

    // Collect all objects into the unordered_set via the allocator's method
    allocator.IterateOverObjects(&AllocatorTest<Allocator>::VisitAndPutInSet);
    // Free all and check in the unordered_set
    for (size_t i = 0; i < elements_count; i++) {
        auto it = used_indexes.find(i);
        if (it != used_indexes.end()) {
            void *mem = allocated_elements[i];
            allocator.Free(mem);
            ASSERT_TRUE(EraseFromSet(mem))
                << "Object at address " << std::hex << mem << " isn't in collected objects, seed: " << seed_;
        }
    }

    delete mem_stats;
    ASSERT_TRUE(IsEmptySet()) << "seed: " << seed_;
}

template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, Alignment LOG_ALIGN_MIN_VALUE, Alignment LOG_ALIGN_MAX_VALUE,
          size_t ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>
inline void AllocatorTest<Allocator>::ObjectIteratorInRangeTest(size_t range_iteration_size, size_t free_granularity,
                                                                size_t pools_count)
{
    ASSERT((range_iteration_size & (range_iteration_size - 1U)) == 0U);
    size_t elements_count = 0;
    std::vector<void *> allocated_elements;
    std::unordered_set<size_t> used_indexes;
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);
    ObjectIteratingSetUp<MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE,
                         ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR>(free_granularity, pools_count, allocator,
                                                                elements_count, allocated_elements, used_indexes);

    void *min_obj_pointer = *std::min_element(allocated_elements.begin(), allocated_elements.end());
    void *max_obj_pointer = *std::max_element(allocated_elements.begin(), allocated_elements.end());
    // Collect all objects into the unordered_set via the allocator's method
    uintptr_t cur_pointer = ToUintPtr(min_obj_pointer);
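    // Round the start address down to a range_iteration_size boundary (a power of two),
    // so consecutive ranges of range_iteration_size bytes tile the whole object span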
    cur_pointer = cur_pointer & (~(range_iteration_size - 1));
    while (cur_pointer <= ToUintPtr(max_obj_pointer)) {
        allocator.IterateOverObjectsInRange(&AllocatorTest<Allocator>::VisitAndPutInSet, ToVoidPtr(cur_pointer),
                                            ToVoidPtr(cur_pointer + range_iteration_size - 1U));
        cur_pointer = cur_pointer + range_iteration_size;
    }

    // Free all and check in the unordered_set
    for (size_t i = 0; i < elements_count; i++) {
        auto it = used_indexes.find(i);
        if (it != used_indexes.end()) {
            void *mem = allocated_elements[i];
            allocator.Free(mem);
            ASSERT_TRUE(EraseFromSet(mem))
                << "Object at address " << std::hex << mem << " isn't in collected objects, seed: " << seed_;
        }
    }
    delete mem_stats;
    ASSERT_TRUE(IsEmptySet()) << "seed: " << seed_;
}

template <class Allocator>
template <size_t ELEMENTS_COUNT>
inline void AllocatorTest<Allocator>::AsanTest(size_t free_granularity, size_t pools_count)
{
    using element_type = uint64_t;
    static constexpr size_t ALLOC_SIZE = sizeof(element_type);
    static constexpr size_t ALLOCATIONS_COUNT = ELEMENTS_COUNT;

    if (free_granularity == 0) {
        free_granularity = 1;
    }

    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);
    for (size_t i = 0; i < pools_count; i++) {
        AddMemoryPoolToAllocatorProtected(allocator);
    }
    std::array<void *, ALLOCATIONS_COUNT> allocated_elements;
    // Allocations
    for (size_t i = 0; i < ALLOCATIONS_COUNT; ++i) {
        void *mem = allocator.Alloc(ALLOC_SIZE);
        ASSERT_TRUE(mem != nullptr) << "Didn't allocate " << ALLOC_SIZE << " bytes on " << i << " iteration";
        allocated_elements[i] = mem;
    }
    // Free some elements
    for (size_t i = 0; i < ALLOCATIONS_COUNT; i += free_granularity) {
        allocator.Free(allocated_elements[i]);
    }
    // ASan check
    for (size_t i = 0; i < ALLOCATIONS_COUNT; ++i) {
        if (i % free_granularity == 0) {
#ifdef PANDA_ASAN_ON
            EXPECT_DEATH(DeathWriteUint64(allocated_elements[i]), "")
                << "Write " << sizeof(element_type) << " bytes at address " << std::hex << allocated_elements[i];
#else
            continue;
#endif  // PANDA_ASAN_ON
        } else {
            allocator.Free(allocated_elements[i]);
        }
    }
    delete mem_stats;
}

template <class Allocator>
inline void AllocatorTest<Allocator>::AllocatedByThisAllocatorTest()
{
    mem::MemStatsType mem_stats;
    Allocator allocator(&mem_stats);
    AllocatedByThisAllocatorTest(allocator);
}

template <class Allocator>
inline void AllocatorTest<Allocator>::AllocatedByThisAllocatorTest(Allocator &allocator)
{
    static constexpr size_t ALLOC_SIZE = sizeof(uint64_t);
    AddMemoryPoolToAllocatorProtected(allocator);
    void *allocated_by_this = allocator.Alloc(ALLOC_SIZE);
    void *allocated_by_malloc = std::malloc(ALLOC_SIZE);
    uint8_t allocated_on_stack[ALLOC_SIZE];

    ASSERT_TRUE(AllocatedByThisAllocator(allocator, allocated_by_this)) << "address: " << std::hex << allocated_by_this;
    ASSERT_FALSE(AllocatedByThisAllocator(allocator, allocated_by_malloc))
        << "address: " << std::hex << allocated_by_malloc;
    ASSERT_FALSE(AllocatedByThisAllocator(allocator, static_cast<void *>(allocated_on_stack)))
        << "address on stack: " << std::hex << static_cast<void *>(allocated_on_stack);

    allocator.Free(allocated_by_this);
    std::free(allocated_by_malloc);

    ASSERT_FALSE(AllocatedByThisAllocator(allocator, allocated_by_malloc))
        << "after free, address: " << std::hex << allocated_by_malloc;
}

template <class Allocator>
void AllocatorTest<Allocator>::MT_AllocRun(AllocatorTest<Allocator> *allocator_test_instance, Allocator *allocator,
                                           std::atomic<size_t> *num_finished, size_t min_alloc_size,
                                           size_t max_alloc_size, size_t min_elements_count, size_t max_elements_count)
{
    size_t elements_count = allocator_test_instance->RandFromRange(min_elements_count, max_elements_count);
    std::unordered_set<size_t> used_indexes;
    // {memory, size, start_index_in_byte_array}
    std::vector<std::tuple<void *, size_t, size_t>> allocated_elements(elements_count);

    for (size_t i = 0; i < elements_count; ++i) {
        size_t size = allocator_test_instance->RandFromRange(min_alloc_size, max_alloc_size);
        // Allocation
        void *mem = allocator->Alloc(size);
        // Loop because other threads can use up the whole pool before we try to allocate something in it
        while (mem == nullptr) {
            allocator_test_instance->AddMemoryPoolToAllocator(*allocator);
            mem = allocator->Alloc(size);
        }
        ASSERT_TRUE(mem != nullptr);
        // Write random bytes
        allocated_elements[i] = {mem, size, allocator_test_instance->SetBytesFromByteArray(mem, size)};
        used_indexes.insert(i);
    }

    // Compare
    while (!used_indexes.empty()) {
        size_t i = allocator_test_instance->RandFromRange(0, elements_count - 1);
        auto it = used_indexes.find(i);
        if (it != used_indexes.end()) {
            used_indexes.erase(it);
        } else {
            i = *used_indexes.begin();
            used_indexes.erase(used_indexes.begin());
        }
        ASSERT_TRUE(allocator_test_instance->AllocatedByThisAllocator(*allocator, std::get<0>(allocated_elements[i])));
        ASSERT_TRUE(allocator_test_instance->CompareBytesWithByteArray(
            std::get<0>(allocated_elements[i]), std::get<1>(allocated_elements[i]), std::get<2>(allocated_elements[i])))
            << "Address: " << std::hex << std::get<0>(allocated_elements[i])
            << ", size: " << std::get<1>(allocated_elements[i])
            << ", start index in byte array: " << std::get<2>(allocated_elements[i])
            << ", seed: " << allocator_test_instance->seed_;
    }
    // Atomic with seq_cst order reason: data race on num_finished with the requirement for sequentially consistent
    // order where threads observe all modifications in the same order
    num_finished->fetch_add(1, std::memory_order_seq_cst);
}

template <class Allocator>
void AllocatorTest<Allocator>::MT_AllocFreeRun(AllocatorTest<Allocator> *allocator_test_instance, Allocator *allocator,
                                               std::atomic<size_t> *num_finished, size_t free_granularity,
                                               size_t min_alloc_size, size_t max_alloc_size, size_t min_elements_count,
                                               size_t max_elements_count)
{
    size_t elements_count = allocator_test_instance->RandFromRange(min_elements_count, max_elements_count);
    std::unordered_set<size_t> used_indexes;
    // {memory, size, start_index_in_byte_array}
    std::vector<std::tuple<void *, size_t, size_t>> allocated_elements(elements_count);

    for (size_t i = 0; i < elements_count; ++i) {
        size_t size = allocator_test_instance->RandFromRange(min_alloc_size, max_alloc_size);
        // Allocation
        void *mem = allocator->Alloc(size);
        // Retry in a loop because other threads can exhaust the whole pool before we try to allocate something in it
        while (mem == nullptr) {
            allocator_test_instance->AddMemoryPoolToAllocator(*allocator);
            mem = allocator->Alloc(size);
        }
        ASSERT_TRUE(mem != nullptr);
        // Write random bytes
        allocated_elements[i] = {mem, size, allocator_test_instance->SetBytesFromByteArray(mem, size)};
        used_indexes.insert(i);
    }

    // Free some elements
    for (size_t i = 0; i < elements_count; i += free_granularity) {
        size_t index = allocator_test_instance->RandFromRange(0, elements_count - 1);
        auto it = used_indexes.find(index);
        if (it != used_indexes.end()) {
            used_indexes.erase(it);
        } else {
            index = *used_indexes.begin();
            used_indexes.erase(used_indexes.begin());
        }
        ASSERT_TRUE(
            allocator_test_instance->AllocatedByThisAllocator(*allocator, std::get<0>(allocated_elements[index])));
        // Compare
        ASSERT_TRUE(allocator_test_instance->CompareBytesWithByteArray(std::get<0>(allocated_elements[index]),
                                                                       std::get<1>(allocated_elements[index]),
                                                                       std::get<2>(allocated_elements[index])))
            << "Address: " << std::hex << std::get<0>(allocated_elements[index])
            << ", size: " << std::get<1>(allocated_elements[index])
            << ", start index in byte array: " << std::get<2>(allocated_elements[index])
            << ", seed: " << allocator_test_instance->seed_;
        allocator->Free(std::get<0>(allocated_elements[index]));
    }

    // Compare and free
    while (!used_indexes.empty()) {
        size_t i = allocator_test_instance->RandFromRange(0, elements_count - 1);
        auto it = used_indexes.find(i);
        if (it != used_indexes.end()) {
            used_indexes.erase(it);
        } else {
            i = *used_indexes.begin();
            used_indexes.erase(used_indexes.begin());
        }
        // Compare
        ASSERT_TRUE(allocator_test_instance->CompareBytesWithByteArray(
            std::get<0>(allocated_elements[i]), std::get<1>(allocated_elements[i]), std::get<2>(allocated_elements[i])))
            << "Address: " << std::hex << std::get<0>(allocated_elements[i])
            << ", size: " << std::get<1>(allocated_elements[i])
            << ", start index in byte array: " << std::get<2>(allocated_elements[i])
            << ", seed: " << allocator_test_instance->seed_;
        allocator->Free(std::get<0>(allocated_elements[i]));
    }
    // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially consistent order
    // where threads observe all modifications in the same order
    num_finished->fetch_add(1, std::memory_order_seq_cst);
}

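/**
 * \brief Thread routine for MT_AllocIterateTest: allocates elements filled with bytes from the byte array
 * and verifies them while repeatedly iterating over all objects and over address ranges
 */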
template <class Allocator>
void AllocatorTest<Allocator>::MT_AllocIterateRun(AllocatorTest<Allocator> *allocator_test_instance,
                                                  Allocator *allocator, std::atomic<size_t> *num_finished,
                                                  size_t range_iteration_size, size_t min_alloc_size,
                                                  size_t max_alloc_size, size_t min_elements_count,
                                                  size_t max_elements_count)
{
    static constexpr size_t ITERATION_IN_RANGE_COUNT = 100;
    size_t elements_count = allocator_test_instance->RandFromRange(min_elements_count, max_elements_count);
    // {memory, size, start_index_in_byte_array}
    std::vector<std::tuple<void *, size_t, size_t>> allocated_elements(elements_count);

    // Iterate over all objects
    allocator->IterateOverObjects([&](void *mem) { (void)mem; });

    // Allocate objects
    for (size_t i = 0; i < elements_count; ++i) {
        size_t size = allocator_test_instance->RandFromRange(min_alloc_size, max_alloc_size);
        // Allocation
        void *mem = allocator->Alloc(size);
        // Retry in a loop because other threads can exhaust the whole pool before we try to allocate something in it
        while (mem == nullptr) {
            allocator_test_instance->AddMemoryPoolToAllocator(*allocator);
            mem = allocator->Alloc(size);
        }
        ASSERT_TRUE(mem != nullptr);
        // Write random bytes
        allocated_elements[i] = {mem, size, allocator_test_instance->SetBytesFromByteArray(mem, size)};
    }

    // Iterate over all objects
    allocator->IterateOverObjects([&](void *mem) { (void)mem; });

    size_t iterated_over_objects = 0;
    // Compare values inside the objects
    for (size_t i = 0; i < elements_count; ++i) {
        // Do many iterate-over-range calls to check for possible races.
        // NOTE: range_iteration_size is expected to be a power of two, since it is used as an alignment mask here
        if (iterated_over_objects < ITERATION_IN_RANGE_COUNT) {
            void *left_border = ToVoidPtr(ToUintPtr(std::get<0>(allocated_elements[i])) & ~(range_iteration_size - 1U));
            void *right_border = ToVoidPtr(ToUintPtr(left_border) + range_iteration_size - 1U);
            allocator->IterateOverObjectsInRange([&](void *mem) { (void)mem; }, left_border, right_border);
            iterated_over_objects++;
        }
        ASSERT_TRUE(allocator_test_instance->AllocatedByThisAllocator(*allocator, std::get<0>(allocated_elements[i])));
        // Compare
        ASSERT_TRUE(allocator_test_instance->CompareBytesWithByteArray(
            std::get<0>(allocated_elements[i]), std::get<1>(allocated_elements[i]), std::get<2>(allocated_elements[i])))
            << "Address: " << std::hex << std::get<0>(allocated_elements[i])
            << ", size: " << std::get<1>(allocated_elements[i])
            << ", start index in byte array: " << std::get<2>(allocated_elements[i])
            << ", seed: " << allocator_test_instance->seed_;
    }
    // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially consistent order
    // where threads observe all modifications in the same order
    num_finished->fetch_add(1, std::memory_order_seq_cst);
}

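/**
 * \brief Thread routine for MT_AllocCollectTest: allocates objects, marks them for GC,
 * and lets at most max_thread_with_collect threads run Collect concurrently
 */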
template <class Allocator>
void AllocatorTest<Allocator>::MT_AllocCollectRun(AllocatorTest<Allocator> *allocator_test_instance,
                                                  Allocator *allocator, std::atomic<size_t> *num_finished,
                                                  size_t min_alloc_size, size_t max_alloc_size,
                                                  size_t min_elements_count, size_t max_elements_count,
                                                  uint32_t max_thread_with_collect,
                                                  std::atomic<uint32_t> *thread_with_collect)
{
    size_t elements_count = allocator_test_instance->RandFromRange(min_elements_count, max_elements_count);

    // Allocate objects
    for (size_t i = 0; i < elements_count; ++i) {
        size_t size = allocator_test_instance->RandFromRange(min_alloc_size, max_alloc_size);
        // Allocation
        void *mem = allocator->Alloc(size);
        // Retry in a loop because other threads can exhaust the whole pool before we try to allocate something in it
        while (mem == nullptr) {
            allocator_test_instance->AddMemoryPoolToAllocator(*allocator);
            mem = allocator->Alloc(size);
        }
        ASSERT_TRUE(mem != nullptr);
        auto object = static_cast<ObjectHeader *>(mem);
        object->SetMarkedForGC();
    }

    // Collect objects
    // Atomic with seq_cst order reason: data race with thread_with_collect with requirement for sequentially
    // consistent order where threads observe all modifications in the same order
    if (thread_with_collect->fetch_add(1U, std::memory_order_seq_cst) < max_thread_with_collect) {
        allocator->Collect([&](ObjectHeader *object) {
            ObjectStatus object_status =
                object->IsMarkedForGC() ? ObjectStatus::DEAD_OBJECT : ObjectStatus::ALIVE_OBJECT;
            return object_status;
        });
    }
    // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially consistent order
    // where threads observe all modifications in the same order
    num_finished->fetch_add(1, std::memory_order_seq_cst);
}

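/**
 * \brief Prepare the allocator for an MT test: fill one pool with alloc_size elements,
 * free all but one of them, and release the fully freed pools via VisitAndRemoveFreePools
 */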
template <class Allocator>
void AllocatorTest<Allocator>::MTTestPrologue(Allocator &allocator, size_t alloc_size)
{
    // Prepare the allocator:
    std::vector<void *> allocated_elements;
    AddMemoryPoolToAllocator(allocator);
    // Allocate objects until the current pool is exhausted
    while (true) {
        void *mem = allocator.Alloc(alloc_size);
        if (mem == nullptr) {
            break;
        }
        allocated_elements.push_back(mem);
    }
    // Free everything except one element:
    for (size_t i = 1; i < allocated_elements.size(); ++i) {
        allocator.Free(allocated_elements[i]);
    }

    allocator.VisitAndRemoveFreePools([&](void *mem, size_t size) {
        (void)mem;
        (void)size;
    });
}

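/**
 * \brief Run THREADS_COUNT detached threads that concurrently allocate and verify elements
 * of size [MIN_ALLOC_SIZE, MAX_ALLOC_SIZE] in the given allocator and wait for all of them to finish
 */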
template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
inline void AllocatorTest<Allocator>::MT_AllocTest(Allocator *allocator, size_t min_elements_count,
                                                   size_t max_elements_count)
{
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static_assert(THREADS_COUNT == 1);
#endif
    std::atomic<size_t> num_finished = 0;
    for (size_t i = 0; i < THREADS_COUNT; i++) {
        auto tid = os::thread::ThreadStart(&MT_AllocRun, this, allocator, &num_finished, MIN_ALLOC_SIZE, MAX_ALLOC_SIZE,
                                           min_elements_count, max_elements_count);
        os::thread::ThreadDetach(tid);
    }

    while (true) {
        // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially consistent
        // order where threads observe all modifications in the same order
        if (num_finished.load(std::memory_order_seq_cst) == THREADS_COUNT) {
            break;
        }
        os::thread::ThreadYield();
    }
}

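/**
 * \brief Run THREADS_COUNT detached threads that concurrently allocate, verify and free elements
 * in a freshly created allocator instance and wait for all of them to finish
 */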
template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
inline void AllocatorTest<Allocator>::MT_AllocFreeTest(size_t min_elements_count, size_t max_elements_count,
                                                       size_t free_granularity)
{
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static_assert(THREADS_COUNT == 1);
#endif
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);
    std::atomic<size_t> num_finished = 0;

    // Prepare an allocator
    MTTestPrologue(allocator, RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE));

    for (size_t i = 0; i < THREADS_COUNT; i++) {
        auto tid = os::thread::ThreadStart(&MT_AllocFreeRun, this, &allocator, &num_finished, free_granularity,
                                           MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, min_elements_count, max_elements_count);
        os::thread::ThreadDetach(tid);
    }

    while (true) {
        // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially consistent
        // order where threads observe all modifications in the same order
        if (num_finished.load(std::memory_order_seq_cst) == THREADS_COUNT) {
            break;
        }
        os::thread::ThreadYield();
    }
    delete mem_stats;
}

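/**
 * \brief Run THREADS_COUNT detached threads that concurrently allocate, verify and iterate over objects,
 * then collect everything left in the allocator
 */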
template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
inline void AllocatorTest<Allocator>::MT_AllocIterateTest(size_t min_elements_count, size_t max_elements_count,
                                                          size_t range_iteration_size)
{
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static_assert(THREADS_COUNT == 1);
#endif
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);
    std::atomic<size_t> num_finished = 0;

    // Prepare an allocator
    MTTestPrologue(allocator, RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE));

    for (size_t i = 0; i < THREADS_COUNT; i++) {
        auto tid = os::thread::ThreadStart(&MT_AllocIterateRun, this, &allocator, &num_finished, range_iteration_size,
                                           MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, min_elements_count, max_elements_count);
        os::thread::ThreadDetach(tid);
    }

    while (true) {
        // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially consistent
        // order where threads observe all modifications in the same order
        if (num_finished.load(std::memory_order_seq_cst) == THREADS_COUNT) {
            break;
        }
        os::thread::ThreadYield();
    }

    // Delete all objects in allocator
    allocator.Collect([&](ObjectHeader *object) {
        (void)object;
        return ObjectStatus::DEAD_OBJECT;
    });
    delete mem_stats;
}

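/**
 * \brief Run THREADS_COUNT detached threads that concurrently allocate objects and run Collect,
 * then collect everything left in the allocator
 */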
template <class Allocator>
template <size_t MIN_ALLOC_SIZE, size_t MAX_ALLOC_SIZE, size_t THREADS_COUNT>
inline void AllocatorTest<Allocator>::MT_AllocCollectTest(size_t min_elements_count, size_t max_elements_count,
                                                          size_t max_thread_with_collect)
{
#if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32)
    // We have an issue with QEMU during MT tests. Issue 2852
    static_assert(THREADS_COUNT == 1);
#endif
    mem::MemStatsType *mem_stats = new mem::MemStatsType();
    Allocator allocator(mem_stats);
    std::atomic<size_t> num_finished = 0;
    std::atomic<uint32_t> thread_with_collect {0U};

    // Prepare an allocator
    MTTestPrologue(allocator, RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE));

    for (size_t i = 0; i < THREADS_COUNT; i++) {
        auto tid = os::thread::ThreadStart(&MT_AllocCollectRun, this, &allocator, &num_finished, MIN_ALLOC_SIZE,
                                           MAX_ALLOC_SIZE, min_elements_count, max_elements_count,
                                           max_thread_with_collect, &thread_with_collect);
        os::thread::ThreadDetach(tid);
    }

    while (true) {
        // Atomic with seq_cst order reason: data race with num_finished with requirement for sequentially consistent
        // order where threads observe all modifications in the same order
        if (num_finished.load(std::memory_order_seq_cst) == THREADS_COUNT) {
            break;
        }
        os::thread::ThreadYield();
    }

    // Delete all objects in allocator
    allocator.Collect([&](ObjectHeader *object) {
        (void)object;
        return ObjectStatus::DEAD_OBJECT;
    });
    delete mem_stats;
}

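// A minimal usage sketch, assuming a hypothetical FooAllocator (FooAllocator, FooAllocatorTest and
// ContainObject below are illustrative assumptions, not part of this header): a concrete test fixture
// overrides the three pure virtual hooks and then drives the MT scenarios from a gtest body.
//
//     class FooAllocatorTest : public AllocatorTest<FooAllocator> {
//     protected:
//         void AddMemoryPoolToAllocator(FooAllocator &allocator) override
//         {
//             // Hand a fresh pool to the allocator (implementation-specific)
//         }
//         void AddMemoryPoolToAllocatorProtected(FooAllocator &allocator) override
//         {
//             // Same as above, plus memory protection if the allocator supports it
//         }
//         bool AllocatedByThisAllocator(FooAllocator &allocator, void *mem) override
//         {
//             // Assumes the allocator exposes some ownership check, e.g. ContainObject
//             return allocator.ContainObject(static_cast<ObjectHeader *>(mem));
//         }
//     };
//
//     TEST_F(FooAllocatorTest, MTAllocFreeTest)
//     {
//         static constexpr size_t MIN_ALLOC_SIZE = 8U;
//         static constexpr size_t MAX_ALLOC_SIZE = 128U;
//         static constexpr size_t THREADS_COUNT = 4U;
//         // 100..200 elements per thread, freeing roughly every second element
//         MT_AllocFreeTest<MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, THREADS_COUNT>(100U, 200U, 2U);
//     }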
}  // namespace panda::mem

#endif  // PANDA_RUNTIME_TESTS_ALLOCATOR_TEST_BASE_H_