1 /**
2 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include <ctime>
17
18 #include "gtest/gtest.h"
19 #include "runtime/mem/alloc_config.h"
20 #include "runtime/mem/bump-allocator-inl.h"
21
22 namespace panda::mem {
23
// Bump-pointer allocator instantiated over the empty (non-object) memory config
// with the common lock policy. UseTlabs toggles TLAB support — the tests below
// exercise CreateNewTLAB only on the <true> instantiation.
template <bool UseTlabs>
using NonObjectBumpAllocator =
    BumpPointerAllocator<EmptyMemoryConfig, BumpPointerAllocatorLockConfig::CommonLock, UseTlabs>;
27
// Test fixture: brings up the runtime memory configuration and pool manager
// before each test and tears everything down (mappings, arenas, managers)
// afterwards, so individual tests can allocate pools freely.
class BumpAllocatorTest : public testing::Test {
public:
    BumpAllocatorTest()
    {
        // Logger::InitializeStdLogging(Logger::Level::DEBUG, Logger::Component::ALL);
#ifdef PANDA_NIGHTLY_TEST_ON
        // Nightly runs randomize the seed to widen input coverage.
        seed_ = std::time(NULL);
#else
        // Fixed seed keeps regular runs deterministic and reproducible.
        seed_ = 0x0BADDEAD;
#endif
        srand(seed_);
        // MemConfig must be initialized before PoolManager, which relies on it.
        panda::mem::MemConfig::Initialize(0, 8_MB, 0, 0);
        PoolManager::Initialize();
    }

    ~BumpAllocatorTest()
    {
        // Release test-owned resources first: unmap raw regions, then delete
        // the Arena wrappers built on top of them.
        for (auto i : allocated_mem_mmap_) {
            panda::os::mem::UnmapRaw(std::get<0>(i), std::get<1>(i));
        }
        for (auto i : allocated_arenas) {
            delete i;
        }
        // Finalize in reverse order of construction.
        PoolManager::Finalize();
        panda::mem::MemConfig::Finalize();
        // Logger::Destroy();
    }

protected:
    // Maps an anonymous read-write region of `size` bytes and wraps it in a
    // new Arena. Both the mapping and the Arena are recorded so the fixture
    // destructor can release them.
    // NOTE(review): the result of MapRWAnonymousRaw is used without a nullptr
    // check — assumes the mapping cannot fail in the test environment; confirm.
    Arena *AllocateArena(size_t size)
    {
        void *mem = panda::os::mem::MapRWAnonymousRaw(size);
        ASAN_UNPOISON_MEMORY_REGION(mem, size);
        std::pair<void *, size_t> new_pair {mem, size};
        allocated_mem_mmap_.push_back(new_pair);
        auto arena = new Arena(size, mem);
        allocated_arenas.push_back(arena);
        return arena;
    }

    std::vector<std::pair<void *, size_t>> allocated_mem_mmap_;  // raw regions to unmap on teardown
    std::vector<Arena *> allocated_arenas;                       // arenas to delete on teardown
    unsigned seed_;                                              // seed for srand(); echoed in failure messages
};
72
// Verifies that the bump allocator returns correctly aligned, non-overlapping
// chunks, dies on a non-default alignment request (debug builds), and returns
// nullptr once the pool is exhausted.
TEST_F(BumpAllocatorTest, AlignedAlloc)
{
    testing::FLAGS_gtest_death_test_style = "fast";

    constexpr size_t BUFF_SIZE = SIZE_1M;
    constexpr size_t ARRAY_SIZE = 1024;
    auto pool = PoolManager::GetMmapMemPool()->AllocPool(BUFF_SIZE, SpaceType::SPACE_TYPE_INTERNAL,
                                                         AllocatorType::BUMP_ALLOCATOR);
    mem::MemStatsType mem_stats;
    NonObjectBumpAllocator<false> bp_allocator(pool, SpaceType::SPACE_TYPE_INTERNAL, &mem_stats);
    Alignment align = DEFAULT_ALIGNMENT;
    std::array<int *, ARRAY_SIZE> arr;

    // Low bits that must be zero in every returned address.
    size_t mask = GetAlignmentInBytes(align) - 1;

    // Allocations
    srand(seed_);
    for (size_t i = 0; i < ARRAY_SIZE; ++i) {
        arr[i] = static_cast<int *>(bp_allocator.Alloc(sizeof(int), align));
        // Fix: assert the allocation succeeded BEFORE dereferencing it.
        // The original wrote *arr[i] first and only checked for nullptr in the
        // verification loop below, so an allocation failure would crash the
        // test process instead of producing a readable assertion failure.
        ASSERT_NE(arr[i], nullptr) << "value of i: " << i << ", align: " << align << ", seed:" << seed_;
        *arr[i] = rand() % std::numeric_limits<int>::max();
    }

    // Allocations checking: same srand seed replays the identical rand() stream.
    srand(seed_);
    for (size_t i = 0; i < ARRAY_SIZE; ++i) {
        ASSERT_NE(arr[i], nullptr) << "value of i: " << i << ", align: " << align << ", seed:" << seed_;
        ASSERT_EQ(reinterpret_cast<size_t>(arr[i]) & mask, static_cast<size_t>(0))
            << "value of i: " << i << ", align: " << align << ", seed:" << seed_;
        ASSERT_EQ(*arr[i], rand() % std::numeric_limits<int>::max())
            << "value of i: " << i << ", align: " << align << ", seed:" << seed_;
    }
    static_assert(LOG_ALIGN_MAX != DEFAULT_ALIGNMENT, "We expect minimal alignment != DEFAULT_ALIGNMENT");
    void *ptr;
#ifndef NDEBUG
    // Debug builds assert that only DEFAULT_ALIGNMENT is supported.
    EXPECT_DEATH_IF_SUPPORTED(ptr = bp_allocator.Alloc(sizeof(int), LOG_ALIGN_MAX), "alignment == DEFAULT_ALIGNMENT")
        << ", seed:" << seed_;
#endif
    // The pool is already (partially) consumed, so a full 1 MB request must fail.
    ptr = bp_allocator.Alloc(SIZE_1M);
    ASSERT_EQ(ptr, nullptr) << "Here Alloc with allocation size = 1 MB should return nullptr"
                            << ", seed:" << seed_;
}
114
// Verifies mixed common-buffer and TLAB allocation: fills the common buffer,
// then a TLAB, checks exhaustion of both, replays the rand() stream to verify
// contents and alignment, then resets the allocator and repeats in the
// opposite order (TLAB first, then common buffer).
TEST_F(BumpAllocatorTest, CreateTLABAndAlloc)
{
    using ALLOC_TYPE = uint64_t;
    static_assert(sizeof(ALLOC_TYPE) % DEFAULT_ALIGNMENT_IN_BYTES == 0);
    constexpr size_t TLAB_SIZE = SIZE_1M;
    constexpr size_t COMMON_BUFFER_SIZE = SIZE_1M;
    constexpr size_t ALLOC_SIZE = sizeof(ALLOC_TYPE);
    constexpr size_t TLAB_ALLOC_COUNT_SIZE = TLAB_SIZE / ALLOC_SIZE;
    constexpr size_t COMMON_ALLOC_COUNT_SIZE = COMMON_BUFFER_SIZE / ALLOC_SIZE;

    // Low bits that must be zero in every returned address.
    size_t mask = DEFAULT_ALIGNMENT_IN_BYTES - 1;

    std::array<ALLOC_TYPE *, TLAB_ALLOC_COUNT_SIZE> tlab_elements;
    // Fix: this array is indexed up to COMMON_ALLOC_COUNT_SIZE, so it must be
    // sized by that constant, not TLAB_ALLOC_COUNT_SIZE. The two values happen
    // to coincide while both buffers are SIZE_1M, but a future change to
    // COMMON_BUFFER_SIZE would silently overflow the original declaration.
    std::array<ALLOC_TYPE *, COMMON_ALLOC_COUNT_SIZE> common_elements;
    auto pool = PoolManager::GetMmapMemPool()->AllocPool(TLAB_SIZE + COMMON_BUFFER_SIZE, SpaceType::SPACE_TYPE_INTERNAL,
                                                         AllocatorType::BUMP_ALLOCATOR);
    mem::MemStatsType mem_stats;
    NonObjectBumpAllocator<true> allocator(pool, SpaceType::SPACE_TYPE_OBJECT, &mem_stats, 1);
    {
        // Allocations in common buffer
        srand(seed_);
        for (size_t i = 0; i < COMMON_ALLOC_COUNT_SIZE; ++i) {
            common_elements[i] = static_cast<ALLOC_TYPE *>(allocator.Alloc(sizeof(ALLOC_TYPE)));
            ASSERT_TRUE(common_elements[i] != nullptr) << ", seed:" << seed_;
            *common_elements[i] = rand() % std::numeric_limits<ALLOC_TYPE>::max();
        }

        TLAB *tlab = allocator.CreateNewTLAB(TLAB_SIZE);
        ASSERT_TRUE(tlab != nullptr) << ", seed:" << seed_;
        // The allocator was configured for at most one TLAB.
        ASSERT_TRUE(allocator.CreateNewTLAB(TLAB_SIZE) == nullptr) << ", seed:" << seed_;
        // Allocations in TLAB
        srand(seed_);
        for (size_t i = 0; i < TLAB_ALLOC_COUNT_SIZE; ++i) {
            tlab_elements[i] = static_cast<ALLOC_TYPE *>(tlab->Alloc(sizeof(ALLOC_TYPE)));
            ASSERT_TRUE(tlab_elements[i] != nullptr) << ", seed:" << seed_;
            *tlab_elements[i] = rand() % std::numeric_limits<ALLOC_TYPE>::max();
        }

        // Check that we don't have memory in the buffer:
        ASSERT_TRUE(allocator.Alloc(sizeof(ALLOC_TYPE)) == nullptr);
        ASSERT_TRUE(tlab->Alloc(sizeof(ALLOC_TYPE)) == nullptr);

        // Allocations checking in common buffer (replay the same rand() stream)
        srand(seed_);
        for (size_t i = 0; i < COMMON_ALLOC_COUNT_SIZE; ++i) {
            ASSERT_NE(common_elements[i], nullptr) << "value of i: " << i << ", seed:" << seed_;
            ASSERT_EQ(reinterpret_cast<size_t>(common_elements[i]) & mask, static_cast<size_t>(0))
                << "value of i: " << i << ", seed:" << seed_;
            ASSERT_EQ(*common_elements[i], rand() % std::numeric_limits<ALLOC_TYPE>::max())
                << "value of i: " << i << ", seed:" << seed_;
        }

        // Allocations checking in TLAB
        srand(seed_);
        for (size_t i = 0; i < TLAB_ALLOC_COUNT_SIZE; ++i) {
            ASSERT_NE(tlab_elements[i], nullptr) << "value of i: " << i << ", seed:" << seed_;
            ASSERT_EQ(reinterpret_cast<size_t>(tlab_elements[i]) & mask, static_cast<size_t>(0))
                << "value of i: " << i << ", seed:" << seed_;
            ASSERT_EQ(*tlab_elements[i], rand() % std::numeric_limits<ALLOC_TYPE>::max())
                << "value of i: " << i << ", seed:" << seed_;
        }
    }
    // Reset must make both the TLAB slot and the common buffer reusable.
    allocator.Reset();
    {
        TLAB *tlab = allocator.CreateNewTLAB(TLAB_SIZE);
        ASSERT_TRUE(tlab != nullptr) << ", seed:" << seed_;
        ASSERT_TRUE(allocator.CreateNewTLAB(TLAB_SIZE) == nullptr) << ", seed:" << seed_;
        // Allocations in TLAB
        srand(seed_);
        for (size_t i = 0; i < TLAB_ALLOC_COUNT_SIZE; ++i) {
            tlab_elements[i] = static_cast<ALLOC_TYPE *>(tlab->Alloc(sizeof(ALLOC_TYPE)));
            ASSERT_TRUE(tlab_elements[i] != nullptr) << ", seed:" << seed_;
            *tlab_elements[i] = rand() % std::numeric_limits<ALLOC_TYPE>::max();
        }

        // Allocations in common buffer
        srand(seed_);
        for (size_t i = 0; i < COMMON_ALLOC_COUNT_SIZE; ++i) {
            common_elements[i] = static_cast<ALLOC_TYPE *>(allocator.Alloc(sizeof(ALLOC_TYPE)));
            ASSERT_TRUE(common_elements[i] != nullptr) << ", seed:" << seed_;
            *common_elements[i] = rand() % std::numeric_limits<ALLOC_TYPE>::max();
        }

        // Check that we don't have memory in the buffer:
        ASSERT_TRUE(allocator.Alloc(sizeof(ALLOC_TYPE)) == nullptr);
        ASSERT_TRUE(tlab->Alloc(sizeof(ALLOC_TYPE)) == nullptr);

        // Allocations checking in TLAB
        srand(seed_);
        for (size_t i = 0; i < TLAB_ALLOC_COUNT_SIZE; ++i) {
            ASSERT_NE(tlab_elements[i], nullptr) << "value of i: " << i << ", seed:" << seed_;
            ASSERT_EQ(reinterpret_cast<size_t>(tlab_elements[i]) & mask, static_cast<size_t>(0))
                << "value of i: " << i << ", seed:" << seed_;
            ASSERT_EQ(*tlab_elements[i], rand() % std::numeric_limits<ALLOC_TYPE>::max())
                << "value of i: " << i << ", seed:" << seed_;
        }

        // Allocations checking in common buffer
        srand(seed_);
        for (size_t i = 0; i < COMMON_ALLOC_COUNT_SIZE; ++i) {
            ASSERT_NE(common_elements[i], nullptr) << "value of i: " << i << ", seed:" << seed_;
            ASSERT_EQ(reinterpret_cast<size_t>(common_elements[i]) & mask, static_cast<size_t>(0))
                << "value of i: " << i << ", seed:" << seed_;
            ASSERT_EQ(*common_elements[i], rand() % std::numeric_limits<ALLOC_TYPE>::max())
                << "value of i: " << i << ", seed:" << seed_;
        }
    }
}
223
// Verifies the TLAB-count limit: an allocator configured for TLAB_COUNT - 1
// buffers hands out exactly that many TLABs, rejects the next request, and
// behaves identically again after Reset().
TEST_F(BumpAllocatorTest, CreateTooManyTLABS)
{
    constexpr size_t TLAB_SIZE = SIZE_1M;
    constexpr size_t TLAB_COUNT = 3;
    auto pool = PoolManager::GetMmapMemPool()->AllocPool(TLAB_SIZE * TLAB_COUNT, SpaceType::SPACE_TYPE_INTERNAL,
                                                         AllocatorType::BUMP_ALLOCATOR);
    mem::MemStatsType mem_stats;
    NonObjectBumpAllocator<true> allocator(pool, SpaceType::SPACE_TYPE_OBJECT, &mem_stats, TLAB_COUNT - 1);

    // Creates the maximum number of TLABs, then checks the next one is refused.
    auto fill_to_limit_and_overflow = [&]() {
        for (size_t created = 0; created + 1 < TLAB_COUNT; ++created) {
            TLAB *tlab = allocator.CreateNewTLAB(TLAB_SIZE);
            ASSERT_TRUE(tlab != nullptr) << ", seed:" << seed_;
        }
        TLAB *tlab = allocator.CreateNewTLAB(TLAB_SIZE);
        ASSERT_TRUE(tlab == nullptr) << ", seed:" << seed_;
    };

    fill_to_limit_and_overflow();
    allocator.Reset();
    fill_to_limit_and_overflow();
}
250
251 } // namespace panda::mem
252