// Copyright 2024 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "pw_allocator/buddy_allocator.h"

#include <cstring>
#include <utility>

#include "lib/stdcompat/bit.h"
#include "pw_allocator/hardening.h"
#include "pw_assert/check.h"
#include "pw_bytes/alignment.h"

namespace pw::allocator::internal {

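// Initializes a block spanning `outer_size` bytes, including its metadata.
// `outer_size` is assumed to be a power of two, so only its base-2 logarithm
// needs to be stored.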
BuddyBlock::BuddyBlock(size_t outer_size) {
  outer_size_log2_ = cpp20::countr_zero(outer_size);
}

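// Reports whether a request described by `layout` fits in this block:
// ResourceExhausted if the requested size exceeds the usable (inner) size, OK
// otherwise. Alignment is assumed to be validated by the caller.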
StatusWithSize BuddyBlock::CanAlloc(Layout layout) const {
  return layout.size() > InnerSize() ? StatusWithSize::ResourceExhausted()
                                     : StatusWithSize(0);
}

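// Halves this block in place and constructs its buddy in the upper half of
// the original memory, returning a pointer to that new block.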
BuddyBlock* BuddyBlock::Split() {
  outer_size_log2_--;
  std::byte* ptr = UsableSpace() + InnerSize();
  return new (ptr) BuddyBlock(OuterSize());
}

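// Merges two adjacent buddies into a single block of twice the size. The
// lower-addressed block becomes the merged block; arguments may be given in
// either order.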
BuddyBlock* BuddyBlock::Merge(BuddyBlock*&& left, BuddyBlock*&& right) {
  if (right < left) {
    return BuddyBlock::Merge(std::move(right), std::move(left));
  }
  left->outer_size_log2_++;
  return left;
}

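// Assigns each bucket a block size: the first bucket tracks free blocks of
// `min_outer_size` bytes, and each subsequent bucket tracks blocks twice as
// large as the previous one.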
GenericBuddyAllocator::GenericBuddyAllocator(span<BucketType> buckets,
                                             size_t min_outer_size)
    : buckets_(buckets) {
  min_outer_size_ = min_outer_size;
  for (BucketType& bucket : buckets) {
    size_t max_inner_size = BuddyBlock::InnerSizeFromOuterSize(min_outer_size);
    bucket.set_max_inner_size(max_inner_size);
    min_outer_size <<= 1;
  }
}

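// Carves `region` into minimum-sized blocks and then frees each one, letting
// adjacent buddies coalesce into the largest blocks the region allows.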
void GenericBuddyAllocator::Init(ByteSpan region) {
  CrashIfAllocated();

  // Ensure there is a byte preceding the first `BuddyBlock`.
  region = GetAlignedSubspan(region.subspan(1), min_outer_size_);
  region = ByteSpan(region.data() - 1, region.size() + 1);
  if constexpr (Hardening::kIncludesBasicChecks) {
    PW_CHECK_INT_GE(region.size(), min_outer_size_);
  }

  // Build up the available memory by successively freeing (and thus merging)
  // minimum sized blocks.
  std::byte* data = region.data();
  size_t count = 0;
  while (region.size() >= min_outer_size_) {
    new (region.data()) BuddyBlock(min_outer_size_);
    region = region.subspan(min_outer_size_);
    ++count;
  }
  region_ = ByteSpan(data, min_outer_size_ * count);
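  // Advance `data` to the first block's usable space, then free every
  // minimum-sized block; freeing lets adjacent buddies merge back together.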
  data++;
  for (size_t i = 0; i < count; ++i) {
    Deallocate(data);
    data += min_outer_size_;
  }
}

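// Verifies that all allocated memory has been returned to the allocator,
// crashing (when robust checks are enabled) if any is still outstanding, and
// then relinquishes the region.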
void GenericBuddyAllocator::CrashIfAllocated() {
  size_t total_free = 0;
  // Drain all the buckets before destroying the list. Although O(n), this
  // should be reasonably quick since all memory should have been freed and
  // coalesced prior to calling this method.
  for (auto& bucket : buckets_) {
    while (!bucket.empty()) {
      BuddyBlock* block = bucket.RemoveAny();
      total_free += block->OuterSize();
    }
  }
  if constexpr (Hardening::kIncludesRobustChecks) {
    PW_CHECK_INT_EQ(region_.size(),
                    total_free,
                    "%zu bytes were still in use when an allocator was "
                    "destroyed. All memory allocated by an allocator must be "
                    "released before the allocator goes out of scope.",
                    region_.size() - total_free);
  }
  region_ = ByteSpan();
}

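// Walks the buckets from smallest to largest and returns a free block from
// the first bucket large enough to satisfy `layout`. If that bucket is empty,
// a block is recursively allocated from the next larger bucket and split in
// two, with the unused half added back to the current bucket.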
void* GenericBuddyAllocator::Allocate(Layout layout) {
  if (layout.size() > buckets_.back().max_inner_size()) {
    return nullptr;
  }
  if (layout.alignment() > min_outer_size_) {
    return nullptr;
  }

  for (auto& bucket : buckets_) {
    size_t inner_size = bucket.max_inner_size();
    size_t outer_size = BuddyBlock::OuterSizeFromInnerSize(inner_size);
    if (inner_size < layout.size()) {
      continue;
    }
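    // Round the requested size up to this bucket's usable block size before
    // searching for or creating a block.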
    layout = Layout(inner_size, layout.alignment());

    // Check if this bucket has a compatible free block available.
    if (auto* block = bucket.RemoveCompatible(layout); block != nullptr) {
      return block->UsableSpace();
    }

    // No compatible free blocks are available; allocate one from the next
    // bucket and split it.
    void* ptr = Allocate(layout.Extend(outer_size));
    if (ptr == nullptr) {
      break;
    }

    auto* block = BuddyBlock::FromUsableSpace(ptr);
    BuddyBlock* buddy = block->Split();
    std::ignore = bucket.Add(*buddy);
    return ptr;
  }
  return nullptr;
}

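// Returns a block to the allocator, repeatedly merging it with its buddy (the
// adjacent block of the same size) while that buddy is also free, then adds
// the resulting block to the bucket matching its final size.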
void GenericBuddyAllocator::Deallocate(void* ptr) {
  if (ptr == nullptr) {
    return;
  }

  auto* block = BuddyBlock::FromUsableSpace(ptr);
  BucketType* bucket = nullptr;
  PW_CHECK_INT_GT(buckets_.size(), 0);
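  // Search all but the largest bucket: blocks of the largest size have no
  // larger blocks to merge into, so they fall through to the fallback below.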
  for (auto& current : span(buckets_.data(), buckets_.size() - 1)) {
    size_t outer_size =
        BuddyBlock::OuterSizeFromInnerSize(current.max_inner_size());
    if (outer_size < block->OuterSize()) {
      continue;
    }
    bucket = &current;

    // Determine the expected address of this free block's buddy by checking
    // whether it would be first or second in a merged block of the next
    // larger size.
    std::byte* item = block->UsableSpace();
    if ((item - region_.data()) % (block->OuterSize() * 2) == 0) {
      item += outer_size;
    } else {
      item -= outer_size;
    }
    // Blocks at the end of the range may not have a buddy.
    if (item < region_.data() || region_.data() + region_.size() < item) {
      break;
    }

    // Look for the buddy block in the current bucket. If found, remove it
    // from that bucket, and repeat the whole process with the merged block.
    auto* buddy = BuddyBlock::FromUsableSpace(item);
    if (!current.Remove(*buddy)) {
      break;
    }

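    // Merge with the buddy and keep searching. Clearing `bucket` means the
    // merged block falls through to the largest bucket below if no larger
    // bucket accepts it.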
    block = BuddyBlock::Merge(std::move(block), std::move(buddy));
    bucket = nullptr;
  }
  if (bucket == nullptr) {
    bucket = &buckets_.back();
  }

  // Add the (possibly merged) free block to the correct bucket.
  std::ignore = bucket->Add(*block);
}

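// Returns the layout of the allocation at `ptr`, or OutOfRange if `ptr` does
// not fall within this allocator's region at a valid block offset.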
Result<Layout> GenericBuddyAllocator::GetLayout(const void* ptr) const {
  if (ptr < region_.data()) {
    return Status::OutOfRange();
  }
  size_t offset = cpp20::bit_cast<uintptr_t>(ptr) -
                  cpp20::bit_cast<uintptr_t>(region_.data());
  if (region_.size() <= offset || offset % min_outer_size_ != 0) {
    return Status::OutOfRange();
  }
  const auto* block = BuddyBlock::FromUsableSpace(ptr);
  return Layout(block->InnerSize(), min_outer_size_);
}

}  // namespace pw::allocator::internal