// Copyright 2024 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "pw_allocator/buddy_allocator.h"

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <new>

#include "pw_allocator/buffer.h"
#include "pw_assert/check.h"
#include "pw_bytes/alignment.h"

namespace pw::allocator::internal {

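// Illustrative usage sketch (hypothetical names and sizes; assumes `Bucket`
// is default-constructible):
//
//   std::array<Bucket, 3> buckets;  // chunk sizes 16, 32, and 64 bytes
//   GenericBuddyAllocator allocator(buckets, /*min_chunk_size=*/16);
//
//   std::array<std::byte, 1024> storage;
//   allocator.Init(storage);
//
//   void* ptr = allocator.Allocate(Layout(/*size=*/32, /*alignment=*/16));
//   allocator.Deallocate(ptr);
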
GenericBuddyAllocator::GenericBuddyAllocator(span<Bucket> buckets,
                                             size_t min_chunk_size)
    : buckets_(buckets) {
  Bucket::Init(buckets, min_chunk_size);
}

void GenericBuddyAllocator::Init(ByteSpan region) {
  CrashIfAllocated();
  size_t min_chunk_size = buckets_[0].chunk_size();
  region_ = GetAlignedSubspan(region, min_chunk_size);
  PW_CHECK_INT_GE(region_.size(), min_chunk_size);

  // Build up the available memory by successively freeing (and thus merging)
  // minimum sized chunks.
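  // Zeroing the region also zeroes every stored bucket index, so GetLayout()
  // reports the minimum chunk size for each chunk freed below.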
  std::memset(region_.data(), 0, region_.size());
  for (region = region_; region.size() >= min_chunk_size;
       region = region.subspan(min_chunk_size)) {
    Deallocate(region.data());
  }
}

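// Sums the free bytes tracked by each bucket and crashes unless they account
// for the entire region, i.e. unless every allocation has been released.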
void GenericBuddyAllocator::CrashIfAllocated() {
  size_t total_free = 0;
  for (auto& bucket : buckets_) {
    size_t bucket_size;
    PW_CHECK_MUL(bucket.chunk_size(), bucket.count(), &bucket_size);
    PW_CHECK_ADD(total_free, bucket_size, &total_free);
  }
  PW_CHECK_INT_EQ(region_.size(),
                  total_free,
                  "%zu bytes were still in use when an allocator was "
                  "destroyed. All memory allocated by an allocator must be "
                  "released before the allocator goes out of scope.",
                  region_.size() - total_free);
  region_ = ByteSpan();
}

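// Serves a request from the smallest bucket whose chunk size fits `layout`,
// splitting a chunk from a larger bucket when that bucket is empty. The index
// of the serving bucket is recorded in the byte just before the returned
// chunk so the allocation's size can be recovered later.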
void* GenericBuddyAllocator::Allocate(Layout layout) {
  if (layout.alignment() > buckets_[0].chunk_size()) {
    return nullptr;
  }

  std::byte* chunk = nullptr;
  size_t chunk_size = 0;
  size_t index = 0;
  for (auto& bucket : buckets_) {
    chunk_size = bucket.chunk_size();
    if (chunk_size < layout.size()) {
      ++index;
      continue;
    }
    layout = Layout(chunk_size, layout.alignment());

    // Check if this bucket has chunks available.
    chunk = bucket.Remove();
    if (chunk != nullptr) {
      break;
    }

    // No chunk is available here, so allocate one from a larger bucket and
    // split it.
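    // layout.Extend(chunk_size) doubles the requested size; the first half of
    // the resulting chunk is used here and the second half is added back to
    // this bucket's free list below.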
    void* ptr = Allocate(layout.Extend(chunk_size));
    if (ptr == nullptr) {
      break;
    }
    chunk = std::launder(reinterpret_cast<std::byte*>(ptr));
    bucket.Add(chunk + chunk_size);
    break;
  }
  if (chunk == nullptr) {
    return nullptr;
  }
  // Store the bucket index in the byte *before* the usable space. Use the last
  // byte for the first chunk.
  if (chunk == region_.data()) {
    region_[region_.size() - 1] = std::byte(index);
  } else {
    *(chunk - 1) = std::byte(index);
  }
  return chunk;
}

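// Returns a chunk to the free lists, merging it with its buddy (and the
// merged chunk with its buddy, and so on) whenever the buddy is also free.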
void GenericBuddyAllocator::Deallocate(void* ptr) {
  if (ptr == nullptr) {
    return;
  }
  auto* chunk = std::launder(reinterpret_cast<std::byte*>(ptr));
  auto layout = GetLayout(ptr);
  PW_CHECK_OK(layout.status());
  size_t chunk_size = layout->size();

  Bucket* bucket = nullptr;
  PW_CHECK_INT_GT(buckets_.size(), 0);
  for (auto& current : span(buckets_.data(), buckets_.size() - 1)) {
    if (current.chunk_size() < chunk_size) {
      continue;
    }
    bucket = &current;

    // Compute the expected address of this chunk's buddy based on whether it
    // would be the first or second half of a merged chunk of the next larger
    // size.
    std::byte* buddy = chunk;
    if ((chunk - region_.data()) % (chunk_size * 2) == 0) {
      buddy += current.chunk_size();
    } else {
      buddy -= current.chunk_size();
    }
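    // E.g. with 16-byte chunks, a chunk at offset 32 starts a 32-byte pair,
    // so its buddy is at offset 48; a chunk at offset 16 ends one, so its
    // buddy is at offset 0.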

    // Look for the buddy chunk in this bucket. If found, remove it from the
    // bucket, and repeat the whole process with the merged chunk.
    void* match =
        current.RemoveIf([buddy](void* other) { return buddy == other; });
    if (match == nullptr) {
      break;
    }
    chunk = std::min(chunk, buddy);
    chunk_size *= 2;
    bucket = nullptr;
  }
  if (bucket == nullptr) {
    bucket = &buckets_.back();
  }

  // Add the (possibly merged) chunk to the correct bucket of free chunks.
  bucket->Add(chunk);
}

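// Recovers the layout of an allocated chunk by checking that `ptr` lies on a
// chunk boundary within the region and reading back the bucket index stored
// by Allocate().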
Result<Layout> GenericBuddyAllocator::GetLayout(const void* ptr) const {
  if (ptr < region_.data()) {
    return Status::OutOfRange();
  }
  size_t min_chunk_size = buckets_.front().chunk_size();
  size_t offset = reinterpret_cast<uintptr_t>(ptr) -
                  reinterpret_cast<uintptr_t>(region_.data());
  if (region_.size() <= offset || offset % min_chunk_size != 0) {
    return Status::OutOfRange();
  }
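  // The first chunk has no preceding byte inside the region, so its bucket
  // index lives in the region's last byte instead (mirroring Allocate()).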
  const auto* chunk = std::launder(reinterpret_cast<const std::byte*>(ptr));
  std::byte index =
      ptr == region_.data() ? region_[region_.size() - 1] : *(chunk - 1);
  return Layout(buckets_[size_t(index)].chunk_size(), min_chunk_size);
}

}  // namespace pw::allocator::internal