• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2024 The Pigweed Authors
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
4 // use this file except in compliance with the License. You may obtain a copy of
5 // the License at
6 //
7 //     https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 // License for the specific language governing permissions and limitations under
13 // the License.
14 #pragma once
15 
#include <array>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <tuple>

#include "pw_allocator/block/detailed_block.h"
#include "pw_allocator/block_allocator.h"
#include "pw_allocator/bucket/fast_sorted.h"
#include "pw_allocator/bucket/sorted.h"

25 namespace pw::allocator {
26 
27 /// Alias for a default block type that is compatible with `TlsfAllocator`.
28 template <typename OffsetType>
29 using TlsfBlock = DetailedBlock<OffsetType, GenericFastSortedItem>;
30 
/// Default values for the template parameters of `TlsfAllocator`.
///
/// By default, this is tuned for allocations between 64B and 64KB.
struct TlsfDefaults {
  /// Default maximum inner size of the smallest bucket in a TLSF allocator's
  /// two-dimensional array of buckets.
  static constexpr size_t kMinSize = 64;

  /// Default number of rows in a TLSF allocator's two-dimensional array of
  /// buckets.
  static constexpr size_t kNumShelves = 10;
};
43 
/// Pair used to index a bucket in a two dimensional array.
struct TlsfIndices {
  // Row index: which shelf (size class of power-of-two span) to look in.
  size_t shelf;
  // Column index: which bucket on that shelf.
  size_t bucket;
};
49 
50 /// Two-layered, segregated fit allocator.
51 ///
52 /// This allocator uses a two-dimensional array of buckets to quickly satisfy
53 /// memory allocations with best-fit blocks as described by
54 /// http://www.gii.upv.es/tlsf/files/papers/ecrts04_tlsf.pdf
55 ///
56 /// This class refers to the "second-level arrays" in that paper as "shelves".
57 /// Each shelf holds an array of Buckets, and an instance of this class holds an
58 /// array of shelves. Conceptually, buckets can be thought of as being
59 /// organized on a set of "shelves", with each shelf having 16 buckets arranged
60 /// from smallest maximum inner size to largest. The smallest maximum inner size
61 /// on a shelf is a power of 2, and the shelves are arranged from the `kMinSize`
62 /// on the "bottom" to the largest maximum inner sizes on the "top". The last
63 /// bucket on the topmost shelf is unbounded to handle any blocks of arbitrary
64 /// size.
65 ///
66 /// For example, if `kMinSize` is 64, and `kNumShelves` is 10, than the maximum
67 /// inner sizes of buckets on each shelf could be represented as:
68 ///
69 /// @code
70 /// {
71 ///   shelves_[9]: { 32k, 34k, ..., 62k, inf },
72 ///           ...: { ..., ..., ..., ..., ... },
73 ///   shelves_[1]: { 128, 136, ..., 240, 248 },
74 ///   shelves_[0]: {  64,  68, ..., 120, 124 },
75 /// }
76 /// @endcode
77 ///
78 /// @tparam   BlockType     Block implementation
79 /// @tparam   kMinSize      Maximum inner size of blocks in the first bucket on
80 ///                         lowest shelf.
81 /// @tparam   kNumShelves   Number of rows in the two-dimensional array.
82 template <typename BlockType = TlsfBlock<uint32_t>,
83           size_t kMinSize = TlsfDefaults::kMinSize,
84           size_t kNumShelves = TlsfDefaults::kNumShelves>
85 class TlsfAllocator : public BlockAllocator<BlockType> {
86  private:
87   using Base = BlockAllocator<BlockType>;
88   using BucketType = FastSortedBucket<BlockType>;
89 
90   static constexpr size_t kNumBucketsPerShelf = 16;
91   static constexpr size_t kBucketBits = cpp20::countr_zero(kNumBucketsPerShelf);
92   using Shelf = std::array<BucketType, kNumBucketsPerShelf>;
93 
94   static_assert(kMinSize >= kNumBucketsPerShelf,
95                 "kMinSize must be at least 16.");
96   static_assert(
97       kMinSize >= sizeof(GenericFastSortedItem),
98       "kMinSize must be large enough to hold a FastSortedBucket item.");
99   static_assert((kMinSize & (kMinSize - 1)) == 0,
100                 "kMinSize must be a power of two.");
101 
102   static_assert(kNumShelves <= 32, "kNumShelves cannot be larger than 32");
103 
104  public:
105   /// Constexpr constructor. Callers must explicitly call `Init`.
106   constexpr TlsfAllocator();
107 
108   /// Non-constexpr constructor that automatically calls `Init`.
109   ///
110   /// @param[in]  region      Region of memory to use when satisfying allocation
111   ///                         requests. The region MUST be valid as an argument
112   ///                         to `BlockType::Init`.
TlsfAllocator(ByteSpan region)113   explicit TlsfAllocator(ByteSpan region) : TlsfAllocator() {
114     Base::Init(region);
115   }
116 
117  private:
118   /// @copydoc BlockAllocator::ChooseBlock
119   BlockResult<BlockType> ChooseBlock(Layout layout) override;
120 
121   /// @copydoc BlockAllocator::ReserveBlock
122   void ReserveBlock(BlockType& block) override;
123 
124   /// @copydoc BlockAllocator::RecycleBlock
125   void RecycleBlock(BlockType& block) override;
126 
127   /// Returns the shelf and bucket indices for the bucket with the smallest
128   /// maximum inner size greater than the given size.
129   static TlsfIndices MapToIndices(size_t size);
130 
131   /// Starting with the bucket indicated by the given `indices`, searches for
132   /// the non-empty bucket with the smallest maximum inner size. Updates the
133   /// given `indices` and returns true if such a bucket is found; otherwise
134   /// returns false.
135   bool FindNextAvailable(TlsfIndices& indices);
136 
137   /// Updates the shelf and bucket bitmaps to reflect whether the bucket
138   /// referenced by the given `indices` is `empty`.
139   void UpdateBitmaps(const TlsfIndices& indices, bool empty);
140 
141   uint32_t shelf_bitmap_ = 0;
142   std::array<uint16_t, kNumShelves> bucket_bitmaps_;
143   std::array<Shelf, kNumShelves> shelves_;
144   ForwardSortedBucket<BlockType> small_bucket_;
145 };
146 
147 // Template method implementations.
148 
149 template <typename BlockType, size_t kMinSize, size_t kNumShelves>
TlsfAllocator()150 constexpr TlsfAllocator<BlockType, kMinSize, kNumShelves>::TlsfAllocator() {
151   size_t size = kMinSize;
152   size_t step = kMinSize / kNumBucketsPerShelf;
153   for (Shelf& shelf : shelves_) {
154     for (BucketType& bucket : shelf) {
155       size += step;
156       bucket.set_max_inner_size(size - 1);
157     }
158     step *= 2;
159   }
160 
161   // The largest bucket is unbounded.
162   BucketType& largest = shelves_[kNumShelves - 1][kNumBucketsPerShelf - 1];
163   largest.set_max_inner_size(std::numeric_limits<size_t>::max());
164 
165   bucket_bitmaps_.fill(0);
166 }
167 
168 template <typename BlockType, size_t kMinSize, size_t kNumShelves>
169 BlockResult<BlockType>
ChooseBlock(Layout layout)170 TlsfAllocator<BlockType, kMinSize, kNumShelves>::ChooseBlock(Layout layout) {
171   // Check the small bucket.
172   if (layout.size() < small_bucket_.max_inner_size()) {
173     BlockType* block = small_bucket_.RemoveCompatible(layout);
174     if (block != nullptr) {
175       return BlockType::AllocFirst(std::move(block), layout);
176     }
177   }
178 
179   // Check the buckets on the shelves.
180   for (TlsfIndices indices = MapToIndices(layout.size());
181        FindNextAvailable(indices);
182        indices.bucket++) {
183     FastSortedBucket<BlockType>& bucket =
184         shelves_[indices.shelf][indices.bucket];
185     BlockType* block = bucket.RemoveCompatible(layout);
186     if (block != nullptr) {
187       UpdateBitmaps(indices, bucket.empty());
188       return BlockType::AllocFirst(std::move(block), layout);
189     }
190   }
191 
192   // No sufficiently large block found.
193   return BlockResult<BlockType>(nullptr, Status::NotFound());
194 }
195 
196 template <typename BlockType, size_t kMinSize, size_t kNumShelves>
ReserveBlock(BlockType & block)197 void TlsfAllocator<BlockType, kMinSize, kNumShelves>::ReserveBlock(
198     BlockType& block) {
199   if (block.InnerSize() <= sizeof(SortedItem)) {
200     std::ignore = small_bucket_.Remove(block);
201     return;
202   }
203   TlsfIndices indices = MapToIndices(block.InnerSize());
204   FastSortedBucket<BlockType>& large_bucket =
205       shelves_[indices.shelf][indices.bucket];
206   if (large_bucket.Remove(block)) {
207     UpdateBitmaps(indices, large_bucket.empty());
208   }
209 }
210 
211 template <typename BlockType, size_t kMinSize, size_t kNumShelves>
RecycleBlock(BlockType & block)212 void TlsfAllocator<BlockType, kMinSize, kNumShelves>::RecycleBlock(
213     BlockType& block) {
214   if (block.InnerSize() <= sizeof(SortedItem)) {
215     std::ignore = small_bucket_.Add(block);
216     return;
217   }
218   TlsfIndices indices = MapToIndices(block.InnerSize());
219   FastSortedBucket<BlockType>& large_bucket =
220       shelves_[indices.shelf][indices.bucket];
221   std::ignore = large_bucket.Add(block);
222   UpdateBitmaps(indices, false);
223 }
224 
225 template <typename BlockType, size_t kMinSize, size_t kNumShelves>
MapToIndices(size_t size)226 TlsfIndices TlsfAllocator<BlockType, kMinSize, kNumShelves>::MapToIndices(
227     size_t size) {
228   if (size <= kMinSize) {
229     return TlsfIndices{.shelf = 0, .bucket = 0};
230   }
231 
232   // Most significant bit set determines the shelf.
233   size_t shelf = cpp20::countr_zero(cpp20::bit_floor(size));
234   // Each shelf has 16 buckets, so next 4 bits determine the bucket.
235   auto bucket = static_cast<uint16_t>((size >> (shelf - kBucketBits)) & 0xF);
236 
237   // Adjust for minimum size, and clamp to the valid range.
238   shelf -= cpp20::countr_zero(kMinSize);
239   if (shelf >= kNumShelves) {
240     shelf = kNumShelves - 1;
241     bucket = kNumBucketsPerShelf - 1;
242   }
243   return TlsfIndices{.shelf = static_cast<uint32_t>(shelf), .bucket = bucket};
244 }
245 
246 template <typename BlockType, size_t kMinSize, size_t kNumShelves>
FindNextAvailable(TlsfIndices & indices)247 bool TlsfAllocator<BlockType, kMinSize, kNumShelves>::FindNextAvailable(
248     TlsfIndices& indices) {
249   // Are we past the end of a shelf? If so, move up a shelf.
250   if (indices.bucket == kNumBucketsPerShelf) {
251     indices.shelf++;
252     indices.bucket = 0;
253   }
254 
255   // Have we passed the top shelf? If so, no larger blocks are available.
256   if (indices.shelf >= kNumShelves) {
257     return false;
258   }
259 
260   // Use the bitmaps to find the next largest non-empty bucket.
261   uint16_t bucket_bitmap =
262       bucket_bitmaps_[indices.shelf] & (~uint32_t(0) << indices.bucket);
263   if (bucket_bitmap != 0) {
264     // There's at least one non-empty bucket on the current shelf whose
265     // blocks are at least as large as the requested size.
266     indices.bucket = cpp20::countr_zero(bucket_bitmap);
267     return true;
268   }
269 
270   // The buckets for large enough blocks on this shelf are all empty.
271   // Move up to the first shelf with non-empty buckets and find the
272   // non-empty bucket with the smallest blocks.
273   uint32_t shelf_bitmap = shelf_bitmap_ & (~uint32_t(0) << (indices.shelf + 1));
274   if (shelf_bitmap != 0) {
275     indices.shelf = cpp20::countr_zero(shelf_bitmap);
276     indices.bucket = cpp20::countr_zero(bucket_bitmaps_[indices.shelf]);
277     return true;
278   }
279 
280   // No larger blocks are available.
281   return false;
282 }
283 
284 template <typename BlockType, size_t kMinSize, size_t kNumShelves>
UpdateBitmaps(const TlsfIndices & indices,bool empty)285 void TlsfAllocator<BlockType, kMinSize, kNumShelves>::UpdateBitmaps(
286     const TlsfIndices& indices, bool empty) {
287   uint16_t bucket_bitmap = uint32_t(1) << indices.bucket;
288   if (empty) {
289     bucket_bitmaps_[indices.shelf] &= ~bucket_bitmap;
290   } else {
291     bucket_bitmaps_[indices.shelf] |= bucket_bitmap;
292   }
293 
294   uint32_t shelf_bitmap = uint32_t(1) << indices.shelf;
295   if (bucket_bitmaps_[indices.shelf] == 0) {
296     shelf_bitmap_ &= ~shelf_bitmap;
297   } else {
298     shelf_bitmap_ |= shelf_bitmap;
299   }
300 }
301 
302 }  // namespace pw::allocator
303