// Copyright 2024 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#pragma once

#include <array>

#include "pw_allocator/block_allocator_base.h"
#include "pw_allocator/bucket.h"
#include "pw_status/try.h"

namespace pw::allocator {

/// Block allocator that uses sized buckets of free blocks.
///
/// In this strategy, the allocator handles an allocation request by starting
/// with the bucket with the smallest size that is larger than the requested
/// size. It tries to allocate using the blocks in that bucket, if any, before
/// trying the bucket with the next largest size.
///
/// On deallocation, blocks are placed in the bucket of the smallest size that
/// is larger than the usable space of the block being freed.
///
/// The last bucket always has an unbounded size.
///
/// As an example, assume that the allocator is configured with a minimum chunk
/// size of 64 and 5 buckets. The internal state may look like the following:
///
/// @code{.unparsed}
/// bucket[0] (64B) --> chunk[12B] --> chunk[42B] --> chunk[64B] --> NULL
/// bucket[1] (128B) --> chunk[65B] --> chunk[72B] --> NULL
/// bucket[2] (256B) --> NULL
/// bucket[3] (512B) --> chunk[312B] --> chunk[512B] --> chunk[416B] --> NULL
/// bucket[4] (implicit) --> chunk[1024B] --> chunk[513B] --> NULL
/// @endcode
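///
/// A minimal usage sketch (the buffer size and `Layout` arguments below are
/// illustrative, and releasing memory is assumed to go through the layout-free
/// `Deallocate` overload of the base `Allocator` interface):
///
/// @code{.cpp}
/// void Example() {
///   std::array<std::byte, 0x1000> buffer;
///   BucketBlockAllocator<> allocator(buffer);
///   void* ptr = allocator.Allocate(Layout(128, 8));
///   if (ptr != nullptr) {
///     allocator.Deallocate(ptr);
///   }
/// }
/// @endcode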
template <typename OffsetType = uintptr_t,
          size_t kMinChunkSize = 32,
          size_t kNumBuckets = 5,
          size_t kPoisonInterval = 0,
          size_t kAlign = std::max(alignof(OffsetType), alignof(std::byte*))>
class BucketBlockAllocator
    : public BlockAllocator<OffsetType,
                            kPoisonInterval,
                            std::max(kAlign, alignof(std::byte*))> {
 public:
  using Base = BlockAllocator<OffsetType,
                              kPoisonInterval,
                              std::max(kAlign, alignof(std::byte*))>;
  using BlockType = typename Base::BlockType;

  /// Constexpr constructor. Callers must explicitly call `Init`.
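  ///
  /// For illustration, a sketch of the two-phase pattern this enables (the
  /// names below are hypothetical):
  ///
  /// @code{.cpp}
  /// BucketBlockAllocator<> allocator;
  ///
  /// void InitAllocator(ByteSpan region) { allocator.Init(region); }
  /// @endcode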
  constexpr BucketBlockAllocator() : Base() {
    // Initialize all but the last bucket with doubling chunk sizes; the last
    // bucket keeps its default, unbounded chunk size.
    internal::Bucket::Init(span(buckets_.data(), buckets_.size() - 1),
                           kMinChunkSize);
  }

  /// Non-constexpr constructor that automatically calls `Init`.
  ///
  /// @param[in]  region  Region of memory to use when satisfying allocation
  ///                     requests. The region MUST be large enough to fit an
  ///                     aligned block with overhead. It MUST NOT be larger
  ///                     than what is addressable by `OffsetType`.
  explicit BucketBlockAllocator(ByteSpan region) : BucketBlockAllocator() {
    Base::Init(region);
  }

  /// @copydoc BlockAllocator::Init
  void Init(ByteSpan region) { Base::Init(region); }

  /// @copydoc BlockAllocator::Init
  void Init(BlockType* begin) { Base::Init(begin); }

  /// @copydoc BlockAllocator::Init
  void Init(BlockType* begin, BlockType* end) override {
    Base::Init(begin, end);
    // Add each initially free block to the bucket that fits it.
    for (auto* block : Base::blocks()) {
      if (!block->Used()) {
        RecycleBlock(block);
      }
    }
  }

 private:
  /// @copydoc BlockAllocator::ChooseBlock
  BlockType* ChooseBlock(Layout layout) override {
    BlockType* block = nullptr;
    for (auto& bucket : buckets_) {
      // Skip buckets whose chunks are too small for this request.
      if (bucket.chunk_size() < layout.size()) {
        continue;
      }
      // Remove the first chunk that can satisfy the request. Note that the
      // predicate has a side effect: on success, `AllocLast` has already
      // carved the allocation out of the end of the block.
      void* leading = bucket.RemoveIf([&layout](void* chunk) {
        BlockType* candidate = BlockType::FromUsableSpace(chunk);
        return BlockType::AllocLast(candidate, layout).ok();
      });
      if (leading != nullptr) {
        block = BlockType::FromUsableSpace(leading);
        break;
      }
    }
    if (block == nullptr) {
      return nullptr;
    }
    // If the chunk was split, what we have is the leading free block.
    if (!block->Used()) {
      RecycleBlock(block);
      block = block->Next();
    }
    return block;
  }

  /// @copydoc BlockAllocator::RecycleBlock
  void RecycleBlock(BlockType* block) override {
    // Free blocks that are too small to be added to buckets will be "garbage
    // collected" by merging them with their neighbors when the latter are
    // freed.
    size_t inner_size = block->InnerSize();
    if (inner_size < sizeof(void*)) {
      return;
    }
    // Buckets are ordered by increasing chunk size, so the first bucket that
    // fits is the smallest one that can hold the block. For example, with the
    // default configuration an 80-byte block lands in the 128-byte bucket.
    for (auto& bucket : buckets_) {
      if (inner_size <= bucket.chunk_size()) {
        bucket.Add(block->UsableSpace());
        break;
      }
    }
  }

  std::array<internal::Bucket, kNumBuckets> buckets_;
};

}  // namespace pw::allocator