// Copyright 2024 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "pw_multibuf/simple_allocator.h"

#include <algorithm>
#include <mutex>

#include "pw_assert/check.h"
#include "pw_bytes/alignment.h"

namespace pw::multibuf {
namespace internal {

LinkedRegionTracker::~LinkedRegionTracker() {
  // The ``LinkedRegionTracker`` *must* be removed from the parent allocator's
  // region list prior to being destroyed, since removal requires holding a
  // lock.
  //
  // The destructor is only called via ``Destroy()``'s invocation of
  // ``parent_.metadata_alloc_.Delete(this);``.
  PW_DCHECK(unlisted());
}

void LinkedRegionTracker::Destroy() {
  SimpleAllocator::AvailableMemorySize available;
  {
    // N.B.: this lock *must* go out of scope before the call to
    // ``Delete(this)`` below in order to prevent referencing the ``parent_``
    // field after this tracker has been destroyed.
    std::lock_guard lock(parent_.lock_);
    unlist();
    available = parent_.GetAvailableMemorySize();
  }
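  // Notify the parent (outside the lock) that memory has been returned;
  // ``MoreMemoryAvailable()`` on the base ``MultiBufAllocator`` may wake
  // tasks waiting on pending allocation futures.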
  parent_.MoreMemoryAvailable(available.total, available.contiguous);
  parent_.metadata_alloc_.Delete(this);
}

void* LinkedRegionTracker::AllocateChunkClass() {
  return parent_.metadata_alloc_.Allocate(allocator::Layout::Of<Chunk>());
}

void LinkedRegionTracker::DeallocateChunkClass(void* ptr) {
  return parent_.metadata_alloc_.Deallocate(ptr);
}

}  // namespace internal

// PW_CHECK doesn't handle `%` in its message formatting, so wrap the modulo
// check in a helper.
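// For example, IsAlignedSize(24, 8) is true, while IsAlignedSize(20, 8) is
// not.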
static bool IsAlignedSize(size_t num, size_t alignment) {
  return num % alignment == 0;
}

SimpleAllocator::SimpleAllocator(ByteSpan data_area,
                                 pw::allocator::Allocator& metadata_alloc,
                                 size_t alignment)
    : metadata_alloc_(metadata_alloc),
      data_area_(data_area),
      alignment_(alignment) {
  PW_CHECK(IsAlignedAs(data_area_.data(), alignment));
  PW_CHECK(IsAlignedSize(data_area_.size(), alignment));
}
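
// Example usage: a minimal sketch, not part of this file. The buffer size,
// alignment, and requested sizes are arbitrary, and allocation is assumed to
// go through the public ``MultiBufAllocator::Allocate`` /
// ``AllocateContiguous`` entry points, which return
// ``std::optional<MultiBuf>``:
//
//   alignas(8) std::array<std::byte, 1024> storage;
//   pw::allocator::Allocator& metadata_alloc = ...;  // Any allocator works.
//   SimpleAllocator allocator(storage, metadata_alloc, /*alignment=*/8);
//   std::optional<MultiBuf> buf =
//       allocator.Allocate(/*min_size=*/64, /*desired_size=*/128);
//   std::optional<MultiBuf> contiguous = allocator.AllocateContiguous(64);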

pw::Result<MultiBuf> SimpleAllocator::DoAllocate(
    size_t min_size,
    size_t desired_size,
    ContiguityRequirement contiguity_requirement) {
  if (min_size > data_area_.size()) {
    return Status::OutOfRange();
  }
  // N.B.: ``std::lock_guard`` is not used here in order to release the lock
  // prior to destroying ``buf`` below.
  lock_.lock();
  auto available_memory_size = GetAvailableMemorySize();
  size_t available = (contiguity_requirement == kNeedsContiguous)
                         ? available_memory_size.contiguous
                         : available_memory_size.total;
  if (available < min_size) {
    lock_.unlock();
    return Status::ResourceExhausted();
  }
  // All regions should be aligned, so `available` should be aligned.
  PW_CHECK(IsAlignedSize(available, alignment_));
  size_t goal_size = std::min(desired_size, available);
  if (goal_size == 0) {
    lock_.unlock();
    return MultiBuf();
  }
  if (contiguity_requirement == kNeedsContiguous) {
    auto out = InternalAllocateContiguous(goal_size);
    lock_.unlock();
    return out;
  }

  MultiBuf buf;
  Status status;
  const size_t unaligned = goal_size % alignment_;
  const size_t extra_for_alignment = unaligned ? alignment_ - unaligned : 0;
  // There's no danger of increasing the goal here to be more than `available`
  // because `available` is guaranteed to be aligned.
  size_t remaining_goal = goal_size + extra_for_alignment;
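  // Worked example: with an ``alignment_`` of 8 and a ``goal_size`` of 13,
  // ``unaligned`` is 5, ``extra_for_alignment`` is 3, and ``remaining_goal``
  // starts at 16. The final chunk is truncated by those 3 extra bytes below,
  // so the caller receives exactly 13 bytes while every region's size stays
  // 8-byte aligned.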
  ForEachFreeBlock(
      [this, &buf, &status, extra_for_alignment, remaining_goal](
          const FreeBlock& block) PW_EXCLUSIVE_LOCKS_REQUIRED(lock_) mutable {
        PW_CHECK(IsAlignedAs(block.span.data(), alignment_));
        size_t chunk_size = std::min(block.span.size(), remaining_goal);
        pw::Result<OwnedChunk> chunk =
            InsertRegion({block.iter, ByteSpan(block.span.data(), chunk_size)});
        if (!chunk.ok()) {
          status = chunk.status();
          return ControlFlow::Break;
        }
        remaining_goal -= chunk->size();
        if (remaining_goal == 0) {
          if (extra_for_alignment) {
            // If we had to adjust the goal for alignment, trim the chunk now.
            // This keeps the regions aligned in size even though the chunk
            // isn't.
            (*chunk)->Truncate(chunk->size() - extra_for_alignment);
          }
          buf.PushFrontChunk(std::move(*chunk));
          return ControlFlow::Break;
        }
        buf.PushFrontChunk(std::move(*chunk));
        return ControlFlow::Continue;
      });
  // The lock must be released prior to possibly freeing `buf` in the case
  // where `!status.ok()`. This is necessary so that the destructing chunks
  // can free their regions.
  lock_.unlock();
  if (!status.ok()) {
    return status;
  }
  return buf;
}

pw::Result<MultiBuf> SimpleAllocator::InternalAllocateContiguous(size_t size) {
  pw::Result<MultiBuf> buf = Status::ResourceExhausted();
  const size_t aligned_size = (size + alignment_ - 1) / alignment_ * alignment_;
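  // ``aligned_size`` is ``size`` rounded up to the next multiple of
  // ``alignment_``; e.g. a 13-byte request with 8-byte alignment searches for
  // a 16-byte free block, and the chunk is truncated back to 13 bytes below.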
  ForEachFreeBlock(
      [this, &buf, size, aligned_size](const FreeBlock& block)
          PW_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
            if (block.span.size() >= aligned_size) {
              PW_CHECK(IsAlignedAs(block.span.data(), alignment_));
              ByteSpan buf_span(block.span.data(), aligned_size);
              buf = InsertRegion({block.iter, buf_span})
                        .transform([size](OwnedChunk&& owned_chunk) {
                          owned_chunk->Truncate(size);
                          return MultiBuf::FromChunk(std::move(owned_chunk));
                        });
              return ControlFlow::Break;
            }
            return ControlFlow::Continue;
          });
  return buf;
}

pw::Result<OwnedChunk> SimpleAllocator::InsertRegion(const FreeBlock& block) {
  internal::LinkedRegionTracker* new_region =
      metadata_alloc_.New<internal::LinkedRegionTracker>(*this, block.span);
  if (new_region == nullptr) {
    return Status::OutOfRange();
  }
  std::optional<OwnedChunk> chunk = new_region->CreateFirstChunk();
  if (!chunk.has_value()) {
    metadata_alloc_.Delete(new_region);
    return Status::OutOfRange();
  }
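  // ``block.iter`` is the list position preceding this free block, so
  // inserting after it keeps ``regions_`` ordered by address, which the
  // free-block iteration relies on to find the gaps between regions.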
  regions_.insert_after(block.iter, *new_region);
  return std::move(*chunk);
}

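// For example, with free blocks of 64, 16, and 32 bytes, ``total`` is 112 and
// ``contiguous`` is 64.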
SimpleAllocator::AvailableMemorySize SimpleAllocator::GetAvailableMemorySize() {
  size_t total = 0;
  size_t max_contiguous = 0;
  ForEachFreeBlock([&total, &max_contiguous](const FreeBlock& block) {
    total += block.span.size();
    if (block.span.size() > max_contiguous) {
      max_contiguous = block.span.size();
    }
    return ControlFlow::Continue;
  });

  AvailableMemorySize available;
  available.total = total;
  available.contiguous = max_contiguous;
  return available;
}

}  // namespace pw::multibuf