// Copyright 2024 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "pw_multibuf/simple_allocator.h"

#include <algorithm>
#include <mutex>

#include "pw_assert/check.h"

namespace pw::multibuf {
namespace internal {

LinkedRegionTracker::~LinkedRegionTracker() {
  // The ``LinkedRegionTracker`` *must* be removed from the parent allocator's
  // region list prior to being destroyed, as doing so requires holding a lock.
  //
  // The destructor is only called via ``Destroy()``'s invocation of
  // ``parent_.metadata_alloc_.Delete(this);``
  PW_DCHECK(unlisted());
}

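// Unlinks this tracker from the parent's region list, reports the newly
// available memory to the parent, and finally frees the tracker's own
// metadata.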
void LinkedRegionTracker::Destroy() {
  SimpleAllocator::AvailableMemorySize available;
  {
    // N.B.: this lock *must* go out of scope before the call to
    // ``Delete(this)`` below in order to prevent referencing the ``parent_``
    // field after this tracker has been destroyed.
    std::lock_guard lock(parent_.lock_);
    unlist();
    available = parent_.GetAvailableMemorySize();
  }
  parent_.MoreMemoryAvailable(available.total, available.contiguous);
  parent_.metadata_alloc_.Delete(this);
}

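// ``Chunk`` metadata for this region is allocated from, and returned to, the
// parent allocator's metadata allocator.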
void* LinkedRegionTracker::AllocateChunkClass() {
  return parent_.metadata_alloc_.Allocate(allocator::Layout::Of<Chunk>());
}

void LinkedRegionTracker::DeallocateChunkClass(void* ptr) {
  return parent_.metadata_alloc_.Deallocate(ptr);
}

}  // namespace internal

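// Allocates a ``MultiBuf`` of at least ``min_size`` and at most
// ``desired_size`` bytes from this allocator's data area. Contiguous requests
// are satisfied with a single chunk; otherwise the buffer may be assembled
// from chunks spanning multiple free blocks.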
pw::Result<MultiBuf> SimpleAllocator::DoAllocate(size_t min_size,
                                                 size_t desired_size,
                                                 bool needs_contiguous) {
  if (min_size > data_area_.size()) {
    return Status::OutOfRange();
  }
  // NB: std::lock_guard is not used here in order to release the lock
  // prior to destroying ``buf`` below.
  lock_.lock();
  auto available_memory_size = GetAvailableMemorySize();
  size_t available = needs_contiguous ? available_memory_size.contiguous
                                      : available_memory_size.total;
  if (available < min_size) {
    lock_.unlock();
    return Status::ResourceExhausted();
  }
  size_t goal_size = std::min(desired_size, available);
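  // A contiguous allocation must come from a single free block, so delegate to
  // the contiguous path and release the lock once the result is ready.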
  if (needs_contiguous) {
    auto out = InternalAllocateContiguous(goal_size);
    lock_.unlock();
    return out;
  }

  MultiBuf buf;
  Status status;
  size_t remaining_goal = goal_size;
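  // Walk the free blocks in order, carving a chunk out of each until the goal
  // size is reached or region metadata can no longer be allocated.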
  ForEachFreeBlock(
      [this, &buf, &status, remaining_goal](const FreeBlock& block)
          PW_EXCLUSIVE_LOCKS_REQUIRED(lock_) mutable {
            if (remaining_goal == 0) {
              return ControlFlow::Break;
            }
            size_t chunk_size = std::min(block.span.size(), remaining_goal);
            pw::Result<OwnedChunk> chunk = InsertRegion(
                {block.iter, ByteSpan(block.span.data(), chunk_size)});
            if (!chunk.ok()) {
              status = chunk.status();
              return ControlFlow::Break;
            }
            remaining_goal -= chunk->size();
            buf.PushFrontChunk(std::move(*chunk));
            return ControlFlow::Continue;
          });
  // The lock must be released before possibly freeing ``buf`` in the
  // ``!status.ok()`` case below. This is necessary so that the destructing
  // chunks can free their regions.
  lock_.unlock();
  if (!status.ok()) {
    return status;
  }
  return buf;
}

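// Finds the first free block of at least ``size`` bytes and returns a
// single-chunk ``MultiBuf`` carved from it, or ``ResourceExhausted`` if no
// free block is large enough.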
pw::Result<MultiBuf> SimpleAllocator::InternalAllocateContiguous(size_t size) {
  pw::Result<MultiBuf> buf = Status::ResourceExhausted();
  ForEachFreeBlock([this, &buf, size](const FreeBlock& block)
                       PW_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
                         if (block.span.size() >= size) {
                           ByteSpan buf_span(block.span.data(), size);
                           buf = InsertRegion({block.iter, buf_span})
                                     .transform(MultiBuf::FromChunk);
                           return ControlFlow::Break;
                         }
                         return ControlFlow::Continue;
                       });
  return buf;
}

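// Creates a ``LinkedRegionTracker`` covering ``block.span``, links it into the
// region list just after ``block.iter``, and returns the region's first chunk.
// Fails if metadata for the tracker or the chunk cannot be allocated.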
pw::Result<OwnedChunk> SimpleAllocator::InsertRegion(const FreeBlock& block) {
  internal::LinkedRegionTracker* new_region =
      metadata_alloc_.New<internal::LinkedRegionTracker>(*this, block.span);
  if (new_region == nullptr) {
    return Status::OutOfRange();
  }
  std::optional<OwnedChunk> chunk = new_region->CreateFirstChunk();
  if (!chunk.has_value()) {
    metadata_alloc_.Delete(new_region);
    return Status::OutOfRange();
  }
  regions_.insert_after(block.iter, *new_region);
  return std::move(*chunk);
}

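// Walks the free-block list to compute both the total number of free bytes and
// the size of the largest single free block.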
SimpleAllocator::AvailableMemorySize SimpleAllocator::GetAvailableMemorySize() {
  size_t total = 0;
  size_t max_contiguous = 0;
  ForEachFreeBlock([&total, &max_contiguous](const FreeBlock& block) {
    total += block.span.size();
    if (block.span.size() > max_contiguous) {
      max_contiguous = block.span.size();
    }
    return ControlFlow::Continue;
  });

  AvailableMemorySize available;
  available.total = total;
  available.contiguous = max_contiguous;
  return available;
}

}  // namespace pw::multibuf