// Copyright 2023 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#pragma once

#include <optional>
#include <utility>

#include "pw_assert/assert.h"
#include "pw_bytes/span.h"
#include "pw_sync/interrupt_spin_lock.h"

namespace pw::multibuf {

class ChunkRegionTracker;
class OwnedChunk;

/// A handle to a contiguous slice of data.
///
/// A ``Chunk`` is similar to a ``ByteSpan``, but is aware of the underlying
/// memory allocation, and is able to split, shrink, and grow into neighboring
/// empty space.
///
/// This class is optimized to allow multiple owners to write into neighboring
/// regions of the same allocation. One important use case is communication
/// protocols that need to reserve space at the front or rear of a buffer for
/// headers or footers.
///
/// In order to support zero-copy DMA of communications buffers, allocators can
/// create properly-aligned ``Chunk`` regions in appropriate memory. The driver
/// can then ``DiscardPrefix`` in order to reserve bytes for headers,
/// ``Truncate`` in order to reserve bytes for footers, and then pass the
/// ``Chunk`` to the user to fill in. The header and footer space can then
/// be reclaimed using the ``ClaimPrefix`` and ``ClaimSuffix`` methods.
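///
/// A minimal sketch of that flow, where ``chunk`` is a hypothetical
/// ``OwnedChunk`` covering a freshly allocated region and ``kHeaderSize``,
/// ``kFooterSize``, and ``FillPayload`` are placeholders:
///
/// @code{.cpp}
///   chunk->DiscardPrefix(kHeaderSize);             // Reserve header space.
///   chunk->Truncate(chunk->size() - kFooterSize);  // Reserve footer space.
///   FillPayload(chunk->data(), chunk->size());     // Fill in the payload.
///   PW_ASSERT(chunk->ClaimPrefix(kHeaderSize));    // Reclaim header space.
///   PW_ASSERT(chunk->ClaimSuffix(kFooterSize));    // Reclaim footer space.
/// @endcode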
class Chunk {
 public:
  Chunk() = delete;
  // Not copyable or movable.
  Chunk(Chunk&) = delete;
  Chunk& operator=(Chunk&) = delete;
  Chunk(Chunk&&) = delete;
  Chunk& operator=(Chunk&&) = delete;

  std::byte* data() { return span_.data(); }
  const std::byte* data() const { return span_.data(); }
  size_t size() const { return span_.size(); }
  [[nodiscard]] bool empty() const { return span_.empty(); }

  std::byte& operator[](size_t index) { return span_[index]; }
  const std::byte& operator[](size_t index) const { return span_[index]; }

  // Container declarations
  using element_type = std::byte;
  using value_type = std::byte;
  using size_type = size_t;
  using difference_type = ptrdiff_t;
  using pointer = std::byte*;
  using const_pointer = const std::byte*;
  using reference = std::byte&;
  using const_reference = const std::byte&;
  using iterator = std::byte*;
  using const_iterator = const std::byte*;
  using reverse_iterator = std::byte*;
  using const_reverse_iterator = const std::byte*;

  std::byte* begin() { return span_.data(); }
  const std::byte* begin() const { return cbegin(); }
  const std::byte* cbegin() const { return span_.data(); }
  std::byte* end() { return span_.data() + span_.size(); }
  const std::byte* end() const { return cend(); }
  const std::byte* cend() const { return span_.data() + span_.size(); }

  /// Returns whether ``next_chunk`` can be merged into the end of this
  /// ``Chunk``.
  ///
  /// Merging is only possible when the two ``Chunk`` s are adjacent in
  /// memory and originated from the same allocation.
  [[nodiscard]] bool CanMerge(const Chunk& next_chunk) const;

  /// Attempts to merge ``next_chunk`` into the end of this ``Chunk``.
  ///
  /// If the chunks are successfully merged, this ``Chunk`` will be extended
  /// forwards to encompass the space of ``next_chunk``, and ``next_chunk``
  /// will be emptied and ``Release``d.
  ///
  /// This will only succeed when the two ``Chunk`` s are adjacent in
  /// memory and originated from the same allocation.
  ///
  /// If the chunks are not mergeable, neither ``Chunk`` will be modified.
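  ///
  /// A brief sketch, assuming ``front`` and ``back`` are hypothetical
  /// ``OwnedChunk`` s previously split from the same allocation:
  ///
  /// @code{.cpp}
  ///   if (front->CanMerge(*back)) {
  ///     PW_ASSERT(front->Merge(back));  // ``back`` is now empty and released.
  ///   }
  /// @endcode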
  bool Merge(OwnedChunk& next_chunk);

  /// Attempts to add ``bytes_to_claim`` to the front of this buffer by
  /// advancing its range backwards in memory. Returns ``true`` if the
  /// operation succeeded.
  ///
  /// This will only succeed if this ``Chunk`` points to a section of a region
  /// that has unreferenced bytes preceding it. For example, a ``Chunk`` which
  /// has been shrunk using ``DiscardPrefix`` can be re-expanded using
  /// ``ClaimPrefix``.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
  [[nodiscard]] bool ClaimPrefix(size_t bytes_to_claim);

  /// Attempts to add ``bytes_to_claim`` to the end of this buffer by
  /// advancing its range forwards in memory. Returns ``true`` if the
  /// operation succeeded.
  ///
  /// This will only succeed if this ``Chunk`` points to a section of a region
  /// that has unreferenced bytes following it. For example, a ``Chunk`` which
  /// has been shrunk using ``Truncate`` can be re-expanded using
  /// ``ClaimSuffix``.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
  [[nodiscard]] bool ClaimSuffix(size_t bytes_to_claim);

  /// Shrinks this handle to refer to the data beginning at offset
  /// ``bytes_to_discard``.
  ///
  /// Does not modify the underlying data.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
  void DiscardPrefix(size_t bytes_to_discard);

  /// Shrinks this handle to refer to data in the range ``begin..<end``.
  ///
  /// Does not modify the underlying data.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
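  ///
  /// For example (``chunk`` being a hypothetical ``OwnedChunk`` of at least
  /// six bytes):
  ///
  /// @code{.cpp}
  ///   chunk->Slice(2, 6);  // ``chunk`` now views its former bytes [2, 6).
  /// @endcode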
  void Slice(size_t begin, size_t end);

  /// Shrinks this handle to refer to only the first ``len`` bytes.
  ///
  /// Does not modify the underlying data.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
  void Truncate(size_t len);

  /// Attempts to shrink this handle to refer to the data beginning at
  /// offset ``bytes_to_take``, returning the first ``bytes_to_take`` bytes as
  /// a new ``OwnedChunk``.
  ///
  /// If the inner call to ``AllocateChunkClass`` fails, this function
  /// will return ``std::nullopt`` and this handle's span will not change.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
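  ///
  /// A brief sketch, assuming a hypothetical ``chunk`` with at least four
  /// bytes:
  ///
  /// @code{.cpp}
  ///   std::optional<OwnedChunk> header = chunk->TakePrefix(4);
  ///   if (header.has_value()) {
  ///     // ``header`` owns the first four bytes; ``chunk`` views the rest.
  ///   }
  /// @endcode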
  std::optional<OwnedChunk> TakePrefix(size_t bytes_to_take);

  /// Attempts to shrink this handle to refer to only the first
  /// ``len - bytes_to_take`` bytes, returning the last ``bytes_to_take``
  /// bytes as a new ``OwnedChunk``.
  ///
  /// If the inner call to ``AllocateChunkClass`` fails, this function
  /// will return ``std::nullopt`` and this handle's span will not change.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
  std::optional<OwnedChunk> TakeSuffix(size_t bytes_to_take);

 private:
  Chunk(ChunkRegionTracker* region_tracker, ByteSpan span)
      : region_tracker_(region_tracker),
        next_in_region_(nullptr),
        prev_in_region_(nullptr),
        next_in_buf_(nullptr),
        span_(span) {}

  // NOTE: these functions are logically
  // `PW_EXCLUSIVE_LOCKS_REQUIRED(region_tracker_->lock_)`, however this
  // annotation cannot be applied due to the forward declaration of
  // `region_tracker_`.
  void InsertAfterInRegionList(Chunk* new_chunk);
  void InsertBeforeInRegionList(Chunk* new_chunk);
  void RemoveFromRegionList();

  /// Frees this ``Chunk``. Future accesses to this class after calls to
  /// ``Free`` are undefined behavior.
  ///
  /// Decrements the reference count on the underlying chunk of data.
  ///
  /// Does not modify the underlying data, but may cause it to be deallocated
  /// if this was the only remaining ``Chunk`` referring to its region.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
  void Free();

  /// The ``region_tracker_`` for this chunk.
  ChunkRegionTracker* const region_tracker_;

  /// Pointer to the next chunk in the same ``region_tracker_``.
  ///
  /// Guarded by ``region_tracker_->lock_`` (but not annotated as such, because
  /// thread-safety annotations cannot express that all chunks within a region
  /// share a tracker and lock).
  ///
  /// ``nullptr`` if this is the last ``Chunk`` in its ``region_tracker_``.
  Chunk* next_in_region_;

  /// Pointer to the previous chunk in the same ``region_tracker_``.
  ///
  /// Guarded by ``region_tracker_->lock_`` (but not annotated as such, because
  /// thread-safety annotations cannot express that all chunks within a region
  /// share a tracker and lock).
  ///
  /// ``nullptr`` if this is the first ``Chunk`` in its ``region_tracker_``.
  Chunk* prev_in_region_;

  /// Pointer to the next chunk in a ``MultiBuf``.
  ///
  /// This is ``nullptr`` if this chunk is not in a ``MultiBuf``, or is the
  /// last element of a ``MultiBuf``.
  //
  // Reserved for use by the MultiBuf class.
  Chunk* next_in_buf_;

  /// Pointer to the sub-region to which this chunk has exclusive access.
  ///
  /// This ``span_`` is conceptually owned by this ``Chunk`` object, and
  /// may be read or written to by a ``Chunk`` user (the normal rules of
  /// thread compatibility apply: `const` methods may be called
  /// concurrently; non-`const` methods may not).
  ///
  /// Neighboring ``Chunk`` objects in this region looking to expand via
  /// ``ClaimPrefix`` or ``ClaimSuffix`` may read this span's
  /// boundaries. These other ``Chunk`` s must acquire
  /// ``region_tracker_->lock_`` before reading, and this ``Chunk`` must
  /// acquire ``region_tracker_->lock_`` before changing ``span_``.
  ByteSpan span_;

  friend class ChunkRegionTracker;  // For the constructor.
  friend class OwnedChunk;          // For ``Free``.
  friend class MultiBuf;            // For ``Free`` and ``next_in_buf_``.
};

/// An object that manages a single allocated region which is referenced by one
/// or more ``Chunk`` objects.
///
/// This class is typically implemented by ``MultiBufAllocator``
/// implementations in order to customize the behavior of region deallocation.
///
/// ``ChunkRegionTracker`` s have three responsibilities:
///
/// - Tracking the region of memory into which ``Chunk`` s can expand.
///   This is reported via the ``Region`` method. ``Chunk`` s in this region
///   can refer to memory within this region sparsely, but they can grow or
///   shrink so long as they stay within the bounds of the ``Region``.
///
/// - Deallocating the region and the ``ChunkRegionTracker`` itself.
///   This is implemented via the ``Destroy`` method, which is called once all
///   of the ``Chunk`` s in a region have been released.
///
/// - Allocating and deallocating space for ``Chunk`` classes. This is merely
///   allocating space for the ``Chunk`` object itself, not the memory to which
///   it refers. This can be implemented straightforwardly by delegating to an
///   existing generic allocator such as ``malloc`` or a
///   ``pw::allocator::Allocator`` implementation.
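///
/// A minimal sketch of an implementation backed by the C heap (hypothetical;
/// real trackers are typically provided by a ``MultiBufAllocator``
/// implementation, and this ignores alignment and allocation-failure handling
/// beyond returning ``nullptr``):
///
/// @code{.cpp}
///   class HeapRegionTracker final : public ChunkRegionTracker {
///    public:
///     explicit HeapRegionTracker(ByteSpan region) : region_(region) {}
///
///    protected:
///     void Destroy() final {
///       std::free(region_.data());
///       delete this;  // Assumes this tracker was allocated with ``new``.
///     }
///     ByteSpan Region() const final { return region_; }
///     void* AllocateChunkClass() final { return std::malloc(sizeof(Chunk)); }
///     void DeallocateChunkClass(void* chunk) final { std::free(chunk); }
///
///    private:
///     ByteSpan region_;
///   };
/// @endcode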
class ChunkRegionTracker {
 public:
  /// Creates the first ``Chunk`` referencing a whole region of memory.
  ///
  /// This must only be called once per ``ChunkRegionTracker``, when the region
  /// is first created. Multiple calls will result in undefined behavior.
  ///
  /// Returns ``std::nullopt`` if ``AllocateChunkClass`` returns ``nullptr``.
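  ///
  /// For example (continuing the hypothetical ``HeapRegionTracker`` sketch
  /// above, with ``region`` an assumed ``ByteSpan``):
  ///
  /// @code{.cpp}
  ///   auto* tracker = new HeapRegionTracker(region);
  ///   std::optional<OwnedChunk> chunk = tracker->CreateFirstChunk();
  ///   PW_ASSERT(chunk.has_value());  // ``*chunk`` spans all of ``region``.
  /// @endcode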
  std::optional<OwnedChunk> CreateFirstChunk();

 protected:
  ChunkRegionTracker() = default;
  virtual ~ChunkRegionTracker() = default;

  /// Destroys the ``ChunkRegionTracker``.
  ///
  /// Typical implementations will call ``std::destroy_at(this)`` and then free
  /// the memory associated with the region and the tracker.
  virtual void Destroy() = 0;

  /// Returns the entire span of the region being managed.
  ///
  /// ``Chunk`` s referencing this tracker will not expand beyond this region,
  /// nor into one another's portions of the region.
  ///
  /// This region does not provide any alignment guarantees by default.
  ///
  /// This region must not change for the lifetime of this
  /// ``ChunkRegionTracker``.
  virtual ByteSpan Region() const = 0;

  /// Returns a pointer to ``sizeof(Chunk)`` bytes with ``alignas(Chunk)``.
  /// Returns ``nullptr`` on failure.
  virtual void* AllocateChunkClass() = 0;

  /// Deallocates a pointer returned by ``AllocateChunkClass``.
  virtual void DeallocateChunkClass(void*) = 0;

 private:
  /// A lock used to manage an internal linked-list of ``Chunk`` s.
  ///
  /// This allows chunks to:
  /// - know whether they can expand to fill neighboring regions of memory.
  /// - know when the last chunk has been destructed, triggering ``Destroy``.
  pw::sync::InterruptSpinLock lock_;
  friend Chunk;
};

/// An RAII handle to a contiguous slice of data.
///
/// Note: ``OwnedChunk`` may acquire a ``pw::sync::Mutex`` during destruction,
/// and so must not be destroyed within ISR contexts.
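///
/// A brief sketch of the move semantics (``chunk`` is a hypothetical,
/// non-empty ``OwnedChunk``):
///
/// @code{.cpp}
///   OwnedChunk moved = std::move(chunk);  // ``chunk`` is now empty.
///   PW_ASSERT(chunk.size() == 0);
///   moved.Release();  // Optional; the destructor would also release it.
/// @endcode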
class OwnedChunk {
 public:
  OwnedChunk() : inner_(nullptr) {}
  OwnedChunk(OwnedChunk&& other) noexcept : inner_(other.inner_) {
    other.inner_ = nullptr;
  }
  OwnedChunk& operator=(OwnedChunk&& other) noexcept {
    // Release any currently-held chunk before taking ownership of ``other``'s
    // so that the old chunk is not leaked.
    Release();
    inner_ = other.inner_;
    other.inner_ = nullptr;
    return *this;
  }
  /// This method will acquire a mutex and is not IRQ safe.
  ~OwnedChunk() { Release(); }

  std::byte* data() {
    return const_cast<std::byte*>(std::as_const(*this).data());
  }
  const std::byte* data() const {
    return inner_ == nullptr ? nullptr : inner_->data();
  }

  size_t size() const { return inner_ == nullptr ? 0 : inner_->size(); }

  std::byte& operator[](size_t index) { return data()[index]; }
  std::byte operator[](size_t index) const { return data()[index]; }

  // Container declarations
  using element_type = std::byte;
  using value_type = std::byte;
  using size_type = size_t;
  using difference_type = ptrdiff_t;
  using pointer = std::byte*;
  using const_pointer = const std::byte*;
  using reference = std::byte&;
  using const_reference = const std::byte&;
  using iterator = std::byte*;
  using const_iterator = const std::byte*;
  using reverse_iterator = std::byte*;
  using const_reverse_iterator = const std::byte*;

  std::byte* begin() { return data(); }
  const std::byte* begin() const { return cbegin(); }
  const std::byte* cbegin() const { return data(); }
  std::byte* end() { return data() + size(); }
  const std::byte* end() const { return cend(); }
  const std::byte* cend() const { return data() + size(); }

  Chunk& operator*() { return *inner_; }
  const Chunk& operator*() const { return *inner_; }
  Chunk* operator->() { return inner_; }
  const Chunk* operator->() const { return inner_; }

  /// Decrements the reference count on the underlying chunk of data and
  /// empties this handle so that ``data()`` returns ``nullptr`` and ``size()``
  /// returns ``0``.
  ///
  /// Does not modify the underlying data, but may cause it to be deallocated
  /// if this was the only remaining ``Chunk`` referring to its region.
  ///
  /// This method is equivalent to
  /// ``{ OwnedChunk _unused = std::move(chunk_ref); }``.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
  void Release();

  /// Returns the contained ``Chunk*`` and empties this ``OwnedChunk`` without
  /// releasing the underlying ``Chunk``.
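  ///
  /// A brief sketch (``chunk`` is a hypothetical ``OwnedChunk``; the caller
  /// becomes responsible for eventually freeing the returned ``Chunk``):
  ///
  /// @code{.cpp}
  ///   Chunk* raw = std::move(chunk).Take();  // ``chunk`` is left empty.
  /// @endcode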
  Chunk* Take() && {
    Chunk* result = inner_;
    inner_ = nullptr;
    return result;
  }

 private:
  /// Constructs a new ``OwnedChunk`` from an existing ``Chunk*``.
  OwnedChunk(Chunk* inner) : inner_(inner) {}

  /// A pointer to the owned ``Chunk``.
  Chunk* inner_;

  /// Allow ``Chunk``, ``ChunkRegionTracker``, and ``MultiBuf`` to create
  /// ``OwnedChunk`` s using the private constructor above.
  friend class Chunk;
  friend class ChunkRegionTracker;
  friend class MultiBuf;
};

}  // namespace pw::multibuf