// Copyright 2020 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#pragma once

#include <algorithm>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <limits>
#include <new>
#include <type_traits>
#include <utility>

#include "pw_allocator/allocator.h"
#include "pw_allocator/buffer.h"
#include "pw_bytes/alignment.h"
#include "pw_bytes/span.h"
#include "pw_result/result.h"
#include "pw_status/status.h"
#include "pw_status/status_with_size.h"
#include "pw_status/try.h"

namespace pw::allocator {
namespace internal {

// Types of corrupted blocks, and functions to crash with an error message
// corresponding to each type. These functions are implemented independent of
// any template parameters to allow them to use `PW_CHECK`.
enum BlockStatus {
  kValid,
  kMisaligned,
  kPrevMismatched,
  kNextMismatched,
  kPoisonCorrupted,
};
void CrashMisaligned(uintptr_t addr);
void CrashNextMismatched(uintptr_t addr, uintptr_t next_prev);
void CrashPrevMismatched(uintptr_t addr, uintptr_t prev_next);
void CrashPoisonCorrupted(uintptr_t addr);

}  // namespace internal

/// Memory region with links to adjacent blocks.
///
/// The blocks do not encode their size directly. Instead, they encode offsets
/// to the next and previous blocks using the type given by the `OffsetType`
/// template parameter. The encoded offsets are simply the offsets divided by the
/// minimum block alignment, `kAlignment`.
///
/// The `kAlignment` constant provided by the derived block is typically the
/// minimum value of `alignof(OffsetType)`. Since the addressable range of a
/// block is given by `std::numeric_limits<OffsetType>::max() * kAlignment`, it
/// may be advantageous to set a higher alignment if it allows using a smaller
/// offset type, even if this wastes some bytes in order to align block headers.
///
/// Blocks will always be aligned to a `kAlignment` boundary. Block sizes will
/// always be rounded up to a multiple of `kAlignment`.
///
/// If `kCanPoison` is set, allocators may call `Poison` to overwrite the
/// contents of a block with a poison pattern. This pattern will subsequently be
/// checked when allocating blocks, and can detect memory corruptions such as
/// use-after-frees.
///
/// As an example, the diagram below represents two contiguous
/// `Block<uint32_t, 4, true>`s. The indices indicate byte offsets:
///
/// @code{.unparsed}
/// Block 1:
/// +---------------------+------+--------------+
/// | Header              | Info | Usable space |
/// +----------+----------+------+--------------+
/// | Prev     | Next     |      |              |
/// | 0......3 | 4......7 | 8..9 | 10.......280 |
/// | 00000000 | 00000046 | 8008 |  <app data>  |
/// +----------+----------+------+--------------+
/// Block 2:
/// +---------------------+------+--------------+
/// | Header              | Info | Usable space |
/// +----------+----------+------+--------------+
/// | Prev     | Next     |      |              |
/// | 0......3 | 4......7 | 8..9 | 10......1056 |
/// | 00000046 | 00000106 | 6008 | f7f7....f7f7 |
/// +----------+----------+------+--------------+
/// @endcode
///
/// The overall size of the block (e.g. 280 bytes) is given by its next offset
/// multiplied by the alignment (e.g. 0x46 * 4). Also, the next offset of a
/// block matches the previous offset of its next block. The first block in a
/// list is denoted by having a previous offset of `0`.
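///
/// For example, decoding the sizes of `Block 1` above from its encoded next
/// offset (an illustrative sketch; the block pointer is assumed to come from
/// `Init` or a later split):
/// @code{.cpp}
///   Block<uint32_t, 4, true>* block1 = ...;   // Block 1 from the diagram.
///   size_t outer = block1->OuterSize();       // 0x46 * 4 == 280 bytes.
///   size_t inner = block1->InnerSize();       // 280 - kBlockOverhead bytes.
/// @endcode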
///
/// @tparam   OffsetType  Unsigned integral type used to encode offsets. Larger
///                       types can address more memory, but consume greater
///                       overhead.
/// @tparam   kAlign      Sets the overall alignment for blocks. Minimum is
///                       `alignof(OffsetType)` (the default). Larger values can
///                       address more memory, but consume greater overhead.
/// @tparam   kCanPoison  Indicates whether to enable poisoning free blocks.
template <typename OffsetType = uintptr_t,
          size_t kAlign = alignof(OffsetType),
          bool kCanPoison = false>
class Block {
 public:
  using offset_type = OffsetType;
  static_assert(std::is_unsigned_v<offset_type>,
                "offset type must be unsigned");

  static constexpr size_t kAlignment = std::max(kAlign, alignof(offset_type));
  static constexpr size_t kBlockOverhead = AlignUp(sizeof(Block), kAlignment);

  // No copy or move.
  Block(const Block& other) = delete;
  Block& operator=(const Block& other) = delete;

  /// @brief Creates the first block for a given memory region.
  ///
  /// @returns @rst
  ///
  /// .. pw-status-codes::
  ///
  ///    OK: Returns a block representing the region.
  ///
  ///    INVALID_ARGUMENT: The region is null.
  ///
  ///    RESOURCE_EXHAUSTED: The region is too small for a block.
  ///
  ///    OUT_OF_RANGE: The region is too big to be addressed using
  ///    ``OffsetType``.
  ///
  /// @endrst
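  ///
  /// A minimal usage sketch (error handling elided; `buffer` is an assumed
  /// caller-provided memory region):
  /// @code{.cpp}
  ///   std::array<std::byte, 1024> buffer;
  ///   auto result = Block<>::Init(ByteSpan(buffer));
  ///   if (result.ok()) {
  ///     Block<>* block = *result;  // Spans the aligned portion of `buffer`.
  ///   }
  /// @endcode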
  static Result<Block*> Init(ByteSpan region);

  /// @returns  A pointer to a `Block`, given a pointer to the start of the
  ///           usable space inside the block.
  ///
  /// This is the inverse of `UsableSpace()`.
  ///
  /// @warning  This method does not do any checking; passing a random
  ///           pointer will return a non-null pointer.
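  ///
  /// For example (a sketch; `block` is an assumed valid block):
  /// @code{.cpp}
  ///   std::byte* ptr = block->UsableSpace();
  ///   Block<>* same = Block<>::FromUsableSpace(ptr);
  ///   // `same` points to the same block as `block`.
  /// @endcode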
  template <int&... DeducedTypesOnly,
            typename PtrType,
            bool is_const_ptr = std::is_const_v<std::remove_pointer_t<PtrType>>,
            typename BytesPtr =
                std::conditional_t<is_const_ptr, const std::byte*, std::byte*>,
            typename BlockPtr =
                std::conditional_t<is_const_ptr, const Block*, Block*>>
  static BlockPtr FromUsableSpace(PtrType usable_space) {
    // Perform memory laundering to prevent the compiler from tracing the memory
    // used to store the block and to avoid optimizations that may be invalidated
    // by the use of placement-new to create blocks in `Init` and `Split`.
    auto* bytes = reinterpret_cast<BytesPtr>(usable_space);
    return std::launder(reinterpret_cast<BlockPtr>(bytes - kBlockOverhead));
  }

  /// @returns The total size of the block in bytes, including the header.
  size_t OuterSize() const { return next_ * kAlignment; }

  /// @returns The number of usable bytes inside the block.
  size_t InnerSize() const { return OuterSize() - kBlockOverhead; }

  /// @returns The number of bytes requested using AllocFirst, AllocLast, or
  /// Resize.
  size_t RequestedSize() const { return InnerSize() - padding_; }

  /// @returns A pointer to the usable space inside this block.
  std::byte* UsableSpace() {
    return std::launder(reinterpret_cast<std::byte*>(this) + kBlockOverhead);
  }
  const std::byte* UsableSpace() const {
    return std::launder(reinterpret_cast<const std::byte*>(this) +
                        kBlockOverhead);
  }

  /// Splits an aligned block from the start of the block, and marks it as used.
  ///
  /// If successful, `block` will be replaced by a block that has an inner
  /// size of at least `layout.size()`, and whose starting address is aligned
  /// to a `layout.alignment()` boundary. If unsuccessful, `block` will be
  /// unmodified.
  ///
  /// This method is static in order to consume and replace the given block
  /// pointer with a pointer to the new, smaller block. In total, up to two
  /// additional blocks may be created: one to pad the returned block to an
  /// alignment boundary and one for the trailing space.
  ///
  /// @pre The block must not be in use.
  ///
  /// @returns @rst
  ///
  /// .. pw-status-codes::
  ///
  ///    OK: The split completed successfully.
  ///
  ///    FAILED_PRECONDITION: This block is in use and cannot be split.
  ///
  ///    OUT_OF_RANGE: The requested size plus padding needed for alignment
  ///    is greater than the current size.
  ///
  /// @endrst
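  ///
  /// A usage sketch (assuming `block` is a free block and that `Layout` is
  /// constructed from a size and alignment):
  /// @code{.cpp}
  ///   if (Block<>::AllocFirst(block, Layout(64, 16)).ok()) {
  ///     void* ptr = block->UsableSpace();  // At least 64 bytes, 16-aligned.
  ///   }
  /// @endcode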
  static Status AllocFirst(Block*& block, Layout layout);

  /// Checks if an aligned block could be split from the end of the block.
  ///
  /// On error, this method will return the same status as `AllocLast` without
  /// performing any modifications.
  ///
  /// @pre The block must not be in use.
  ///
  /// @returns @rst
  ///
  /// .. pw-status-codes::
  ///
  ///    OK: Returns the number of bytes to shift this block in order to align
  ///    its usable space.
  ///
  ///    FAILED_PRECONDITION: This block is in use and cannot be split.
  ///
  ///    OUT_OF_RANGE: The requested size is greater than the current size.
  ///
  ///    RESOURCE_EXHAUSTED: The remaining space is too small to hold a
  ///    new block.
  ///
  /// @endrst
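  ///
  /// A probing sketch (assuming `block` is a free block and `layout` is the
  /// desired layout):
  /// @code{.cpp}
  ///   StatusWithSize can_alloc = block->CanAllocLast(layout);
  ///   if (can_alloc.ok()) {
  ///     // `can_alloc.size()` leading bytes would be shifted off before the
  ///     // block is marked as used by `AllocLast`.
  ///   }
  /// @endcode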
  StatusWithSize CanAllocLast(Layout layout) const;

  /// Splits an aligned block from the end of the block, and marks it as used.
  ///
  /// If successful, `block` will be replaced by a block that has an inner
  /// size of at least `layout.size()`, and whose starting address is aligned
  /// to a `layout.alignment()` boundary. If unsuccessful, `block` will be
  /// unmodified.
  ///
  /// This method is static in order to consume and replace the given block
  /// pointer with a pointer to the new, smaller block. An additional block may
  /// be created for the leading space.
  ///
  /// @pre The block must not be in use.
  ///
  /// @returns @rst
  ///
  /// .. pw-status-codes::
  ///
  ///    OK: The split completed successfully.
  ///
  ///    FAILED_PRECONDITION: This block is in use and cannot be split.
  ///
  ///    OUT_OF_RANGE: The requested size is greater than the current size.
  ///
  ///    RESOURCE_EXHAUSTED: The remaining space is too small to hold a new
  ///    block.
  ///
  /// @endrst
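  ///
  /// A usage sketch (assuming `block` is a free block):
  /// @code{.cpp}
  ///   if (Block<>::AllocLast(block, Layout(128, 8)).ok()) {
  ///     // `block` now refers to the trailing, aligned, in-use block.
  ///   }
  /// @endcode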
  static Status AllocLast(Block*& block, Layout layout);

  /// Marks the block as free and merges it with any free neighbors.
  ///
  /// This method is static in order to consume and replace the given block
  /// pointer. If neither neighbor is free, the returned pointer will point to
  /// the original block. Otherwise, it will point to the new, larger block
  /// created by merging adjacent free blocks together.
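  ///
  /// A usage sketch (assuming `block` was previously allocated):
  /// @code{.cpp}
  ///   Block<>::Free(block);
  ///   // `block` now refers to the freed block, merged with any free
  ///   // neighbors it had.
  /// @endcode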
  static void Free(Block*& block);

  /// Grows or shrinks the block.
  ///
  /// If successful, `block` may be merged with the block after it in order to
  /// provide additional memory (when growing) or to merge released memory (when
  /// shrinking). If unsuccessful, `block` will be unmodified.
  ///
  /// This method is static in order to consume and replace the given block
  /// pointer with a pointer to the new, smaller block.
  ///
  /// @pre The block must be in use.
  ///
  /// @returns @rst
  ///
  /// .. pw-status-codes::
  ///
  ///    OK: The resize completed successfully.
  ///
  ///    FAILED_PRECONDITION: This block is not in use.
  ///
  ///    OUT_OF_RANGE: The requested size is greater than the available space.
  ///
  /// @endrst
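  ///
  /// A usage sketch (assuming `block` is in use):
  /// @code{.cpp}
  ///   if (Block<>::Resize(block, 256).ok()) {
  ///     // block->InnerSize() is now at least 256 bytes.
  ///   }
  /// @endcode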
  static Status Resize(Block*& block, size_t new_inner_size);

  /// Attempts to split this block.
  ///
  /// If successful, the block will have an inner size of `new_inner_size`,
  /// rounded up to a `kAlignment` boundary. The remaining space will be
  /// returned as a new block.
  ///
  /// This method may fail if the remaining space is too small to hold a new
  /// block. If this method fails for any reason, the original block is
  /// unmodified.
  ///
  /// This method is static in order to consume and replace the given block
  /// pointer with a pointer to the new, smaller block.
  ///
  /// TODO(b/326509341): Remove from the public API when FreeList is no longer
  /// in use.
  ///
  /// @pre The block must not be in use.
  ///
  /// @returns @rst
  ///
  /// .. pw-status-codes::
  ///
  ///    OK: The split completed successfully.
  ///
  ///    FAILED_PRECONDITION: This block is in use and cannot be split.
  ///
  ///    OUT_OF_RANGE: The requested size for this block is greater
  ///    than the current ``inner_size``.
  ///
  ///    RESOURCE_EXHAUSTED: The remaining space is too small to hold a
  ///    new block.
  ///
  /// @endrst
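  ///
  /// A usage sketch (assuming `block` is a free block):
  /// @code{.cpp}
  ///   auto result = Block<>::Split(block, 64);
  ///   if (result.ok()) {
  ///     Block<>* remainder = *result;  // The new free block after `block`.
  ///   }
  /// @endcode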
  static Result<Block*> Split(Block*& block, size_t new_inner_size);

  /// Merges this block with the one that comes after it.
  ///
  /// This method is static in order to consume and replace the given block
  /// pointer with a pointer to the new, larger block.
  ///
  /// TODO(b/326509341): Remove from the public API when FreeList is no longer
  /// in use.
  ///
  /// @pre The blocks must not be in use.
  ///
  /// @returns @rst
  ///
  /// .. pw-status-codes::
  ///
  ///    OK: The merge was successful.
  ///
  ///    OUT_OF_RANGE: The given block is the last block.
  ///
  ///    FAILED_PRECONDITION: One or more of the blocks is in use.
  ///
  /// @endrst
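  ///
  /// A usage sketch (assuming `block` and its successor are both free):
  /// @code{.cpp}
  ///   if (Block<>::MergeNext(block).ok()) {
  ///     // `block` now also covers what was previously its next block.
  ///   }
  /// @endcode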
  static Status MergeNext(Block*& block);

  /// Fetches the block immediately after this one.
  ///
  /// For performance, this always returns a block pointer, even if the returned
  /// pointer is invalid. The pointer is valid if and only if `Last()` is false.
  ///
  /// Typically, after calling `Init` callers may save a pointer past the end of
  /// the list using `Next()`. This makes it easy to subsequently iterate over
  /// the list:
  /// @code{.cpp}
  ///   auto result = Block<>::Init(byte_span);
  ///   Block<>* begin = *result;
  ///   Block<>* end = begin->Next();
  ///   ...
  ///   for (auto* block = begin; block != end; block = block->Next()) {
  ///     // Do something with each block.
  ///   }
  /// @endcode
  Block* Next() const;

  /// @copydoc `Next`.
  static Block* NextBlock(const Block* block) {
    return block == nullptr ? nullptr : block->Next();
  }

  /// @returns The block immediately before this one, or a null pointer if this
  /// is the first block.
  Block* Prev() const;

  /// @copydoc `Prev`.
  static Block* PrevBlock(const Block* block) {
    return block == nullptr ? nullptr : block->Prev();
  }

  /// Returns the current alignment of a block.
  size_t Alignment() const { return Used() ? info_.alignment : 1; }

  /// Indicates whether the block is in use.
  ///
  /// @returns `true` if the block is in use or `false` if not.
  bool Used() const { return info_.used; }

  /// Indicates whether this block is the last block or not (i.e. whether
  /// `Next()` points to a valid block or not). This is needed because
  /// `Next()` points to the end of this block, whether there is a valid
  /// block there or not.
  ///
  /// @returns `true` if this is the last block or `false` if not.
  bool Last() const { return info_.last; }

  /// Marks this block as in use.
  void MarkUsed() { info_.used = 1; }

  /// Marks this block as free.
  void MarkFree() { info_.used = 0; }

  /// Marks this block as the last one in the chain.
  void MarkLast() { info_.last = 1; }

  /// Clears the last bit from this block.
  void ClearLast() { info_.last = 0; }

  /// Poisons the block's usable space.
  ///
  /// This method does nothing if `kCanPoison` is false, or if the block is in
  /// use, or if `should_poison` is false. The decision to poison a block is
  /// deferred to the allocator to allow for more nuanced strategies than simply
  /// all or nothing. For example, an allocator may want to balance security and
  /// performance by only poisoning every n-th free block.
  ///
  /// @param  should_poison   Indicates that the block should be poisoned, if
  ///                         poisoning is enabled.
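  ///
  /// For example, an allocator might poison every fourth freed block (a
  /// sketch; `count_` is a hypothetical allocator member):
  /// @code{.cpp}
  ///   block->Poison((++count_ % 4) == 0);
  /// @endcode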
  void Poison(bool should_poison = true);

  /// @brief Checks if a block is valid.
  ///
  /// @returns `true` if and only if the following conditions are met:
  /// * The block is aligned.
  /// * The prev/next fields match with the previous and next blocks.
  /// * The poisoned bytes are not damaged (if poisoning is enabled).
  bool IsValid() const { return CheckStatus() == internal::kValid; }

  /// @brief Crashes with an informational message if a block is invalid.
  ///
  /// Does nothing if the block is valid.
  void CrashIfInvalid() const;

 private:
  static constexpr uint8_t kPoisonByte = 0xf7;

  /// Consumes the block and returns as a span of bytes.
  static ByteSpan AsBytes(Block*&& block);

  /// Consumes the span of bytes and uses it to construct and return a block.
  static Block* AsBlock(size_t prev_outer_size, ByteSpan bytes);

  Block(size_t prev_outer_size, size_t outer_size);

  /// Returns a `BlockStatus` that is either kValid or indicates the reason why
  /// the block is invalid.
  ///
  /// If the block is invalid at multiple points, this function will only return
  /// one of the reasons.
  internal::BlockStatus CheckStatus() const;

  /// Consumes the given number of leading bytes of a block in order to align
  /// its usable space.
  ///
  /// If the number of bytes needed to align the usable space is 0, i.e. it is
  /// already aligned, this method does nothing. If the number of bytes is less
  /// than the overhead for a block, the bytes are added to the end of the
  /// preceding block, if there is one. Otherwise, if it is the first block or
  /// the number of bytes is greater than the overhead for a block, a new block
  /// is split from this one.
  ///
  /// This method is static in order to consume and replace the given block
  /// pointer with a pointer to the new, smaller block.
  ///
  /// @pre The block must not be in use.
  static void ShiftBlock(Block*& block, size_t pad_size);

  /// Like `Split`, but assumes the caller has already checked the parameters to
  /// ensure the split will succeed.
  static Block* SplitImpl(Block*& block, size_t new_inner_size);

  /// Returns true if the block is unpoisoned or if its usable space is
  /// untouched; false otherwise.
  bool CheckPoison() const;

  /// Offset (in increments of the minimum alignment) from this block to the
  /// previous block. 0 if this is the first block.
  offset_type prev_ = 0;

  /// Offset (in increments of the minimum alignment) from this block to the
  /// next block. Valid even if this is the last block, since it equals the
  /// size of the block.
  offset_type next_ = 0;

  /// Information about the current state of the block:
  /// * If the `used` flag is set, the block's usable memory has been allocated
  ///   and is being used.
  /// * If the `poisoned` flag is set and the `used` flag is clear, the block's
  ///   usable memory contains a poison pattern that will be checked when the
  ///   block is allocated.
  /// * If the `last` flag is set, the block does not have a next block.
  /// * If the `used` flag is set, the alignment represents the requested value
  ///   when the memory was allocated, which may be less strict than the actual
  ///   alignment.
  struct {
    uint16_t used : 1;
    uint16_t poisoned : 1;
    uint16_t last : 1;
    uint16_t alignment : 13;
  } info_;

  /// Number of bytes allocated beyond what was requested. This will be at most
  /// the minimum alignment, i.e. `alignof(offset_type)`.
  uint16_t padding_ = 0;

 public:
  // Associated types.

  /// Represents an iterator that moves forward through a list of blocks.
  ///
  /// This class is not typically instantiated directly, but rather using a
  /// range-based for-loop using `Block::Range`.
  class Iterator final {
   public:
    Iterator(Block* block) : block_(block) {}
    Iterator& operator++() {
      block_ = Block::NextBlock(block_);
      return *this;
    }
    bool operator!=(const Iterator& other) { return block_ != other.block_; }
    Block* operator*() { return block_; }

   private:
    Block* block_;
  };

  /// Represents an iterator that moves backward through a list of blocks.
  ///
  /// This class is not typically instantiated directly, but rather using a
  /// range-based for-loop using `Block::ReverseRange`.
  class ReverseIterator final {
   public:
    ReverseIterator(Block* block) : block_(block) {}
    ReverseIterator& operator++() {
      block_ = Block::PrevBlock(block_);
      return *this;
    }
    bool operator!=(const ReverseIterator& other) {
      return block_ != other.block_;
    }
    Block* operator*() { return block_; }

   private:
    Block* block_;
  };

  /// Represents a range of blocks that can be iterated over.
  ///
  /// The typical usage of this class is in a range-based for-loop, e.g.
  /// @code{.cpp}
  ///   for (auto* block : Range(first, last)) { ... }
  /// @endcode
  class Range final {
   public:
    /// Constructs a range including `begin` and all valid following blocks.
    explicit Range(Block* begin) : begin_(begin), end_(nullptr) {}

    /// Constructs a range of blocks from `begin` to `end`, inclusively.
    Range(Block* begin_inclusive, Block* end_inclusive)
        : begin_(begin_inclusive), end_(end_inclusive->Next()) {}

    Iterator& begin() { return begin_; }
    Iterator& end() { return end_; }

   private:
    Iterator begin_;
    Iterator end_;
  };

  /// Represents a range of blocks that can be iterated over in the reverse
  /// direction.
  ///
  /// The typical usage of this class is in a range-based for-loop, e.g.
  /// @code{.cpp}
  ///   for (auto* block : ReverseRange(last, first)) { ... }
  /// @endcode
  class ReverseRange final {
   public:
    /// Constructs a range including `rbegin` and all valid preceding blocks.
    explicit ReverseRange(Block* rbegin) : begin_(rbegin), end_(nullptr) {}

    /// Constructs a range of blocks from `rbegin` to `rend`, inclusively.
    ReverseRange(Block* rbegin_inclusive, Block* rend_inclusive)
        : begin_(rbegin_inclusive), end_(rend_inclusive->Prev()) {}

    ReverseIterator& begin() { return begin_; }
    ReverseIterator& end() { return end_; }

   private:
    ReverseIterator begin_;
    ReverseIterator end_;
  };
} __attribute__((packed, aligned(kAlign)));

// Public template method implementations.

template <typename OffsetType, size_t kAlign, bool kCanPoison>
Result<Block<OffsetType, kAlign, kCanPoison>*>
Block<OffsetType, kAlign, kCanPoison>::Init(ByteSpan region) {
  Result<ByteSpan> result = GetAlignedSubspan(region, kAlignment);
  if (!result.ok()) {
    return result.status();
  }
  region = result.value();
  if (region.size() < kBlockOverhead) {
    return Status::ResourceExhausted();
  }
  if (std::numeric_limits<OffsetType>::max() < region.size() / kAlignment) {
    return Status::OutOfRange();
  }
  Block* block = AsBlock(0, region);
  block->MarkLast();
  return block;
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
Status Block<OffsetType, kAlign, kCanPoison>::AllocFirst(Block*& block,
                                                         Layout layout) {
  if (block == nullptr) {
    return Status::InvalidArgument();
  }
  block->CrashIfInvalid();
  if (block->Used()) {
    return Status::FailedPrecondition();
  }
  Block* prev = block->Prev();

  // Check if padding will be needed at the front to align the usable space.
  size_t alignment = std::max(layout.alignment(), kAlignment);
  auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
  size_t pad_size = AlignUp(addr, alignment) - addr;
  bool should_poison = block->info_.poisoned;
  if (pad_size == 0) {
    // No padding needed.
  } else if (pad_size > kBlockOverhead) {
    // Enough padding is needed that it can be split off as a new free block.
  } else if (prev == nullptr) {
    // First block; increase padding to at least the minimum block size.
    pad_size += AlignUp(kBlockOverhead, alignment);
  }

  // Make sure everything fits.
  size_t inner_size = AlignUp(layout.size(), kAlignment);
  if (block->InnerSize() < pad_size + inner_size) {
    return Status::OutOfRange();
  }
  ShiftBlock(block, pad_size);

  // If the block is large enough to have a trailing block, split it to get the
  // requested usable space.
  if (inner_size + kBlockOverhead <= block->InnerSize()) {
    Block* trailing = SplitImpl(block, inner_size);
    trailing->Poison(should_poison);
  }

  block->MarkUsed();
  block->info_.alignment = alignment;
  block->padding_ = block->InnerSize() - layout.size();
  return OkStatus();
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
StatusWithSize Block<OffsetType, kAlign, kCanPoison>::CanAllocLast(
    Layout layout) const {
  if (Used()) {
    return StatusWithSize::FailedPrecondition();
  }
  CrashIfInvalid();
  // Find the last address that is aligned and is followed by enough space for
  // block overhead and the requested size.
  if (InnerSize() < layout.size()) {
    return StatusWithSize::OutOfRange();
  }
  auto addr = reinterpret_cast<uintptr_t>(UsableSpace());
  size_t alignment = std::max(layout.alignment(), kAlignment);
  uintptr_t next = AlignDown(addr + (InnerSize() - layout.size()), alignment);
  if (next < addr) {
    // Requested size does not fit.
    return StatusWithSize::ResourceExhausted();
  }
  return StatusWithSize(next - addr);
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
Status Block<OffsetType, kAlign, kCanPoison>::AllocLast(Block*& block,
                                                        Layout layout) {
  if (block == nullptr) {
    return Status::InvalidArgument();
  }
  size_t pad_size = 0;
  PW_TRY_ASSIGN(pad_size, block->CanAllocLast(layout));
  ShiftBlock(block, pad_size);

  block->MarkUsed();
  block->info_.alignment = layout.alignment();
  block->padding_ = block->InnerSize() - layout.size();
  return OkStatus();
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
void Block<OffsetType, kAlign, kCanPoison>::ShiftBlock(Block*& block,
                                                       size_t pad_size) {
  if (pad_size == 0) {
    return;
  }

  // Check if this is the first block.
  Block* prev = block->Prev();
  if (prev == nullptr && pad_size <= kBlockOverhead) {
    pad_size += kBlockOverhead;
  }

  bool should_poison = block->info_.poisoned;
  if (pad_size <= kBlockOverhead) {
    // The small amount of padding can be appended to the previous block.
    Block::Resize(prev, prev->InnerSize() + pad_size).IgnoreError();
    prev->padding_ += pad_size;
    block = prev->Next();

  } else if (kBlockOverhead < pad_size) {
    // Split the large padding off the front.
    Block* leading = block;
    block = SplitImpl(leading, pad_size - kBlockOverhead);
    leading->Poison(should_poison);
  }
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
void Block<OffsetType, kAlign, kCanPoison>::Free(Block*& block) {
  if (block == nullptr) {
    return;
  }
  block->MarkFree();
  Block* prev = block->Prev();
  if (MergeNext(prev).ok()) {
    block = prev;
  }
  MergeNext(block).IgnoreError();
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
Status Block<OffsetType, kAlign, kCanPoison>::Resize(Block*& block,
                                                     size_t new_inner_size) {
  if (block == nullptr) {
    return Status::InvalidArgument();
  }
  if (!block->Used()) {
    return Status::FailedPrecondition();
  }
  size_t requested_inner_size = new_inner_size;
  size_t old_inner_size = block->InnerSize();
  size_t alignment = block->Alignment();
  uint16_t padding = block->padding_;
  new_inner_size = AlignUp(new_inner_size, kAlignment);

  if (old_inner_size == new_inner_size) {
    return OkStatus();
  }

  // Treat the block as free and try to combine it with the next block. At most
  // one free block is expected to follow this block.
  block->MarkFree();
  MergeNext(block).IgnoreError();

  Status status = OkStatus();

  if (block->InnerSize() < new_inner_size) {
    // The merged block is too small for the resized block.
    status = Status::OutOfRange();

  } else if (new_inner_size + kBlockOverhead <= block->InnerSize()) {
    // There is enough room after the resized block for another trailing block.
    bool should_poison = block->info_.poisoned;
    Block* trailing = SplitImpl(block, new_inner_size);
    trailing->Poison(should_poison);
  }

  if (status.ok()) {
    padding = block->InnerSize() - requested_inner_size;
  } else if (block->InnerSize() != old_inner_size) {
    // Restore the original block on failure.
    SplitImpl(block, old_inner_size);
  }
  block->MarkUsed();
  block->info_.alignment = alignment;
  block->padding_ = padding;
  return status;
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
Result<Block<OffsetType, kAlign, kCanPoison>*>
Block<OffsetType, kAlign, kCanPoison>::Split(Block*& block,
                                             size_t new_inner_size) {
  if (block == nullptr) {
    return Status::InvalidArgument();
  }
  if (block->Used()) {
    return Status::FailedPrecondition();
  }
  size_t old_inner_size = block->InnerSize();
  new_inner_size = AlignUp(new_inner_size, kAlignment);
  if (old_inner_size < new_inner_size) {
    return Status::OutOfRange();
  }
  if (old_inner_size - new_inner_size < kBlockOverhead) {
    return Status::ResourceExhausted();
  }
  return SplitImpl(block, new_inner_size);
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
Block<OffsetType, kAlign, kCanPoison>*
Block<OffsetType, kAlign, kCanPoison>::SplitImpl(Block*& block,
                                                 size_t new_inner_size) {
  size_t prev_outer_size = block->prev_ * kAlignment;
  size_t outer_size1 = new_inner_size + kBlockOverhead;
  bool is_last = block->Last();
  ByteSpan bytes = AsBytes(std::move(block));
  Block* block1 = AsBlock(prev_outer_size, bytes.subspan(0, outer_size1));
  Block* block2 = AsBlock(outer_size1, bytes.subspan(outer_size1));
  if (is_last) {
    block2->MarkLast();
  } else {
    block2->Next()->prev_ = block2->next_;
  }
  block = std::move(block1);
  return block2;
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
Status Block<OffsetType, kAlign, kCanPoison>::MergeNext(Block*& block) {
  if (block == nullptr) {
    return Status::InvalidArgument();
  }
  if (block->Last()) {
    return Status::OutOfRange();
  }
  Block* next = block->Next();
  if (block->Used() || next->Used()) {
    return Status::FailedPrecondition();
  }
  size_t prev_outer_size = block->prev_ * kAlignment;
  bool is_last = next->Last();
  ByteSpan prev_bytes = AsBytes(std::move(block));
  ByteSpan next_bytes = AsBytes(std::move(next));
  size_t outer_size = prev_bytes.size() + next_bytes.size();
  std::byte* merged = ::new (prev_bytes.data()) std::byte[outer_size];
  block = AsBlock(prev_outer_size, ByteSpan(merged, outer_size));
  if (is_last) {
    block->MarkLast();
  } else {
    block->Next()->prev_ = block->next_;
  }
  return OkStatus();
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
Block<OffsetType, kAlign, kCanPoison>*
Block<OffsetType, kAlign, kCanPoison>::Next() const {
  uintptr_t addr = Last() ? 0 : reinterpret_cast<uintptr_t>(this) + OuterSize();
  // See the note in `FromUsableSpace` about memory laundering.
  return std::launder(reinterpret_cast<Block*>(addr));
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
Block<OffsetType, kAlign, kCanPoison>*
Block<OffsetType, kAlign, kCanPoison>::Prev() const {
  uintptr_t addr =
      (prev_ == 0) ? 0
                   : reinterpret_cast<uintptr_t>(this) - (prev_ * kAlignment);
  // See the note in `FromUsableSpace` about memory laundering.
  return std::launder(reinterpret_cast<Block*>(addr));
}

// Private template method implementations.

template <typename OffsetType, size_t kAlign, bool kCanPoison>
Block<OffsetType, kAlign, kCanPoison>::Block(size_t prev_outer_size,
                                             size_t outer_size) {
  prev_ = prev_outer_size / kAlignment;
  next_ = outer_size / kAlignment;
  info_.used = 0;
  info_.poisoned = 0;
  info_.last = 0;
  info_.alignment = kAlignment;
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
ByteSpan Block<OffsetType, kAlign, kCanPoison>::AsBytes(Block*&& block) {
  size_t block_size = block->OuterSize();
  std::byte* bytes = ::new (std::move(block)) std::byte[block_size];
  return {bytes, block_size};
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
Block<OffsetType, kAlign, kCanPoison>*
Block<OffsetType, kAlign, kCanPoison>::AsBlock(size_t prev_outer_size,
                                               ByteSpan bytes) {
  return ::new (bytes.data()) Block(prev_outer_size, bytes.size());
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
void Block<OffsetType, kAlign, kCanPoison>::Poison(bool should_poison) {
  if constexpr (kCanPoison) {
    if (!Used() && should_poison) {
      std::memset(UsableSpace(), kPoisonByte, InnerSize());
      info_.poisoned = true;
    }
  }
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
bool Block<OffsetType, kAlign, kCanPoison>::CheckPoison() const {
  if constexpr (kCanPoison) {
    if (!info_.poisoned) {
      return true;
    }
    const std::byte* begin = UsableSpace();
    return std::all_of(begin, begin + InnerSize(), [](std::byte b) {
      return static_cast<uint8_t>(b) == kPoisonByte;
    });
  } else {
    return true;
  }
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
internal::BlockStatus Block<OffsetType, kAlign, kCanPoison>::CheckStatus()
    const {
  if (reinterpret_cast<uintptr_t>(this) % kAlignment != 0) {
    return internal::kMisaligned;
  }
  if (!Last() && (this >= Next() || this != Next()->Prev())) {
    return internal::kNextMismatched;
  }
  if (Prev() && (this <= Prev() || this != Prev()->Next())) {
    return internal::kPrevMismatched;
  }
  if (!Used() && !CheckPoison()) {
    return internal::kPoisonCorrupted;
  }
  return internal::kValid;
}

template <typename OffsetType, size_t kAlign, bool kCanPoison>
void Block<OffsetType, kAlign, kCanPoison>::CrashIfInvalid() const {
  uintptr_t addr = reinterpret_cast<uintptr_t>(this);
  switch (CheckStatus()) {
    case internal::kValid:
      break;
    case internal::kMisaligned:
      internal::CrashMisaligned(addr);
      break;
    case internal::kNextMismatched:
      internal::CrashNextMismatched(
          addr, reinterpret_cast<uintptr_t>(Next()->Prev()));
      break;
    case internal::kPrevMismatched:
      internal::CrashPrevMismatched(
          addr, reinterpret_cast<uintptr_t>(Prev()->Next()));
      break;
    case internal::kPoisonCorrupted:
      internal::CrashPoisonCorrupted(addr);
      break;
  }
}

}  // namespace pw::allocator