• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2020 The Pigweed Authors
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
4 // use this file except in compliance with the License. You may obtain a copy of
5 // the License at
6 //
7 //     https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 // License for the specific language governing permissions and limitations under
13 // the License.
14 
// WARNING: This code is an experimental WIP & exploration only, and is far
// from usable.
17 #pragma once
18 
#include <cstddef>
#include <cstdint>
#include <span>

#include "pw_assert/assert.h"
#include "pw_status/status.h"
23 
24 namespace pw::allocator {
25 
26 #if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
27 // Add poison offset of sizeof(void*) bytes before and after usable space in all
28 // Blocks.
29 #define PW_ALLOCATOR_POISON_OFFSET sizeof(void*)
30 #else
// Set the poison offset to 0 bytes; will not add poison space before and
// after usable space in all Blocks.
33 #define PW_ALLOCATOR_POISON_OFFSET static_cast<size_t>(0)
34 #endif  // PW_ALLOCATOR_POISON_ENABLE
35 
36 // The "Block" type is intended to be a building block component for
// allocators. In this design, there is an explicit pointer to next and
38 // prev from the block header; the size is not encoded. The below diagram shows
39 // what this would look like for two blocks.
40 //
41 //   .------+---------------------------------.-----------------------------
42 //   |            Block A (first)             |       Block B (second)
43 //
44 //   +------+------+--------------------------+------+------+---------------
45 //   | Next | Prev |   usable space           | Next | Prev | usable space..
46 //   +------+------+--------------------------+------+--+---+---------------
47 //   ^  |                                     ^         |
48 //   |  '-------------------------------------'         |
49 //   |                                                  |
50 //   '----------- Block B's prev points to Block A -----'
51 //
52 // One use for these blocks is to use them as allocations, where each block
53 // represents an allocation handed out by malloc(). These blocks could also be
54 // used as part of a slab or buddy allocator.
55 //
56 // Each block also contains flags for whether it is the last block (i.e. whether
57 // the "next" pointer points to a valid block, or just denotes the end of this
58 // block), and whether the block is in use. These are encoded into the last two
59 // bits of the "next" pointer, as follows:
60 //
61 //  .-----------------------------------------------------------------------.
62 //  |                            Block                                      |
63 //  +-----------------------------------------------------------------------+
64 //  |              Next            | Prev |         usable space            |
65 //  +----------------+------+------+      +                                 |
66 //  |   Ptr[N..2]    | Last | Used |      |                                 |
67 //  +----------------+------+------+------+---------------------------------+
68 //  ^
69 //  |
70 //  '----------- Next() = Next & ~0x3 --------------------------------->
71 //
72 // The first block in a chain is denoted by a nullptr "prev" field, and the last
73 // block is denoted by the "Last" bit being set.
74 //
// Note: this block class requires that the given block is aligned to an
// alignof(Block*) boundary. Because of this alignment requirement, each
// returned block will also be aligned to an alignof(Block*) boundary, and
// the size will always be rounded up to a multiple of alignof(Block*).
79 //
80 // This class must be constructed using the static Init call.
81 class Block final {
82  public:
83   // No copy/move
84   Block(const Block& other) = delete;
85   Block& operator=(const Block& other) = delete;
86   Block(Block&& other) = delete;
87   Block& operator=(Block&& other) = delete;
88 
89   // Create the first block for a given memory region.
90   // Note that the start of the given memory region must be aligned to an
91   // alignof(Block) boundary.
92   // Returns:
93   //   INVALID_ARGUMENT if the given region is unaligned to too small, or
94   //   OK otherwise.
95   static Status Init(const std::span<std::byte> region, Block** block);
96 
97   // Returns a pointer to a Block, given a pointer to the start of the usable
98   // space inside the block (i.e. the opposite operation to UsableSpace()). In
99   // reality, this method just subtracts the appropriate amount from
100   // usable_space to point to the start of the owning block.
101   //
102   // Be aware that this method does not do any sanity checking; passing a random
103   // pointer will return a non-null pointer.
FromUsableSpace(std::byte * usable_space)104   static Block* FromUsableSpace(std::byte* usable_space) {
105     return reinterpret_cast<Block*>(usable_space - sizeof(Block) -
106                                     PW_ALLOCATOR_POISON_OFFSET);
107   }
108 
109   // Size including the header.
OuterSize()110   size_t OuterSize() const {
111     return reinterpret_cast<intptr_t>(Next()) -
112            reinterpret_cast<intptr_t>(this);
113   }
114 
115   // Usable bytes inside the block.
InnerSize()116   size_t InnerSize() const {
117     return OuterSize() - sizeof(*this) - 2 * PW_ALLOCATOR_POISON_OFFSET;
118   }
119 
120   // Return the usable space inside this block.
UsableSpace()121   std::byte* UsableSpace() {
122     return reinterpret_cast<std::byte*>(this) + sizeof(*this) +
123            PW_ALLOCATOR_POISON_OFFSET;
124   }
125 
126   // Split this block, such that this block has an inner size of
127   // `head_block_inner_size`, and return a new block in the remainder of the
128   // space in `new_block`.
129   //
130   // The "remainder" block will be aligned to a alignof(Block*) boundary (and
131   // `head_block_inner_size` will be rounded up). If the remaining space is not
132   // large enough to store a new `Block` after rounding, no splitting will
133   // occur.
134   //
135   // This may return the following:
136   //   OK: The split completed successfully.
137   //   INVALID_ARGUMENT: new_block is null
138   //   FAILED_PRECONDITION: This block is in use and cannot be split.
139   //   OUT_OF_RANGE: The requested size for "this" block is greater than the
140   //                 current inner_size.
141   //   RESOURCE_EXHAUSTED: The split cannot occur because the "remainder" block
142   //                       would not be large enough to store a block header.
143   Status Split(size_t head_block_inner_size, Block** new_block);
144 
145   // Merge this block with the one that comes after it.
146   // This function will not merge blocks if either are in use.
147   //
148   // This may return the following:
149   //   OK: Merge was successful.
150   //   OUT_OF_RANGE: Attempting to merge the "last" block.
151   //   FAILED_PRECONDITION: The blocks could not be merged because one of them
152   //                        was in use.
153   Status MergeNext();
154 
155   // Merge this block with the one that comes before it.
156   // This function will not merge blocks if either are in use.
157   //
158   // Warning: merging with a previous block will invalidate this block instance.
159   // do not perform any operations on this instance after merging.
160   //
161   // This may return the following:
162   //   OK: Merge was successful.
163   //   OUT_OF_RANGE: Attempting to merge the "first" block.
164   //   FAILED_PRECONDITION: The blocks could not be merged because one of them
165   //                        was in use.
166   Status MergePrev();
167 
168   // Returns whether this block is in-use or not
Used()169   bool Used() const { return (NextAsUIntPtr() & kInUseFlag) == kInUseFlag; }
170 
171   // Returns whether this block is the last block or
172   // not (i.e. whether NextBlock points to a valid block or not).
173   // This is needed because NextBlock points to the end of this block,
174   // whether there is a valid block there or not.
Last()175   bool Last() const { return (NextAsUIntPtr() & kLastFlag) == kLastFlag; }
176 
177   // Mark this block as in-use
MarkUsed()178   void MarkUsed() {
179     next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kInUseFlag));
180   }
181 
182   // Mark this block as free
MarkFree()183   void MarkFree() {
184     next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kInUseFlag));
185   }
186 
187   // Mark this block as the last one in the chain.
MarkLast()188   void MarkLast() {
189     next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kLastFlag));
190   }
191 
192   // Clear the "last" bit from this block.
ClearLast()193   void ClearLast() {
194     next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kLastFlag));
195   }
196 
197   // Fetch the block immediately after this one.
198   // Note: you should also check Last(); this function may return a valid
199   // block, even if one does not exist.
Next()200   Block* Next() const {
201     return reinterpret_cast<Block*>(
202         (NextAsUIntPtr() & ~(kInUseFlag | kLastFlag)));
203   }
204 
205   // Return the block immediately before this one. This will return nullptr
206   // if this is the "first" block.
Prev()207   Block* Prev() const { return prev_; }
208 
209   // Return true if the block is aligned, the prev/next field matches with the
210   // previous and next block, and the poisoned bytes is not damaged. Otherwise,
211   // return false to indicate this block is corrupted.
IsValid()212   bool IsValid() const { return CheckStatus() == BlockStatus::VALID; }
213 
214   // Uses PW_DCHECK to log information about the reason if a blcok is invalid.
215   // This function will do nothing if the block is valid.
216   void CrashIfInvalid();
217 
218  private:
219   static constexpr uintptr_t kInUseFlag = 0x1;
220   static constexpr uintptr_t kLastFlag = 0x2;
221   static constexpr std::byte POISON_PATTERN[8] = {std::byte{0x92},
222                                                   std::byte{0x88},
223                                                   std::byte{0x0a},
224                                                   std::byte{0x00},
225                                                   std::byte{0xec},
226                                                   std::byte{0xdc},
227                                                   std::byte{0xae},
228                                                   std::byte{0x4e}};
229   enum BlockStatus {
230     VALID,
231     MISALIGNED,
232     PREV_MISMATCHED,
233     NEXT_MISMATCHED,
234     POISON_CORRUPTED
235   };
236 
237   Block() = default;
238 
239   // Helper to reduce some of the casting nesting in the block management
240   // functions.
NextAsUIntPtr()241   uintptr_t NextAsUIntPtr() const { return reinterpret_cast<uintptr_t>(next_); }
242 
243   void PoisonBlock();
244   bool CheckPoisonBytes() const;
245   BlockStatus CheckStatus() const;
246 
247   // Note: Consider instead making these next/prev offsets from the current
248   // block, with templated type for the offset size. There are some interesting
249   // tradeoffs here; perhaps a pool of small allocations could use 1-byte
250   // next/prev offsets to reduce size further.
251   Block* next_;
252   Block* prev_;
253 };
254 
255 }  // namespace pw::allocator
256