// Copyright 2020 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

// WARNING: This code is an experimental WIP & exploration only, and is far
// from usable.
#pragma once

#include <cstddef>
#include <cstdint>
#include <span>

#include "pw_status/status.h"

namespace pw::allocator {

#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
// Add a poison offset of sizeof(void*) bytes before and after the usable space
// in all Blocks.
#define PW_ALLOCATOR_POISON_OFFSET sizeof(void*)
#else
// Set the poison offset to 0 bytes; no poison space is added before or after
// the usable space in any Block.
#define PW_ALLOCATOR_POISON_OFFSET static_cast<size_t>(0)
#endif  // PW_ALLOCATOR_POISON_ENABLE

// The "Block" type is intended to be a building block component for
// allocators. In this design, there is an explicit pointer to next and
// prev from the block header; the size is not encoded. The diagram below shows
// what this would look like for two blocks.
//
//   .------+---------------------------------.-----------------------------
//   |            Block A (first)             |       Block B (second)
//
//   +------+------+--------------------------+------+------+---------------
//   | Next | Prev |       usable space       | Next | Prev | usable space..
//   +------+------+--------------------------+------+--+---+---------------
//   ^  |                                     ^         |
//   |  '-------------------------------------'         |
//   |                                                   |
//   '----------- Block B's prev points to Block A -----'
//
// One use for these blocks is to use them as allocations, where each block
// represents an allocation handed out by malloc(). These blocks could also be
// used as part of a slab or buddy allocator.
//
// Each block also contains flags for whether it is the last block (i.e.
// whether the "next" pointer points to a valid block, or just denotes the end
// of this block), and whether the block is in use. These are encoded into the
// last two bits of the "next" pointer, as follows:
//
//   .-----------------------------------------------------------------------.
//   |                                 Block                                 |
//   +-----------------------------------------------------------------------+
//   |              Next             | Prev |         usable space           |
//   +----------------+------+-------+      +                                |
//   |   Ptr[N..2]    | Last | Used  |      |                                |
//   +----------------+------+-------+------+--------------------------------+
//   ^
//   |
//   '----------- Next() = Next & ~0x3 --------------------------------->
//
// The first block in a chain is denoted by a nullptr "prev" field, and the
// last block is denoted by the "Last" bit being set.
//
// Note: this block class requires that the given block is aligned to an
// alignof(Block*) boundary. Because of this alignment requirement, each
// returned block will also be aligned to an alignof(Block*) boundary, and the
// size will always be rounded up to a multiple of alignof(Block*).
//
// This class must be constructed using the static Init call.
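//
// Example usage (an illustrative sketch; `buffer`, `block`, and the sizes
// shown are placeholders, not part of this interface):
//
//   alignas(Block*) std::byte buffer[1024];
//   Block* block = nullptr;
//   if (Block::Init(std::span(buffer), &block).ok()) {
//     block->MarkUsed();                        // Reserve the whole block.
//     std::byte* data = block->UsableSpace();   // Memory handed to a caller.
//     // ... later, recover the owning block from the caller's pointer and
//     // release it.
//     Block::FromUsableSpace(data)->MarkFree();
//   }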
class Block final {
 public:
  // No copy/move.
  Block(const Block& other) = delete;
  Block& operator=(const Block& other) = delete;
  Block(Block&& other) = delete;
  Block& operator=(Block&& other) = delete;

  // Create the first block for a given memory region.
  // Note that the start of the given memory region must be aligned to an
  // alignof(Block) boundary.
  // Returns:
  //   INVALID_ARGUMENT if the given region is unaligned or too small, or
  //   OK otherwise.
  static Status Init(const std::span<std::byte> region, Block** block);

  // Returns a pointer to a Block, given a pointer to the start of the usable
  // space inside the block (i.e. the opposite operation to UsableSpace()). In
  // reality, this method just subtracts the appropriate amount from
  // usable_space to point to the start of the owning block.
  //
  // Be aware that this method does not do any checking; passing a random
  // pointer will return a non-null pointer.
  static Block* FromUsableSpace(std::byte* usable_space) {
    return reinterpret_cast<Block*>(usable_space - sizeof(Block) -
                                    PW_ALLOCATOR_POISON_OFFSET);
  }

  // Size including the header.
  size_t OuterSize() const {
    return reinterpret_cast<intptr_t>(Next()) -
           reinterpret_cast<intptr_t>(this);
  }

  // Usable bytes inside the block.
  size_t InnerSize() const {
    return OuterSize() - sizeof(*this) - 2 * PW_ALLOCATOR_POISON_OFFSET;
  }

  // Return the usable space inside this block.
  std::byte* UsableSpace() {
    return reinterpret_cast<std::byte*>(this) + sizeof(*this) +
           PW_ALLOCATOR_POISON_OFFSET;
  }

  // Split this block, such that this block has an inner size of
  // `head_block_inner_size`, and return a new block in the remainder of the
  // space in `new_block`.
  //
  // The "remainder" block will be aligned to an alignof(Block*) boundary (and
  // `head_block_inner_size` will be rounded up). If the remaining space is not
  // large enough to store a new `Block` after rounding, no splitting will
  // occur.
  //
  // This may return the following:
  //   OK: The split completed successfully.
  //   INVALID_ARGUMENT: new_block is null.
  //   FAILED_PRECONDITION: This block is in use and cannot be split.
  //   OUT_OF_RANGE: The requested size for "this" block is greater than the
  //                 current inner_size.
  //   RESOURCE_EXHAUSTED: The split cannot occur because the "remainder"
  //                       block would not be large enough to store a block
  //                       header.
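  //
  // For example (an illustrative sketch; `block`, `remainder`, and the size
  // used here are placeholders, not part of this interface):
  //
  //   // Keep 64 usable bytes in `block`; the rest becomes `remainder`, a
  //   // free block placed immediately after `block` in the same region.
  //   Block* remainder = nullptr;
  //   if (block->Split(64, &remainder).ok()) {
  //     // The two blocks can later be recombined with block->MergeNext().
  //   }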
  Status Split(size_t head_block_inner_size, Block** new_block);

  // Merge this block with the one that comes after it.
  // This function will not merge blocks if either is in use.
  //
  // This may return the following:
  //   OK: Merge was successful.
  //   OUT_OF_RANGE: Attempting to merge the "last" block.
  //   FAILED_PRECONDITION: The blocks could not be merged because one of them
  //                        was in use.
  Status MergeNext();

  // Merge this block with the one that comes before it.
  // This function will not merge blocks if either is in use.
  //
  // Warning: merging with a previous block will invalidate this block
  // instance; do not perform any operations on this instance after merging.
  //
  // This may return the following:
  //   OK: Merge was successful.
  //   OUT_OF_RANGE: Attempting to merge the "first" block.
  //   FAILED_PRECONDITION: The blocks could not be merged because one of them
  //                        was in use.
  Status MergePrev();

  // Returns whether this block is in-use or not.
  bool Used() const { return (NextAsUIntPtr() & kInUseFlag) == kInUseFlag; }

  // Returns whether this block is the last block or not (i.e. whether
  // NextBlock points to a valid block or not). This is needed because
  // NextBlock points to the end of this block, whether there is a valid block
  // there or not.
  bool Last() const { return (NextAsUIntPtr() & kLastFlag) == kLastFlag; }

  // Mark this block as in-use.
  void MarkUsed() {
    next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kInUseFlag));
  }

  // Mark this block as free.
  void MarkFree() {
    next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kInUseFlag));
  }

  // Mark this block as the last one in the chain.
  void MarkLast() {
    next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kLastFlag));
  }

  // Clear the "last" bit from this block.
  void ClearLast() {
    next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kLastFlag));
  }

  // Fetch the block immediately after this one.
  // Note: you should also check Last(); this function may return a valid
  // block, even if one does not exist.
  Block* Next() const {
    return reinterpret_cast<Block*>(
        (NextAsUIntPtr() & ~(kInUseFlag | kLastFlag)));
  }

  // Return the block immediately before this one. This will return nullptr
  // if this is the "first" block.
  Block* Prev() const { return prev_; }

  // Return true if the block is aligned, the prev/next fields match the
  // previous and next blocks, and the poison bytes are not damaged. Otherwise,
  // return false to indicate this block is corrupted.
  bool IsValid() const { return CheckStatus() == BlockStatus::VALID; }

  // Uses PW_DCHECK to log information about the reason if a block is invalid.
  // This function will do nothing if the block is valid.
  void CrashIfInvalid();

 private:
  static constexpr uintptr_t kInUseFlag = 0x1;
  static constexpr uintptr_t kLastFlag = 0x2;
  static constexpr std::byte POISON_PATTERN[8] = {std::byte{0x92},
                                                  std::byte{0x88},
                                                  std::byte{0x0a},
                                                  std::byte{0x00},
                                                  std::byte{0xec},
                                                  std::byte{0xdc},
                                                  std::byte{0xae},
                                                  std::byte{0x4e}};
  enum BlockStatus {
    VALID,
    MISALIGNED,
    PREV_MISMATCHED,
    NEXT_MISMATCHED,
    POISON_CORRUPTED
  };

  Block() = default;

  // Helper to reduce some of the casting nesting in the block management
  // functions.
  uintptr_t NextAsUIntPtr() const { return reinterpret_cast<uintptr_t>(next_); }

  void PoisonBlock();
  bool CheckPoisonBytes() const;
  BlockStatus CheckStatus() const;

  // Note: Consider instead making these next/prev offsets from the current
  // block, with a templated type for the offset size. There are some
  // interesting tradeoffs here; perhaps a pool of small allocations could use
  // 1-byte next/prev offsets to reduce size further.
  Block* next_;
  Block* prev_;
};

}  // namespace pw::allocator