// Copyright 2020 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef COMMON_SLABALLOCATOR_H_
#define COMMON_SLABALLOCATOR_H_

#include "common/PlacementAllocated.h"

#include <cstdint>
#include <type_traits>
#include <utility>

// The SlabAllocator allocates objects out of one or more fixed-size contiguous "slabs" of memory.
// This makes it very quick to allocate and deallocate fixed-size objects because the allocator
// only needs to index an offset into pre-allocated memory. It is similar to a pool allocator that
// recycles memory from previous allocations, except multiple allocations are hosted contiguously
// in one large slab.
//
// Internally, the SlabAllocator stores slabs as a linked list to avoid extra indirections indexing
// into an std::vector. To service an allocation request, the allocator only needs to know the
// first currently available slab. There are three backing linked lists: AVAILABLE, FULL, and
// RECYCLED. A slab that is AVAILABLE can be used to immediately service allocation requests. Once
// it has no remaining space, it is moved to the FULL state. When a FULL slab sees any
// deallocations, it is moved to the RECYCLED state. The RECYCLED state is separate from the
// AVAILABLE state so that deallocations don't immediately prepend slabs to the AVAILABLE list and
// change the current slab servicing allocations. When the AVAILABLE list becomes empty, it is
// swapped with the RECYCLED list.
//
// Allocated objects are placement-allocated with some extra info at the end (we'll call the Object
// plus the extra bytes a "block") used to specify the constant index of the block in its parent
// slab, as well as the index of the next available block. So, following the block next-indices
// forms a linked list of free blocks.
//
// Slab creation: When a new slab is allocated, sufficient memory is allocated for it, and then the
// slab metadata plus all of its child blocks are placement-allocated into the memory. Indices and
// next-indices are initialized to form the free-list of blocks.
//
// Allocation: When an object is allocated, if there is no space available in an existing slab, a
// new slab is created (or an old slab is recycled). The first block of the slab is removed and
// returned.
//
// Deallocation: When an object is deallocated, it can compute the pointer to its parent slab
// because it stores the index of its own allocation. That block is then prepended to the slab's
// free list.
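//
// As an illustrative sketch of the free-list behavior described above (not part of the
// implementation): a freshly created slab with four blocks starts with its free list chained in
// index order,
//
//     freeList -> block 0 -> block 1 -> block 2 -> block 3 -> kInvalidIndex
//
// Allocate() pops the block at the head of the list, and Deallocate() prepends the freed block
// back onto the head, so freed blocks are reused in LIFO order before untouched blocks further
// down the list.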
class SlabAllocatorImpl {
  public:
    // Allocations host their current index and the index of the next free block.
    // Because this is an index, and not a byte offset, it can be much smaller than a size_t.
    // TODO(crbug.com/dawn/825): Is uint8_t sufficient?
    using Index = uint16_t;

    SlabAllocatorImpl(SlabAllocatorImpl&& rhs);

  protected:
    // This is essentially a singly linked list using indices instead of pointers,
    // so we store the index of "this" in |this->index|.
    struct IndexLinkNode : PlacementAllocated {
        IndexLinkNode(Index index, Index nextIndex);

        const Index index;  // The index of this block in the slab.
        Index nextIndex;    // The index of the next available block. kInvalidIndex, if none.
    };

    struct Slab : PlacementAllocated {
        // A slab is placement-allocated into an aligned pointer from a separate allocation.
        // Ownership of the allocation is transferred to the slab on creation.
        // | ---------- allocation --------- |
        // | pad | Slab | data ------------> |
        Slab(char allocation[], IndexLinkNode* head);
        Slab(Slab&& rhs);

        void Splice();

        char* allocation;
        IndexLinkNode* freeList;
        Slab* prev;
        Slab* next;
        Index blocksInUse;
    };

    SlabAllocatorImpl(Index blocksPerSlab, uint32_t objectSize, uint32_t objectAlignment);
    ~SlabAllocatorImpl();

    // Allocate a new block of memory.
    void* Allocate();

    // Deallocate a block of memory.
    void Deallocate(void* ptr);

  private:
    // The maximum value is reserved to indicate the end of the list.
    static Index kInvalidIndex;

    // Get the IndexLinkNode |offset| slots away.
    IndexLinkNode* OffsetFrom(IndexLinkNode* node, std::make_signed_t<Index> offset) const;

    // Compute the pointer to the IndexLinkNode from an allocated object.
    IndexLinkNode* NodeFromObject(void* object) const;

    // Compute the pointer to the object from an IndexLinkNode.
    void* ObjectFromNode(IndexLinkNode* node) const;

    bool IsNodeInSlab(Slab* slab, IndexLinkNode* node) const;

    // The Slab stores a linked-list of free allocations.
    // PushFront/PopFront adds/removes an allocation from the free list.
    void PushFront(Slab* slab, IndexLinkNode* node) const;
    IndexLinkNode* PopFront(Slab* slab) const;

    // Replace the current slab with a new one, and chain the old one off of it.
    // Both slabs may still be used for allocation/deallocation, but older slabs
    // will be a little slower to get allocations from.
    void GetNewSlab();

    const uint32_t mAllocationAlignment;

    // | Slab | pad | Obj | pad | Node | pad | Obj | pad | Node | pad | ....
    // | -----------| mSlabBlocksOffset
    // |            | ---------------------- | mBlockStride
    // |            | ----------| mIndexLinkNodeOffset
    // | --------------------------------------> (mSlabBlocksOffset + mBlocksPerSlab * mBlockStride)

    // A Slab is metadata, followed by the aligned memory to allocate out of. |mSlabBlocksOffset|
    // is the offset to the start of the aligned memory region.
    const uint32_t mSlabBlocksOffset;

    // The IndexLinkNode is stored after the Allocation itself. This is the offset to it.
    const uint32_t mIndexLinkNodeOffset;

    // Because alignment of allocations may introduce padding, |mBlockStride| is the
    // distance between aligned blocks of (Allocation + IndexLinkNode).
    const uint32_t mBlockStride;

    const Index mBlocksPerSlab;  // The total number of blocks in a slab.

    const size_t mTotalAllocationSize;

    struct SentinelSlab : Slab {
        SentinelSlab();
        ~SentinelSlab();

        SentinelSlab(SentinelSlab&& rhs);

        void Prepend(Slab* slab);
    };

    SentinelSlab mAvailableSlabs;  // Available slabs to service allocations.
    SentinelSlab mFullSlabs;       // Full slabs. Stored here so we can skip checking them.
    SentinelSlab mRecycledSlabs;   // Recycled slabs. Not immediately added to |mAvailableSlabs| so
                                   // we don't thrash the current "active" slab.
};
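
// A minimal usage sketch (illustrative only; |Widget| and its constructor argument are
// hypothetical). The first constructor argument is the number of object bytes to reserve per
// slab, so the expression below gives each slab room for 64 objects:
//
//     SlabAllocator<Widget> allocator(64 * sizeof(Widget));
//
//     Widget* widget = allocator.Allocate(/* forwarded to Widget's constructor */ 42);
//     // ... use |widget| ...
//     allocator.Deallocate(widget);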

template <typename T>
class SlabAllocator : public SlabAllocatorImpl {
  public:
    SlabAllocator(size_t totalObjectBytes,
                  uint32_t objectSize = sizeof(T),
                  uint32_t objectAlignment = alignof(T))
        : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {
    }

    // Allocate a block and construct a T in it, forwarding the constructor arguments.
    template <typename... Args>
    T* Allocate(Args&&... args) {
        void* ptr = SlabAllocatorImpl::Allocate();
        return new (ptr) T(std::forward<Args>(args)...);
    }

    // Return the object's block to its slab's free list.
    void Deallocate(T* object) {
        SlabAllocatorImpl::Deallocate(object);
    }
};

#endif  // COMMON_SLABALLOCATOR_H_