// Copyright 2017 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "dawn_native/CommandAllocator.h"

#include "common/Assert.h"
#include "common/Math.h"

#include <algorithm>
#include <climits>
#include <cstdlib>
#include <utility>

namespace dawn_native {

    // TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator

    CommandIterator::CommandIterator() {
        Reset();
    }

    CommandIterator::~CommandIterator() {
        ASSERT(IsEmpty());
    }

    CommandIterator::CommandIterator(CommandIterator&& other) {
        if (!other.IsEmpty()) {
            mBlocks = std::move(other.mBlocks);
            other.Reset();
        }
        Reset();
    }

    CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
        ASSERT(IsEmpty());
        if (!other.IsEmpty()) {
            mBlocks = std::move(other.mBlocks);
            other.Reset();
        }
        Reset();
        return *this;
    }

    CommandIterator::CommandIterator(CommandAllocator allocator)
        : mBlocks(allocator.AcquireBlocks()) {
        Reset();
    }

    void CommandIterator::AcquireCommandBlocks(std::vector<CommandAllocator> allocators) {
        ASSERT(IsEmpty());
        mBlocks.clear();
        for (CommandAllocator& allocator : allocators) {
            CommandBlocks blocks = allocator.AcquireBlocks();
            if (!blocks.empty()) {
                mBlocks.reserve(mBlocks.size() + blocks.size());
                for (BlockDef& block : blocks) {
                    mBlocks.push_back(std::move(block));
                }
            }
        }
        Reset();
    }

    bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
        mCurrentBlock++;
        if (mCurrentBlock >= mBlocks.size()) {
            Reset();
            *commandId = detail::kEndOfBlock;
            return false;
        }
        mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
        return NextCommandId(commandId);
    }
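
    // Illustrative note (a reading of the surrounding code, not upstream documentation):
    // each block holds a sequence of 32-bit command IDs, each followed by its aligned
    // command payload, and is terminated by the kEndOfBlock sentinel ID:
    //
    //     | id | payload | id | payload | ... | kEndOfBlock |
    //
    // NextCommandIdInNewBlock runs when NextCommandId reads kEndOfBlock: it advances to
    // the next block, or reports the end of iteration if the sentinel was on the last one.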

    void CommandIterator::Reset() {
        mCurrentBlock = 0;

        if (mBlocks.empty()) {
            // This will cause the first NextCommandId call to try to move to the next block
            // and stop the iteration immediately, without special casing the initialization.
            mCurrentPtr = reinterpret_cast<uint8_t*>(&mEndOfBlock);
            mBlocks.emplace_back();
            mBlocks[0].size = sizeof(mEndOfBlock);
            mBlocks[0].block = mCurrentPtr;
        } else {
            mCurrentPtr = AlignPtr(mBlocks[0].block, alignof(uint32_t));
        }
    }

    void CommandIterator::MakeEmptyAsDataWasDestroyed() {
        if (IsEmpty()) {
            return;
        }

        for (BlockDef& block : mBlocks) {
            free(block.block);
        }
        mBlocks.clear();
        Reset();
        ASSERT(IsEmpty());
    }

    bool CommandIterator::IsEmpty() const {
        return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
    }

    // Potential TODO(crbug.com/dawn/835):
    //  - Host the size and pointer to the next block in the block itself, to avoid having
    //    an allocation in the vector
    //  - Assume T's alignof is, say, 64 bits, static assert it, and make commandAlignment
    //    a constant in Allocate
    //  - Be able to optimize allocation to one block, for command buffers expected to live
    //    long, to avoid cache misses
    //  - Better block allocation, maybe have a Dawn API to say a command buffer is going to
    //    have a size close to another

    CommandAllocator::CommandAllocator() {
        ResetPointers();
    }

    CommandAllocator::~CommandAllocator() {
        Reset();
    }

    CommandAllocator::CommandAllocator(CommandAllocator&& other)
        : mBlocks(std::move(other.mBlocks)), mLastAllocationSize(other.mLastAllocationSize) {
        other.mBlocks.clear();
        if (!other.IsEmpty()) {
            mCurrentPtr = other.mCurrentPtr;
            mEndPtr = other.mEndPtr;
        } else {
            ResetPointers();
        }
        other.Reset();
    }

    CommandAllocator& CommandAllocator::operator=(CommandAllocator&& other) {
        Reset();
        if (!other.IsEmpty()) {
            std::swap(mBlocks, other.mBlocks);
            mLastAllocationSize = other.mLastAllocationSize;
            mCurrentPtr = other.mCurrentPtr;
            mEndPtr = other.mEndPtr;
        }
        other.Reset();
        return *this;
    }

    void CommandAllocator::Reset() {
        for (BlockDef& block : mBlocks) {
            free(block.block);
        }
        mBlocks.clear();
        mLastAllocationSize = kDefaultBaseAllocationSize;
        ResetPointers();
    }

    bool CommandAllocator::IsEmpty() const {
        return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mDummyEnum[0]);
    }

    CommandBlocks&& CommandAllocator::AcquireBlocks() {
        ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
        ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
        ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
        *reinterpret_cast<uint32_t*>(mCurrentPtr) = detail::kEndOfBlock;

        mCurrentPtr = nullptr;
        mEndPtr = nullptr;
        return std::move(mBlocks);
    }
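
    // A minimal usage sketch of the allocator/iterator pair (illustrative only; MyCmd and
    // Command::My are hypothetical stand-ins for the real command definitions, which live
    // elsewhere, and the templated Allocate/NextCommand helpers are declared in the header):
    //
    //     CommandAllocator allocator;
    //     MyCmd* cmd = allocator.Allocate<MyCmd>(Command::My);
    //     cmd->value = 42;
    //
    //     CommandIterator commands(std::move(allocator));
    //     Command type;
    //     while (commands.NextCommandId(&type)) {
    //         switch (type) {
    //             case Command::My: {
    //                 MyCmd* my = commands.NextCommand<MyCmd>();
    //                 // ... execute the command, then destruct its data ...
    //                 break;
    //             }
    //         }
    //     }
    //     commands.MakeEmptyAsDataWasDestroyed();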

    uint8_t* CommandAllocator::AllocateInNewBlock(uint32_t commandId,
                                                  size_t commandSize,
                                                  size_t commandAlignment) {
        // When there is not enough space, we signal the kEndOfBlock, so that the iterator
        // knows to move to the next one. kEndOfBlock on the last block means the end of
        // the commands.
        uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
        *idAlloc = detail::kEndOfBlock;

        // We'll request a block that can contain at least the command ID, the command and
        // an additional ID to contain the kEndOfBlock tag.
        size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;

        // The computation of the request could overflow.
        if (DAWN_UNLIKELY(requestedBlockSize <= commandSize)) {
            return nullptr;
        }

        if (DAWN_UNLIKELY(!GetNewBlock(requestedBlockSize))) {
            return nullptr;
        }
        return Allocate(commandId, commandSize, commandAlignment);
    }

    bool CommandAllocator::GetNewBlock(size_t minimumSize) {
        // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least
        // minimumSize).
        mLastAllocationSize =
            std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));

        uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
        if (DAWN_UNLIKELY(block == nullptr)) {
            return false;
        }

        mBlocks.push_back({mLastAllocationSize, block});
        mCurrentPtr = AlignPtr(block, alignof(uint32_t));
        mEndPtr = block + mLastAllocationSize;
        return true;
    }

    void CommandAllocator::ResetPointers() {
        mCurrentPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[0]);
        mEndPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[1]);
    }

}  // namespace dawn_native
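
// Illustrative note on block sizing (a reading of GetNewBlock above, not a guarantee):
// block requests double from kDefaultBaseAllocationSize up to the 16384-byte ceiling,
// e.g. assuming, say, a 2 KiB base: 4 KiB, 8 KiB, 16 KiB, then 16 KiB thereafter, while
// the std::max with minimumSize still lets a single oversized command get a block exactly
// as large as it needs.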