//
// Copyright 2017 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Resource:
//    Resource lifetime tracking in the Vulkan back-end.
//

#ifndef LIBANGLE_RENDERER_VULKAN_RESOURCEVK_H_
#define LIBANGLE_RENDERER_VULKAN_RESOURCEVK_H_

#include "common/FixedQueue.h"
#include "common/SimpleMutex.h"
#include "libANGLE/HandleAllocator.h"
#include "libANGLE/renderer/vulkan/vk_utils.h"

#include <queue>

namespace rx
{
namespace vk
{
// We expect almost all reasonable use cases to have at most 4 current contexts. When this is
// exceeded, everything still works, but storage will grow.
static constexpr size_t kMaxFastQueueSerials = 4;
// Serials is an array of queue serials; each serial, paired with its index in the array, forms a
// QueueSerial. The array may expand if needed. Since it is owned by a Resource object, which is
// protected by a shared lock, it is safe to reallocate storage when needed. When it is passed to
// the renderer at garbage collection time, a copy is made. The array size is expected to be
// small, but if it ever becomes too big, this can be changed to a packed array of QueueSerials.
using Serials = angle::FastVector<Serial, kMaxFastQueueSerials>;

// Tracks how a resource is used by ANGLE and by a VkQueue. The serial indicates the most recent
// use of a resource in the VkQueue. We use the monotonically incrementing serial number to
// determine if a resource is currently in use.
class ResourceUse final
{
  public:
    ResourceUse()  = default;
    ~ResourceUse() = default;

    ResourceUse(const QueueSerial &queueSerial) { setQueueSerial(queueSerial); }
    ResourceUse(const Serials &otherSerials) { mSerials = otherSerials; }

    // Copy constructor
    ResourceUse(const ResourceUse &other) : mSerials(other.mSerials) {}
    ResourceUse &operator=(const ResourceUse &other)
    {
        mSerials = other.mSerials;
        return *this;
    }

    // Move constructor
    ResourceUse(ResourceUse &&other) : mSerials(other.mSerials) { other.mSerials.clear(); }
    ResourceUse &operator=(ResourceUse &&other)
    {
        mSerials = other.mSerials;
        other.mSerials.clear();
        return *this;
    }

    bool valid() const { return mSerials.size() > 0; }

    void reset() { mSerials.clear(); }

    const Serials &getSerials() const { return mSerials; }

    void setSerial(SerialIndex index, Serial serial)
    {
        ASSERT(index != kInvalidQueueSerialIndex);
        if (ANGLE_UNLIKELY(mSerials.size() <= index))
        {
            mSerials.resize(index + 1, kZeroSerial);
        }
        ASSERT(mSerials[index] <= serial);
        mSerials[index] = serial;
    }

    void setQueueSerial(const QueueSerial &queueSerial)
    {
        setSerial(queueSerial.getIndex(), queueSerial.getSerial());
    }

    // Returns true if at least one of our serials is greater than the corresponding serial in
    // |serials|.
    bool operator>(const AtomicQueueSerialFixedArray &serials) const
    {
        ASSERT(mSerials.size() <= serials.size());
        for (SerialIndex i = 0; i < mSerials.size(); ++i)
        {
            if (mSerials[i] > serials[i])
            {
                return true;
            }
        }
        return false;
    }

    // Returns true if we contain a serial at |queueSerial|'s index that is greater than
    // |queueSerial|'s serial.
    bool operator>(const QueueSerial &queueSerial) const
    {
        return mSerials.size() > queueSerial.getIndex() &&
               mSerials[queueSerial.getIndex()] > queueSerial.getSerial();
    }

    // Returns true if all our serials are less than or equal to the corresponding serials in
    // |serials|.
    bool operator<=(const AtomicQueueSerialFixedArray &serials) const
    {
        ASSERT(mSerials.size() <= serials.size());
        for (SerialIndex i = 0; i < mSerials.size(); ++i)
        {
            if (mSerials[i] > serials[i])
            {
                return false;
            }
        }
        return true;
    }

    bool usedByCommandBuffer(const QueueSerial &commandBufferQueueSerial) const
    {
        ASSERT(commandBufferQueueSerial.valid());
        // Return true if we have the exact queue serial in the array.
        return mSerials.size() > commandBufferQueueSerial.getIndex() &&
               mSerials[commandBufferQueueSerial.getIndex()] ==
                   commandBufferQueueSerial.getSerial();
    }

    // Merge other's serials into this object.
    void merge(const ResourceUse &other)
    {
        if (mSerials.size() < other.mSerials.size())
        {
            mSerials.resize(other.mSerials.size(), kZeroSerial);
        }

        for (SerialIndex i = 0; i < other.mSerials.size(); ++i)
        {
            if (mSerials[i] < other.mSerials[i])
            {
                mSerials[i] = other.mSerials[i];
            }
        }
    }

  private:
    // The most recent time of use in a VkQueue.
    Serials mSerials;
};
std::ostream &operator<<(std::ostream &os, const ResourceUse &use);
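
// Illustrative sketch of how ResourceUse is meant to be used (not part of the API; the
// queue-serial variables below are hypothetical, and |lastCompletedSerials| stands in for the
// renderer's array of last-completed serials):
//
//   ResourceUse use;
//   use.setQueueSerial(contextQueueSerial);  // recorded into this context's command stream
//   use.setQueueSerial(otherQueueSerial);    // also used under another serial index
//
//   // The resource is busy while any tracked serial is ahead of what the queues have finished:
//   const bool busy = use > lastCompletedSerials;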

class SharedGarbage final : angle::NonCopyable
{
  public:
    SharedGarbage();
    SharedGarbage(SharedGarbage &&other);
    SharedGarbage(const ResourceUse &use, GarbageObjects &&garbage);
    ~SharedGarbage();
    SharedGarbage &operator=(SharedGarbage &&rhs);

    bool destroyIfComplete(Renderer *renderer);
    bool hasResourceUseSubmitted(Renderer *renderer) const;
    // This is not being used now.
    VkDeviceSize getSize() const { return 0; }

  private:
    ResourceUse mLifetime;
    GarbageObjects mGarbage;
};
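
// Illustrative flow for collecting a SharedGarbage (hypothetical variables; |garbageObjects|
// would hold the Vulkan handles being abandoned, |object| is some Resource, and |renderer| is
// the Renderer in use):
//
//   SharedGarbage shared(object->getResourceUse(), std::move(garbageObjects));
//   if (!shared.destroyIfComplete(renderer))
//   {
//       // The GPU may still be using the handles; queue the garbage instead (see
//       // SharedGarbageList below).
//   }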

// SharedGarbageList tracks garbage using an angle::FixedQueue. It allows concurrent add (i.e.,
// enqueue) and cleanup (i.e., dequeue) operations from two threads. Add calls from two threads
// are synchronized using one mutex, and cleanup calls from two threads are synchronized with a
// separate mutex.
template <class T>
class SharedGarbageList final : angle::NonCopyable
{
  public:
    SharedGarbageList()
        : mSubmittedQueue(kInitialQueueCapacity),
          mUnsubmittedQueue(kInitialQueueCapacity),
          mTotalSubmittedGarbageBytes(0),
          mTotalUnsubmittedGarbageBytes(0),
          mTotalGarbageDestroyed(0)
    {}
    ~SharedGarbageList()
    {
        ASSERT(mSubmittedQueue.empty());
        ASSERT(mUnsubmittedQueue.empty());
    }

    void add(Renderer *renderer, T &&garbage)
    {
        VkDeviceSize size = garbage.getSize();
        if (garbage.destroyIfComplete(renderer))
        {
            mTotalGarbageDestroyed += size;
        }
        else
        {
            std::unique_lock<angle::SimpleMutex> enqueueLock(mMutex);
            if (garbage.hasResourceUseSubmitted(renderer))
            {
                addGarbageLocked(mSubmittedQueue, std::move(garbage));
                mTotalSubmittedGarbageBytes += size;
            }
            else
            {
                addGarbageLocked(mUnsubmittedQueue, std::move(garbage));
                // We use relaxed ordering here since this is always modified with mMutex held.
                // The atomic is only there to make TSAN happy.
                mTotalUnsubmittedGarbageBytes.fetch_add(size, std::memory_order_relaxed);
            }
        }
    }

    bool empty() const { return mSubmittedQueue.empty() && mUnsubmittedQueue.empty(); }
    VkDeviceSize getSubmittedGarbageSize() const
    {
        return mTotalSubmittedGarbageBytes.load(std::memory_order_consume);
    }
    VkDeviceSize getUnsubmittedGarbageSize() const
    {
        return mTotalUnsubmittedGarbageBytes.load(std::memory_order_consume);
    }
    VkDeviceSize getDestroyedGarbageSize() const
    {
        return mTotalGarbageDestroyed.load(std::memory_order_consume);
    }
    void resetDestroyedGarbageSize() { mTotalGarbageDestroyed = 0; }

    // Destroys any garbage in the submitted queue whose resource use has completed, oldest
    // first, stopping at the first entry that is still in use.
    void cleanupSubmittedGarbage(Renderer *renderer)
    {
        std::unique_lock<angle::SimpleMutex> lock(mSubmittedQueueDequeueMutex);
        VkDeviceSize bytesDestroyed = 0;
        while (!mSubmittedQueue.empty())
        {
            T &garbage        = mSubmittedQueue.front();
            VkDeviceSize size = garbage.getSize();
            if (!garbage.destroyIfComplete(renderer))
            {
                break;
            }
            bytesDestroyed += size;
            mSubmittedQueue.pop();
        }
        mTotalSubmittedGarbageBytes -= bytesDestroyed;
        mTotalGarbageDestroyed += bytesDestroyed;
    }

    // Check if pending garbage is still pending submission. If not, move it to the submitted
    // queue; otherwise move the element to the end of the queue. Note that this call takes both
    // of this list's locks. Since this call is only used for the pending-submission garbage
    // list, and that list only stores garbage temporarily, it does not destroy any garbage in
    // this list. Moving garbage around is expected to be cheap in general, so lock contention is
    // not expected.
    void cleanupUnsubmittedGarbage(Renderer *renderer)
    {
        std::unique_lock<angle::SimpleMutex> enqueueLock(mMutex);
        size_t count            = mUnsubmittedQueue.size();
        VkDeviceSize bytesMoved = 0;
        for (size_t i = 0; i < count; i++)
        {
            T &garbage = mUnsubmittedQueue.front();
            if (garbage.hasResourceUseSubmitted(renderer))
            {
                bytesMoved += garbage.getSize();
                addGarbageLocked(mSubmittedQueue, std::move(garbage));
            }
            else
            {
                mUnsubmittedQueue.push(std::move(garbage));
            }
            mUnsubmittedQueue.pop();
        }
        mTotalUnsubmittedGarbageBytes -= bytesMoved;
        mTotalSubmittedGarbageBytes += bytesMoved;
    }

  private:
    void addGarbageLocked(angle::FixedQueue<T> &queue, T &&garbage)
    {
        // Expand the queue storage if we only have one empty space left. That one empty space is
        // required by cleanupUnsubmittedGarbage so that we do not need to allocate another
        // temporary storage.
        if (queue.size() >= queue.capacity() - 1)
        {
            std::unique_lock<angle::SimpleMutex> dequeueLock(mSubmittedQueueDequeueMutex);
            size_t newCapacity = queue.capacity() << 1;
            queue.updateCapacity(newCapacity);
        }
        queue.push(std::move(garbage));
    }

    static constexpr size_t kInitialQueueCapacity = 64;
    // Protects both enqueue and dequeue of mUnsubmittedQueue, as well as enqueue of
    // mSubmittedQueue.
    angle::SimpleMutex mMutex;
    // Protects dequeue of mSubmittedQueue, which is expected to be more expensive.
    angle::SimpleMutex mSubmittedQueueDequeueMutex;
    // Holds garbage whose uses have all been submitted to the renderer.
    angle::FixedQueue<T> mSubmittedQueue;
    // Holds garbage for which at least one queue serial has not yet been submitted to the
    // renderer.
    angle::FixedQueue<T> mUnsubmittedQueue;
    // Total bytes of garbage in mSubmittedQueue.
    std::atomic<VkDeviceSize> mTotalSubmittedGarbageBytes;
    // Total bytes of garbage in mUnsubmittedQueue.
    std::atomic<VkDeviceSize> mTotalUnsubmittedGarbageBytes;
    // Total bytes of garbage destroyed since the last resetDestroyedGarbageSize call.
    std::atomic<VkDeviceSize> mTotalGarbageDestroyed;
};
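
// Illustrative threading sketch (hypothetical variables; |renderer| is the Renderer that owns
// the list, and the calls below would typically come from different threads):
//
//   SharedGarbageList<SharedGarbage> garbageList;
//
//   // Context thread: hand off garbage. add() destroys it immediately if the GPU is already
//   // done with it, and otherwise routes it to the submitted or unsubmitted queue.
//   garbageList.add(renderer, std::move(sharedGarbage));
//
//   // Cleanup thread: destroy whatever has completed, oldest first.
//   garbageList.cleanupSubmittedGarbage(renderer);
//
//   // After a queue submission: promote entries whose serials have now been submitted.
//   garbageList.cleanupUnsubmittedGarbage(renderer);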

// This is a helper class for back-end objects used in Vk command buffers. They keep a record
// of their use in ANGLE and VkQueues via ResourceUse.
class Resource : angle::NonCopyable
{
  public:
    virtual ~Resource() {}

    // Complete all recorded and in-flight commands involving this resource
    angle::Result waitForIdle(ContextVk *contextVk,
                              const char *debugMessage,
                              RenderPassClosureReason reason);

    void setSerial(SerialIndex index, Serial serial) { mUse.setSerial(index, serial); }

    void setQueueSerial(const QueueSerial &queueSerial)
    {
        mUse.setSerial(queueSerial.getIndex(), queueSerial.getSerial());
    }

    void mergeResourceUse(const ResourceUse &use) { mUse.merge(use); }

    // Check if this resource is used by a command buffer.
    bool usedByCommandBuffer(const QueueSerial &commandBufferQueueSerial) const
    {
        return mUse.usedByCommandBuffer(commandBufferQueueSerial);
    }

    const ResourceUse &getResourceUse() const { return mUse; }

  protected:
    Resource() {}
    Resource(Resource &&other) : Resource() { mUse = std::move(other.mUse); }
    Resource &operator=(Resource &&rhs)
    {
        std::swap(mUse, rhs.mUse);
        return *this;
    }

    // Current resource lifetime.
    ResourceUse mUse;
};

// Similar to |Resource| above, this tracks object usage, with additional granularity to track
// whether an object is used for read-only or read/write access.
class ReadWriteResource : public Resource
{
  public:
    virtual ~ReadWriteResource() override {}

    // Complete all recorded and in-flight commands involving this resource
    angle::Result waitForIdle(ContextVk *contextVk,
                              const char *debugMessage,
                              RenderPassClosureReason reason)
    {
        return Resource::waitForIdle(contextVk, debugMessage, reason);
    }

    void setWriteQueueSerial(const QueueSerial &writeQueueSerial)
    {
        mUse.setQueueSerial(writeQueueSerial);
        mWriteUse.setQueueSerial(writeQueueSerial);
    }

    // Check if this resource is used by a command buffer.
    bool usedByCommandBuffer(const QueueSerial &commandBufferQueueSerial) const
    {
        return mUse.usedByCommandBuffer(commandBufferQueueSerial);
    }
    bool writtenByCommandBuffer(const QueueSerial &commandBufferQueueSerial) const
    {
        return mWriteUse.usedByCommandBuffer(commandBufferQueueSerial);
    }

    const ResourceUse &getWriteResourceUse() const { return mWriteUse; }

  protected:
    ReadWriteResource() {}
    ReadWriteResource(ReadWriteResource &&other) { *this = std::move(other); }
    ReadWriteResource &operator=(ReadWriteResource &&other)
    {
        Resource::operator=(std::move(other));
        mWriteUse = std::move(other.mWriteUse);
        return *this;
    }

    // Track write use of the object. Only updated for setWriteQueueSerial().
    ResourceUse mWriteUse;
};
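
// Illustrative read/write tracking sketch (hypothetical variables; |imageHelper| stands in for a
// concrete subclass of ReadWriteResource, and the queue serials are made up):
//
//   imageHelper.setQueueSerial(readQueueSerial);        // a read-only use updates mUse only
//   imageHelper.setWriteQueueSerial(writeQueueSerial);  // a write updates both mUse and mWriteUse
//
//   imageHelper.usedByCommandBuffer(readQueueSerial);     // true: any access counts as a use
//   imageHelper.writtenByCommandBuffer(readQueueSerial);  // false: reads do not count as writes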

// Adds "void release(Renderer *)" method for collecting garbage.
// Enables RendererScoped<> for classes that support DeviceScoped<>.
template <class T>
class ReleasableResource final : public Resource
{
  public:
    // Calls collectGarbage() on the object.
    void release(Renderer *renderer);

    const T &get() const { return mObject; }
    T &get() { return mObject; }

  private:
    T mObject;
};
}  // namespace vk
}  // namespace rx

#endif  // LIBANGLE_RENDERER_VULKAN_RESOURCEVK_H_