//
// Copyright 2020 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// CommandQueue.h:
//    A class to process and submit Vulkan command buffers.
//

#ifndef LIBANGLE_RENDERER_VULKAN_COMMAND_QUEUE_H_
#define LIBANGLE_RENDERER_VULKAN_COMMAND_QUEUE_H_

#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>

#include "common/FixedQueue.h"
#include "common/SimpleMutex.h"
#include "common/vulkan/vk_headers.h"
#include "libANGLE/renderer/vulkan/PersistentCommandPool.h"
#include "libANGLE/renderer/vulkan/vk_helpers.h"
#include "vulkan/vulkan_core.h"

namespace rx
{
namespace vk
{
class ExternalFence;
using SharedExternalFence = std::shared_ptr<ExternalFence>;

constexpr size_t kInFlightCommandsLimit    = 50u;
constexpr size_t kMaxFinishedCommandsLimit = 64u;
static_assert(kInFlightCommandsLimit <= kMaxFinishedCommandsLimit);

struct Error
{
    VkResult errorCode;
    const char *file;
    const char *function;
    uint32_t line;
};

class FenceRecycler
{
  public:
    FenceRecycler() {}
    ~FenceRecycler() {}
    void destroy(ErrorContext *context);

    void fetch(VkDevice device, Fence *fenceOut);
    void recycle(Fence &&fence);

  private:
    angle::SimpleMutex mMutex;
    Recycler<Fence> mRecycler;
};

class RecyclableFence final : angle::NonCopyable
{
  public:
    RecyclableFence();
    ~RecyclableFence();

    VkResult init(VkDevice device, FenceRecycler *recycler);
    // Returns the fence back to the recycler if it is still attached; destroys the fence
    // otherwise.  Do NOT call directly when the object is controlled by a shared pointer.
    void destroy(VkDevice device);
    void detachRecycler() { mRecycler = nullptr; }

    bool valid() const { return mFence.valid(); }
    const Fence &get() const { return mFence; }

  private:
    Fence mFence;
    FenceRecycler *mRecycler;
};

using SharedFence = AtomicSharedPtr<RecyclableFence>;
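
// Illustrative lifecycle sketch (an assumed example, not ANGLE-authored usage; "device" is a
// valid VkDevice and "recycler" a live FenceRecycler):
//
//     RecyclableFence recyclableFence;
//     if (recyclableFence.init(device, &recycler) == VK_SUCCESS)
//     {
//         const Fence &fence = recyclableFence.get();
//         // ... submit work that signals `fence` ...
//     }
//     recyclableFence.destroy(device);  // returns the fence to the recycler if still attached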

class CommandPoolAccess;
class CommandBatch final : angle::NonCopyable
{
  public:
    CommandBatch();
    ~CommandBatch();
    CommandBatch(CommandBatch &&other);
    CommandBatch &operator=(CommandBatch &&other);

    void destroy(VkDevice device);
    angle::Result release(ErrorContext *context);

    void setQueueSerial(const QueueSerial &serial);
    void setProtectionType(ProtectionType protectionType);
    void setPrimaryCommands(PrimaryCommandBuffer &&primaryCommands,
                            CommandPoolAccess *commandPoolAccess);
    void setSecondaryCommands(SecondaryCommandBufferCollector &&secondaryCommands);
    VkResult initFence(VkDevice device, FenceRecycler *recycler);
    void setExternalFence(SharedExternalFence &&externalFence);

    const QueueSerial &getQueueSerial() const;
    const PrimaryCommandBuffer &getPrimaryCommands() const;
    const SharedExternalFence &getExternalFence();

    bool hasFence() const;
    VkFence getFenceHandle() const;
    VkResult getFenceStatus(VkDevice device) const;
    VkResult waitFence(VkDevice device, uint64_t timeout) const;
    VkResult waitFenceUnlocked(VkDevice device,
                               uint64_t timeout,
                               std::unique_lock<angle::SimpleMutex> *lock) const;

  private:
    QueueSerial mQueueSerial;
    ProtectionType mProtectionType;
    PrimaryCommandBuffer mPrimaryCommands;
    CommandPoolAccess *mCommandPoolAccess;  // The CommandPoolAccess that is responsible for
                                            // deleting mPrimaryCommands under a lock.
    SecondaryCommandBufferCollector mSecondaryCommands;
    SharedFence mFence;
    SharedExternalFence mExternalFence;
};
using CommandBatchQueue = angle::FixedQueue<CommandBatch>;
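
// A sketch of a plausible batch lifecycle based on the declarations above (names such as
// "device", "fenceRecycler", and "commandPoolAccess" are assumptions, not part of this header):
//
//     CommandBatch batch;
//     batch.setQueueSerial(submitQueueSerial);
//     batch.setProtectionType(protectionType);
//     batch.setPrimaryCommands(std::move(primaryCommands), &commandPoolAccess);
//     if (batch.initFence(device, &fenceRecycler) == VK_SUCCESS)
//     {
//         // ... vkQueueSubmit using batch.getPrimaryCommands() and batch.getFenceHandle() ...
//         batch.waitFence(device, timeout);  // later: block until the GPU finishes the batch
//     }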

class DeviceQueueMap;

class QueueFamily final : angle::NonCopyable
{
  public:
    static const uint32_t kInvalidIndex = std::numeric_limits<uint32_t>::max();

    static uint32_t FindIndex(const std::vector<VkQueueFamilyProperties> &queueFamilyProperties,
                              VkQueueFlags includeFlags,
                              VkQueueFlags optionalFlags,
                              VkQueueFlags excludeFlags,
                              uint32_t *matchCount);
    static const uint32_t kQueueCount = static_cast<uint32_t>(egl::ContextPriority::EnumCount);
    static const float kQueuePriorities[static_cast<uint32_t>(egl::ContextPriority::EnumCount)];

    QueueFamily() : mProperties{}, mQueueFamilyIndex(kInvalidIndex) {}
    ~QueueFamily() {}

    void initialize(const VkQueueFamilyProperties &queueFamilyProperties,
                    uint32_t queueFamilyIndex);
    bool valid() const { return (mQueueFamilyIndex != kInvalidIndex); }
    uint32_t getQueueFamilyIndex() const { return mQueueFamilyIndex; }
    const VkQueueFamilyProperties *getProperties() const { return &mProperties; }
    bool isGraphics() const { return ((mProperties.queueFlags & VK_QUEUE_GRAPHICS_BIT) > 0); }
    bool isCompute() const { return ((mProperties.queueFlags & VK_QUEUE_COMPUTE_BIT) > 0); }
    bool supportsProtected() const
    {
        return ((mProperties.queueFlags & VK_QUEUE_PROTECTED_BIT) > 0);
    }
    uint32_t getDeviceQueueCount() const { return mProperties.queueCount; }

  private:
    VkQueueFamilyProperties mProperties;
    uint32_t mQueueFamilyIndex;
};
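
// Illustrative use of QueueFamily::FindIndex (a sketch; the flag choices here are example
// assumptions, not the selection policy ANGLE actually applies):
//
//     uint32_t matchCount = 0;
//     uint32_t familyIndex = QueueFamily::FindIndex(queueFamilyProperties,
//                                                   VK_QUEUE_GRAPHICS_BIT,   // required flags
//                                                   VK_QUEUE_COMPUTE_BIT,    // optional flags
//                                                   VK_QUEUE_PROTECTED_BIT,  // excluded flags
//                                                   &matchCount);
//     if (familyIndex != QueueFamily::kInvalidIndex)
//     {
//         queueFamily.initialize(queueFamilyProperties[familyIndex], familyIndex);
//     }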

class DeviceQueueMap final
{
  public:
    DeviceQueueMap() : mQueueFamilyIndex(QueueFamily::kInvalidIndex), mIsProtected(false) {}
    ~DeviceQueueMap();

    void initialize(VkDevice device,
                    const QueueFamily &queueFamily,
                    bool makeProtected,
                    uint32_t queueIndex,
                    uint32_t queueCount);
    void destroy();

    bool valid() const { return (mQueueFamilyIndex != QueueFamily::kInvalidIndex); }
    uint32_t getQueueFamilyIndex() const { return mQueueFamilyIndex; }
    bool isProtected() const { return mIsProtected; }
    egl::ContextPriority getDevicePriority(egl::ContextPriority priority) const
    {
        return mQueueAndIndices[priority].devicePriority;
    }
    DeviceQueueIndex getDeviceQueueIndex(egl::ContextPriority priority) const
    {
        return DeviceQueueIndex(mQueueFamilyIndex, mQueueAndIndices[priority].index);
    }
    const VkQueue &getQueue(egl::ContextPriority priority) const
    {
        return mQueueAndIndices[priority].queue;
    }

    // Wait for all queues to be idle; called on device loss and destruction.
    void waitAllQueuesIdle();

  private:
    uint32_t mQueueFamilyIndex;
    bool mIsProtected;
    struct QueueAndIndex
    {
        // The actual priority that is used.
        egl::ContextPriority devicePriority;
        VkQueue queue;
        // The queueIndex used for vkGetDeviceQueue.
        uint32_t index;
    };
    angle::PackedEnumMap<egl::ContextPriority, QueueAndIndex> mQueueAndIndices;
};
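
// Illustrative lookups (a sketch; "queueMap" is an initialized DeviceQueueMap):
//
//     egl::ContextPriority requested = egl::ContextPriority::High;
//     egl::ContextPriority actual    = queueMap.getDevicePriority(requested);  // may differ
//     VkQueue queue                  = queueMap.getQueue(requested);
//     DeviceQueueIndex queueIndex    = queueMap.getDeviceQueueIndex(requested);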

class CommandPoolAccess : angle::NonCopyable
{
  public:
    CommandPoolAccess();
    ~CommandPoolAccess();
    angle::Result initCommandPool(ErrorContext *context,
                                  ProtectionType protectionType,
                                  const uint32_t queueFamilyIndex);
    void destroy(VkDevice device);
    void destroyPrimaryCommandBuffer(VkDevice device, PrimaryCommandBuffer *primaryCommands) const;
    angle::Result collectPrimaryCommandBuffer(ErrorContext *context,
                                              const ProtectionType protectionType,
                                              PrimaryCommandBuffer *primaryCommands);
    angle::Result flushOutsideRPCommands(Context *context,
                                         ProtectionType protectionType,
                                         egl::ContextPriority priority,
                                         OutsideRenderPassCommandBufferHelper **outsideRPCommands);
    angle::Result flushRenderPassCommands(Context *context,
                                          const ProtectionType &protectionType,
                                          const egl::ContextPriority &priority,
                                          const RenderPass &renderPass,
                                          VkFramebuffer framebufferOverride,
                                          RenderPassCommandBufferHelper **renderPassCommands);

    void flushWaitSemaphores(ProtectionType protectionType,
                             egl::ContextPriority priority,
                             std::vector<VkSemaphore> &&waitSemaphores,
                             std::vector<VkPipelineStageFlags> &&waitSemaphoreStageMasks);

    angle::Result getCommandsAndWaitSemaphores(
        ErrorContext *context,
        ProtectionType protectionType,
        egl::ContextPriority priority,
        CommandBatch *batchOut,
        std::vector<VkImageMemoryBarrier> &&imagesToTransitionToForeign,
        std::vector<VkSemaphore> *waitSemaphoresOut,
        std::vector<VkPipelineStageFlags> *waitSemaphoreStageMasksOut);

  private:
    angle::Result ensurePrimaryCommandBufferValidLocked(ErrorContext *context,
                                                        const ProtectionType &protectionType,
                                                        const egl::ContextPriority &priority)
    {
        CommandsState &state = mCommandsStateMap[priority][protectionType];
        if (state.primaryCommands.valid())
        {
            return angle::Result::Continue;
        }
        ANGLE_TRY(mPrimaryCommandPoolMap[protectionType].allocate(context, &state.primaryCommands));
        VkCommandBufferBeginInfo beginInfo = {};
        beginInfo.sType                    = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        beginInfo.flags                    = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
        beginInfo.pInheritanceInfo         = nullptr;
        ANGLE_VK_TRY(context, state.primaryCommands.begin(beginInfo));
        return angle::Result::Continue;
    }

    // This mutex ensures the Vulkan command pool is externally synchronized: no two threads may
    // operate on command buffers allocated from the same command pool at the same time. The
    // operations this mutex protects include:
    // 1) recording commands on any command buffer allocated from the same command pool;
    // 2) allocating, freeing, and resetting command buffers from the same command pool;
    // 3) any operation on the command pool itself.
    mutable angle::SimpleMutex mCmdPoolMutex;

    using PrimaryCommandPoolMap = angle::PackedEnumMap<ProtectionType, PersistentCommandPool>;
    using CommandsStateMap =
        angle::PackedEnumMap<egl::ContextPriority,
                             angle::PackedEnumMap<ProtectionType, CommandsState>>;

    CommandsStateMap mCommandsStateMap;
    // Keeps a free list of reusable primary command buffers.
    PrimaryCommandPoolMap mPrimaryCommandPoolMap;
};
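
// Vulkan command pools are externally synchronized, hence the mCmdPoolMutex funnel above. A
// hypothetical caller only touches the locking-aware entry points, e.g.:
//
//     commandPoolAccess.flushWaitSemaphores(protectionType, priority,
//                                           std::move(waitSemaphores),
//                                           std::move(waitSemaphoreStageMasks));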

// Note: all public APIs of the CommandQueue class must be thread safe.
class CommandQueue : angle::NonCopyable
{
  public:
    CommandQueue();
    ~CommandQueue();

    angle::Result init(ErrorContext *context,
                       const QueueFamily &queueFamily,
                       bool enableProtectedContent,
                       uint32_t queueCount);

    void destroy(ErrorContext *context);

    void handleDeviceLost(Renderer *renderer);

    // These public APIs are inherently thread safe. Thread unsafe methods must be protected
    // methods that are only accessed via the ThreadSafeCommandQueue API.
    egl::ContextPriority getDriverPriority(egl::ContextPriority priority) const
    {
        return mQueueMap.getDevicePriority(priority);
    }

    DeviceQueueIndex getDeviceQueueIndex(egl::ContextPriority priority) const
    {
        return mQueueMap.getDeviceQueueIndex(priority);
    }

    VkQueue getQueue(egl::ContextPriority priority) const { return mQueueMap.getQueue(priority); }
    // The following are used to implement EGL_ANGLE_device_vulkan, and are called by the
    // application when it wants to access the VkQueue previously retrieved from ANGLE.  Do not
    // call these for synchronization within ANGLE.
    void lockVulkanQueueForExternalAccess() { mQueueSubmitMutex.lock(); }
    void unlockVulkanQueueForExternalAccess() { mQueueSubmitMutex.unlock(); }

    Serial getLastSubmittedSerial(SerialIndex index) const { return mLastSubmittedSerials[index]; }

    // Whether all queue serials of the ResourceUse have been finished by ANGLE or Vulkan.
    bool hasResourceUseFinished(const ResourceUse &use) const
    {
        return use <= mLastCompletedSerials;
    }
    bool hasQueueSerialFinished(const QueueSerial &queueSerial) const
    {
        return queueSerial <= mLastCompletedSerials;
    }
    // Whether all queue serials of the ResourceUse have been submitted to Vulkan.
    bool hasResourceUseSubmitted(const ResourceUse &use) const
    {
        return use <= mLastSubmittedSerials;
    }
    bool hasQueueSerialSubmitted(const QueueSerial &queueSerial) const
    {
        return queueSerial <= mLastSubmittedSerials;
    }
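
    // Illustrative semantics of the checks above (a sketch; "qs" is a serial associated with a
    // submission on this queue):
    //
    //     queue.hasQueueSerialSubmitted(qs);  // true once the serial was handed to vkQueueSubmit
    //     queue.hasQueueSerialFinished(qs);   // true once the GPU has completed it
    //     // "finished" implies "submitted"; the converse does not hold.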

    // Wait until the desired serial has been completed.
    angle::Result finishResourceUse(ErrorContext *context,
                                    const ResourceUse &use,
                                    uint64_t timeout);
    angle::Result finishQueueSerial(ErrorContext *context,
                                    const QueueSerial &queueSerial,
                                    uint64_t timeout);
    angle::Result waitIdle(ErrorContext *context, uint64_t timeout);
    angle::Result waitForResourceUseToFinishWithUserTimeout(ErrorContext *context,
                                                            const ResourceUse &use,
                                                            uint64_t timeout,
                                                            VkResult *result);
    bool isBusy(Renderer *renderer) const;

    angle::Result submitCommands(ErrorContext *context,
                                 ProtectionType protectionType,
                                 egl::ContextPriority priority,
                                 VkSemaphore signalSemaphore,
                                 SharedExternalFence &&externalFence,
                                 std::vector<VkImageMemoryBarrier> &&imagesToTransitionToForeign,
                                 const QueueSerial &submitQueueSerial);

    angle::Result queueSubmitOneOff(ErrorContext *context,
                                    ProtectionType protectionType,
                                    egl::ContextPriority contextPriority,
                                    VkCommandBuffer commandBufferHandle,
                                    VkSemaphore waitSemaphore,
                                    VkPipelineStageFlags waitSemaphoreStageMask,
                                    const QueueSerial &submitQueueSerial);

    // Note: some errors from present are not fatal.
    VkResult queuePresent(egl::ContextPriority contextPriority,
                          const VkPresentInfoKHR &presentInfo);

    angle::Result checkCompletedCommands(ErrorContext *context)
    {
        std::lock_guard<angle::SimpleMutex> lock(mCmdCompleteMutex);
        return checkCompletedCommandsLocked(context);
    }

    bool hasFinishedCommands() const { return !mFinishedCommandBatches.empty(); }

    angle::Result checkAndCleanupCompletedCommands(ErrorContext *context)
    {
        ANGLE_TRY(checkCompletedCommands(context));

        if (!mFinishedCommandBatches.empty())
        {
            ANGLE_TRY(releaseFinishedCommandsAndCleanupGarbage(context));
        }

        return angle::Result::Continue;
    }

    ANGLE_INLINE void flushWaitSemaphores(
        ProtectionType protectionType,
        egl::ContextPriority priority,
        std::vector<VkSemaphore> &&waitSemaphores,
        std::vector<VkPipelineStageFlags> &&waitSemaphoreStageMasks)
    {
        return mCommandPoolAccess.flushWaitSemaphores(protectionType, priority,
                                                      std::move(waitSemaphores),
                                                      std::move(waitSemaphoreStageMasks));
    }
    ANGLE_INLINE angle::Result flushOutsideRPCommands(
        Context *context,
        ProtectionType protectionType,
        egl::ContextPriority priority,
        OutsideRenderPassCommandBufferHelper **outsideRPCommands)
    {
        return mCommandPoolAccess.flushOutsideRPCommands(context, protectionType, priority,
                                                         outsideRPCommands);
    }
    ANGLE_INLINE angle::Result flushRenderPassCommands(
        Context *context,
        ProtectionType protectionType,
        const egl::ContextPriority &priority,
        const RenderPass &renderPass,
        VkFramebuffer framebufferOverride,
        RenderPassCommandBufferHelper **renderPassCommands)
    {
        return mCommandPoolAccess.flushRenderPassCommands(
            context, protectionType, priority, renderPass, framebufferOverride, renderPassCommands);
    }

    const angle::VulkanPerfCounters getPerfCounters() const;
    void resetPerFramePerfCounters();

    // Release finished commands and clean up garbage immediately, or request async clean up if
    // enabled.
    angle::Result releaseFinishedCommandsAndCleanupGarbage(ErrorContext *context);
    angle::Result releaseFinishedCommands(ErrorContext *context)
    {
        std::lock_guard<angle::SimpleMutex> lock(mCmdReleaseMutex);
        return releaseFinishedCommandsLocked(context);
    }
    angle::Result postSubmitCheck(ErrorContext *context);

    bool isInFlightCommandsEmpty() const;

    // Try to clean up garbage and return whether anything was cleaned. Otherwise, wait for
    // mInFlightCommands and retry.
    angle::Result cleanupSomeGarbage(ErrorContext *context,
                                     size_t minInFlightBatchesToKeep,
                                     bool *anyGarbageCleanedOut);

    // All these private APIs are called with the mutex locked, so we must not take the lock again.
  private:
    // Check the first command buffer in mInFlightCommands and update mLastCompletedSerials if
    // finished.
    angle::Result checkOneCommandBatchLocked(ErrorContext *context, bool *finished);
    // Similar to checkOneCommandBatch, except we will wait for it to finish.
    angle::Result finishOneCommandBatch(ErrorContext *context,
                                        uint64_t timeout,
                                        std::unique_lock<angle::SimpleMutex> *lock);
    void onCommandBatchFinishedLocked(CommandBatch &&batch);
    // Walk mFinishedCommands, reset and recycle all command buffers.
    angle::Result releaseFinishedCommandsLocked(ErrorContext *context);
    // Walk mInFlightCommands, check and update mLastCompletedSerials for all commands that are
    // finished.
    angle::Result checkCompletedCommandsLocked(ErrorContext *context);

    angle::Result queueSubmitLocked(ErrorContext *context,
                                    egl::ContextPriority contextPriority,
                                    const VkSubmitInfo &submitInfo,
                                    DeviceScoped<CommandBatch> &commandBatch,
                                    const QueueSerial &submitQueueSerial);

    void pushInFlightBatchLocked(CommandBatch &&batch);
    void moveInFlightBatchToFinishedQueueLocked(CommandBatch &&batch);
    void popFinishedBatchLocked();
    void popInFlightBatchLocked();

    CommandPoolAccess mCommandPoolAccess;

    // Warning: the mutexes must be locked in the order declared below.
    // Protects multi-threaded access to mInFlightCommands.push/back and ensures ordering of
    // submission. Also protects mPerfCounters.
    mutable angle::SimpleMutex mQueueSubmitMutex;
    // Protects multi-threaded access to mInFlightCommands.pop/front and
    // mFinishedCommandBatches.push/back.
    angle::SimpleMutex mCmdCompleteMutex;
    // Protects multi-threaded access to mFinishedCommandBatches.pop/front.
    angle::SimpleMutex mCmdReleaseMutex;
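
    // Illustrative lock-order sketch (a hypothetical path that needs all three mutexes, following
    // the declared order above):
    //
    //     std::lock_guard<angle::SimpleMutex> submitLock(mQueueSubmitMutex);    // 1st
    //     std::lock_guard<angle::SimpleMutex> completeLock(mCmdCompleteMutex);  // 2nd
    //     std::lock_guard<angle::SimpleMutex> releaseLock(mCmdReleaseMutex);    // 3rd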

    CommandBatchQueue mInFlightCommands;
    // Temporary storage for finished command batches that should be reset.
    CommandBatchQueue mFinishedCommandBatches;

    // Combined number of batches in the mInFlightCommands and mFinishedCommandBatches queues.
    // Used instead of calculating the sum because doing so is not thread safe and would require
    // the mCmdCompleteMutex lock.
    std::atomic_size_t mNumAllCommands;

    // Queue serial management.
    AtomicQueueSerialFixedArray mLastSubmittedSerials;
    // This queue serial can be read/written from different threads, so we need to use atomic
    // operations to access the underlying value. Since we only do load/store on this value, it
    // should be just a normal uint64_t load/store on most platforms.
    AtomicQueueSerialFixedArray mLastCompletedSerials;

    // QueueMap
    DeviceQueueMap mQueueMap;

    FenceRecycler mFenceRecycler;

    angle::VulkanPerfCounters mPerfCounters;
};

ANGLE_INLINE bool CommandQueue::isInFlightCommandsEmpty() const
{
    return mInFlightCommands.empty();
}

// A helper thread used to clean up garbage.
class CleanUpThread : public ErrorContext
{
  public:
    CleanUpThread(Renderer *renderer, CommandQueue *commandQueue);
    ~CleanUpThread() override;

    // Context
    void handleError(VkResult result,
                     const char *file,
                     const char *function,
                     unsigned int line) override;

    angle::Result init();

    void destroy(ErrorContext *context);

    void requestCleanUp();

    std::thread::id getThreadId() const { return mTaskThread.get_id(); }

  private:
    bool hasPendingError() const
    {
        std::lock_guard<angle::SimpleMutex> queueLock(mErrorMutex);
        return !mErrors.empty();
    }
    angle::Result checkAndPopPendingError(ErrorContext *errorHandlingContext);

    // Entry point for the clean up thread; calls processTasksImpl to do the work.
    // Called by Renderer::initializeDevice on the main thread.
    void processTasks();

    // Clean up thread loop, called by processTasks. The loop waits for work to
    // be submitted from a separate thread.
    angle::Result processTasksImpl(bool *exitThread);

    CommandQueue *const mCommandQueue;

    mutable angle::SimpleMutex mErrorMutex;
    std::queue<Error> mErrors;

    // Command queue worker thread.
    std::thread mTaskThread;
    bool mTaskThreadShouldExit;
    std::mutex mMutex;
    std::condition_variable mWorkAvailableCondition;
    std::atomic<bool> mNeedCleanUp;
};
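
// Illustrative flow (a sketch; "renderer", "commandQueue", and "context" are assumed valid):
//
//     CleanUpThread cleanUpThread(renderer, commandQueue);
//     ANGLE_TRY(cleanUpThread.init());   // spawns mTaskThread running processTasks()
//     cleanUpThread.requestCleanUp();    // wakes the thread via mWorkAvailableCondition
//     cleanUpThread.destroy(context);    // asks the worker to exit and waits for it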

// Provides access to the PrimaryCommandBuffer while also locking the corresponding CommandPool.
class [[nodiscard]] ScopedPrimaryCommandBuffer final
{
  public:
    explicit ScopedPrimaryCommandBuffer(VkDevice device) : mCommandBuffer(device) {}

    void assign(std::unique_lock<angle::SimpleMutex> &&poolLock,
                PrimaryCommandBuffer &&commandBuffer)
    {
        ASSERT(poolLock.owns_lock());
        ASSERT(commandBuffer.valid());
        ASSERT(mPoolLock.mutex() == nullptr);
        ASSERT(!mCommandBuffer.get().valid());
        mPoolLock            = std::move(poolLock);
        mCommandBuffer.get() = std::move(commandBuffer);
    }

    PrimaryCommandBuffer &get()
    {
        ASSERT(mPoolLock.owns_lock());
        ASSERT(mCommandBuffer.get().valid());
        return mCommandBuffer.get();
    }

    DeviceScoped<PrimaryCommandBuffer> unlockAndRelease()
    {
        ASSERT((mCommandBuffer.get().valid() && mPoolLock.owns_lock()) ||
               (!mCommandBuffer.get().valid() && mPoolLock.mutex() == nullptr));
        mPoolLock = {};
        return std::move(mCommandBuffer);
    }

  private:
    std::unique_lock<angle::SimpleMutex> mPoolLock;
    DeviceScoped<PrimaryCommandBuffer> mCommandBuffer;
};
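
// Illustrative use (a sketch; "poolLock" must already own the pool's mutex and
// "primaryCommandBuffer" must be a valid PrimaryCommandBuffer):
//
//     ScopedPrimaryCommandBuffer scoped(device);
//     scoped.assign(std::move(poolLock), std::move(primaryCommandBuffer));
//     scoped.get().end();  // record/end while the pool stays locked
//     DeviceScoped<PrimaryCommandBuffer> released = scoped.unlockAndRelease();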
}  // namespace vk

}  // namespace rx

#endif  // LIBANGLE_RENDERER_VULKAN_COMMAND_QUEUE_H_