//
// Copyright 2020 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// CommandProcessor.h:
//    A class to process and submit Vulkan command buffers that can be
//    used in an asynchronous worker thread.
//

#ifndef LIBANGLE_RENDERER_VULKAN_COMMAND_PROCESSOR_H_
#define LIBANGLE_RENDERER_VULKAN_COMMAND_PROCESSOR_H_

#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>

#include "common/FixedQueue.h"
#include "common/vulkan/vk_headers.h"
#include "libANGLE/renderer/vulkan/PersistentCommandPool.h"
#include "libANGLE/renderer/vulkan/vk_helpers.h"

namespace rx
{
class RendererVk;
class CommandProcessor;

namespace vk
{
class ExternalFence;
using SharedExternalFence = std::shared_ptr<ExternalFence>;

constexpr size_t kMaxCommandProcessorTasksLimit = 16u;
constexpr size_t kInFlightCommandsLimit         = 50u;
constexpr size_t kMaxFinishedCommandsLimit      = 64u;

enum class SubmitPolicy
{
    AllowDeferred,
    EnsureSubmitted,
};

struct Error
{
    VkResult errorCode;
    const char *file;
    const char *function;
    uint32_t line;
};

class FenceRecycler;
// This is a RAII class that manages a refcounted VkFence object with auto-release and recycling.
class SharedFence final
{
  public:
    SharedFence();
    SharedFence(const SharedFence &other);
    SharedFence(SharedFence &&other);
    ~SharedFence();
    // Copy assignment will add a reference count to the underlying object.
    SharedFence &operator=(const SharedFence &other);
    // Move assignment will move the reference count from the other object to this one.
    SharedFence &operator=(SharedFence &&other);

    // Initialize it with a new VkFence, either fetched from the recycler or newly created.
    VkResult init(VkDevice device, FenceRecycler *recycler);
    // Destroy it immediately (will not recycle).
    void destroy(VkDevice device);
    // Release the VkFence (to the recycler).
    void release();
    // Return true if the underlying VkFence is valid.
    operator bool() const;
    const Fence &get() const
    {
        ASSERT(mRefCountedFence != nullptr && mRefCountedFence->isReferenced());
        return mRefCountedFence->get();
    }

    // The following two APIs can be called without holding a lock. Since the fence is refcounted
    // and this object holds a reference to the VkFence, no one can come in and destroy the
    // VkFence.
    VkResult getStatus(VkDevice device) const;
    VkResult wait(VkDevice device, uint64_t timeout) const;

  private:
    RefCounted<Fence> *mRefCountedFence;
    FenceRecycler *mRecycler;
};
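
// Illustrative lifecycle sketch (not part of this header): |device|, |recycler|, and |timeoutNs|
// are assumed to be a valid VkDevice, a FenceRecycler, and a timeout in nanoseconds.
//
//     SharedFence fence;
//     if (fence.init(device, &recycler) == VK_SUCCESS)
//     {
//         // ... submit work that signals the fence ...
//         if (fence.wait(device, timeoutNs) == VK_SUCCESS)
//         {
//             fence.release();  // Returns the VkFence to the recycler.
//         }
//     }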

class FenceRecycler
{
  public:
    FenceRecycler() {}
    ~FenceRecycler() {}
    void destroy(Context *context);

    void fetch(VkDevice device, Fence *fenceOut);
    void recycle(Fence &&fence);

  private:
    std::mutex mMutex;
    Recycler<Fence> mRecyler;
};

struct SwapchainStatus
{
    std::atomic<bool> isPending;
    VkResult lastPresentResult = VK_NOT_READY;
};

enum class CustomTask
{
    Invalid = 0,
    // Flushes wait semaphores
    FlushWaitSemaphores,
    // Process SecondaryCommandBuffer commands into the primary CommandBuffer.
    ProcessOutsideRenderPassCommands,
    ProcessRenderPassCommands,
    // End the current command buffer and submit commands to the queue
    FlushAndQueueSubmit,
    // Submit custom command buffer, excludes some state management
    OneOffQueueSubmit,
    // Execute QueuePresent
    Present,
};

// CommandProcessorTask interface
class CommandProcessorTask
{
  public:
    CommandProcessorTask() { initTask(); }

    void initTask();

    void initFlushWaitSemaphores(ProtectionType protectionType,
                                 egl::ContextPriority priority,
                                 std::vector<VkSemaphore> &&waitSemaphores,
                                 std::vector<VkPipelineStageFlags> &&waitSemaphoreStageMasks);

    void initOutsideRenderPassProcessCommands(ProtectionType protectionType,
                                              egl::ContextPriority priority,
                                              OutsideRenderPassCommandBufferHelper *commandBuffer);

    void initRenderPassProcessCommands(ProtectionType protectionType,
                                       egl::ContextPriority priority,
                                       RenderPassCommandBufferHelper *commandBuffer,
                                       const RenderPass *renderPass);

    void initPresent(egl::ContextPriority priority,
                     const VkPresentInfoKHR &presentInfo,
                     SwapchainStatus *swapchainStatus);

    void initFlushAndQueueSubmit(VkSemaphore semaphore,
                                 SharedExternalFence &&externalFence,
                                 ProtectionType protectionType,
                                 egl::ContextPriority priority,
                                 const QueueSerial &submitQueueSerial);

    void initOneOffQueueSubmit(VkCommandBuffer commandBufferHandle,
                               ProtectionType protectionType,
                               egl::ContextPriority priority,
                               VkSemaphore waitSemaphore,
                               VkPipelineStageFlags waitSemaphoreStageMask,
                               const QueueSerial &submitQueueSerial);

    CommandProcessorTask &operator=(CommandProcessorTask &&rhs);

    CommandProcessorTask(CommandProcessorTask &&other) : CommandProcessorTask()
    {
        *this = std::move(other);
    }

    const QueueSerial &getSubmitQueueSerial() const { return mSubmitQueueSerial; }
    CustomTask getTaskCommand() { return mTask; }
    std::vector<VkSemaphore> &getWaitSemaphores() { return mWaitSemaphores; }
    std::vector<VkPipelineStageFlags> &getWaitSemaphoreStageMasks()
    {
        return mWaitSemaphoreStageMasks;
    }
    VkSemaphore getSemaphore() const { return mSemaphore; }
    SharedExternalFence &getExternalFence() { return mExternalFence; }
    egl::ContextPriority getPriority() const { return mPriority; }
    ProtectionType getProtectionType() const { return mProtectionType; }
    VkCommandBuffer getOneOffCommandBuffer() const { return mOneOffCommandBuffer; }
    VkSemaphore getOneOffWaitSemaphore() const { return mOneOffWaitSemaphore; }
    VkPipelineStageFlags getOneOffWaitSemaphoreStageMask() const
    {
        return mOneOffWaitSemaphoreStageMask;
    }
    const VkPresentInfoKHR &getPresentInfo() const { return mPresentInfo; }
    SwapchainStatus *getSwapchainStatus() const { return mSwapchainStatus; }
    const RenderPass *getRenderPass() const { return mRenderPass; }
    OutsideRenderPassCommandBufferHelper *getOutsideRenderPassCommandBuffer() const
    {
        return mOutsideRenderPassCommandBuffer;
    }
    RenderPassCommandBufferHelper *getRenderPassCommandBuffer() const
    {
        return mRenderPassCommandBuffer;
    }

  private:
    void copyPresentInfo(const VkPresentInfoKHR &other);

    CustomTask mTask;

    // Wait semaphores
    std::vector<VkSemaphore> mWaitSemaphores;
    std::vector<VkPipelineStageFlags> mWaitSemaphoreStageMasks;

    // ProcessCommands
    OutsideRenderPassCommandBufferHelper *mOutsideRenderPassCommandBuffer;
    RenderPassCommandBufferHelper *mRenderPassCommandBuffer;
    const RenderPass *mRenderPass;

    // Flush data
    VkSemaphore mSemaphore;
    SharedExternalFence mExternalFence;

    // Flush command data
    QueueSerial mSubmitQueueSerial;

    // Present command data
    VkPresentInfoKHR mPresentInfo;
    VkSwapchainKHR mSwapchain;
    VkSemaphore mWaitSemaphore;
    uint32_t mImageIndex;
    // Used by Present if supportsIncrementalPresent is enabled
    VkPresentRegionKHR mPresentRegion;
    VkPresentRegionsKHR mPresentRegions;
    std::vector<VkRectLayerKHR> mRects;

    VkSwapchainPresentFenceInfoEXT mPresentFenceInfo;
    VkFence mPresentFence;

    VkSwapchainPresentModeInfoEXT mPresentModeInfo;
    VkPresentModeKHR mPresentMode;

    SwapchainStatus *mSwapchainStatus;

    // Used by OneOffQueueSubmit
    VkCommandBuffer mOneOffCommandBuffer;
    VkSemaphore mOneOffWaitSemaphore;
    VkPipelineStageFlags mOneOffWaitSemaphoreStageMask;

    // Flush, Present & QueueWaitIdle data
    egl::ContextPriority mPriority;
    ProtectionType mProtectionType;
};
using CommandProcessorTaskQueue =
    angle::FixedQueue<CommandProcessorTask, kMaxCommandProcessorTasksLimit>;
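
// Illustrative sketch of how a submit task might be built and handed off (hypothetical values;
// in this header the actual producers are the CommandProcessor::enqueue* methods below):
//
//     CommandProcessorTask task;
//     task.initFlushAndQueueSubmit(signalSemaphore, std::move(externalFence),
//                                  ProtectionType::Unprotected, egl::ContextPriority::Medium,
//                                  submitQueueSerial);
//     // Tasks are moved, not copied; the move constructor delegates to move assignment.
//     CommandProcessorTask queued(std::move(task));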

struct CommandBatch final : angle::NonCopyable
{
    CommandBatch();
    ~CommandBatch();
    CommandBatch(CommandBatch &&other);
    CommandBatch &operator=(CommandBatch &&other);

    void destroy(VkDevice device);

    bool hasFence() const;
    void releaseFence();
    void destroyFence(VkDevice device);
    VkFence getFenceHandle() const;
    VkResult getFenceStatus(VkDevice device) const;
    VkResult waitFence(VkDevice device, uint64_t timeout) const;
    VkResult waitFenceUnlocked(VkDevice device,
                               uint64_t timeout,
                               std::unique_lock<std::mutex> *lock) const;

    PrimaryCommandBuffer primaryCommands;
    SecondaryCommandBufferCollector secondaryCommands;
    SharedFence fence;
    SharedExternalFence externalFence;
    QueueSerial queueSerial;
    ProtectionType protectionType;
};
using CommandBatchQueue = angle::FixedQueue<CommandBatch, kInFlightCommandsLimit>;

class DeviceQueueMap;

class QueueFamily final : angle::NonCopyable
{
  public:
    static const uint32_t kInvalidIndex = std::numeric_limits<uint32_t>::max();

    static uint32_t FindIndex(const std::vector<VkQueueFamilyProperties> &queueFamilyProperties,
                              VkQueueFlags flags,
                              int32_t matchNumber,  // 0 = first match, 1 = second match ...
                              uint32_t *matchCount);
    static const uint32_t kQueueCount = static_cast<uint32_t>(egl::ContextPriority::EnumCount);
    static const float kQueuePriorities[static_cast<uint32_t>(egl::ContextPriority::EnumCount)];

    QueueFamily() : mProperties{}, mIndex(kInvalidIndex) {}
    ~QueueFamily() {}

    void initialize(const VkQueueFamilyProperties &queueFamilyProperties, uint32_t index);
    bool valid() const { return (mIndex != kInvalidIndex); }
    uint32_t getIndex() const { return mIndex; }
    const VkQueueFamilyProperties *getProperties() const { return &mProperties; }
    bool isGraphics() const { return ((mProperties.queueFlags & VK_QUEUE_GRAPHICS_BIT) > 0); }
    bool isCompute() const { return ((mProperties.queueFlags & VK_QUEUE_COMPUTE_BIT) > 0); }
    bool supportsProtected() const
    {
        return ((mProperties.queueFlags & VK_QUEUE_PROTECTED_BIT) > 0);
    }
    uint32_t getDeviceQueueCount() const { return mProperties.queueCount; }

    DeviceQueueMap initializeQueueMap(VkDevice device,
                                      bool makeProtected,
                                      uint32_t queueIndex,
                                      uint32_t queueCount);

  private:
    VkQueueFamilyProperties mProperties;
    uint32_t mIndex;

    void getDeviceQueue(VkDevice device, bool makeProtected, uint32_t queueIndex, VkQueue *queue);
};
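
// Illustrative sketch of selecting a graphics-capable queue family (hypothetical caller; the
// real selection happens during renderer initialization):
//
//     uint32_t matchCount  = 0;
//     uint32_t familyIndex = QueueFamily::FindIndex(queueFamilyProperties, VK_QUEUE_GRAPHICS_BIT,
//                                                   0 /* first match */, &matchCount);
//     if (familyIndex != QueueFamily::kInvalidIndex)
//     {
//         QueueFamily family;
//         family.initialize(queueFamilyProperties[familyIndex], familyIndex);
//     }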

class DeviceQueueMap : public angle::PackedEnumMap<egl::ContextPriority, VkQueue>
{
    friend QueueFamily;

  public:
    DeviceQueueMap() : mIndex(QueueFamily::kInvalidIndex), mIsProtected(false) {}
    DeviceQueueMap(uint32_t queueFamilyIndex, bool isProtected)
        : mIndex(queueFamilyIndex), mIsProtected(isProtected)
    {}
    DeviceQueueMap(const DeviceQueueMap &other) = default;
    ~DeviceQueueMap();
    DeviceQueueMap &operator=(const DeviceQueueMap &other);

    bool valid() const { return (mIndex != QueueFamily::kInvalidIndex); }
    uint32_t getIndex() const { return mIndex; }
    bool isProtected() const { return mIsProtected; }
    egl::ContextPriority getDevicePriority(egl::ContextPriority priority) const;

  private:
    uint32_t mIndex;
    bool mIsProtected;
    angle::PackedEnumMap<egl::ContextPriority, egl::ContextPriority> mPriorities;
};

// Note: all public APIs of the CommandQueue class must be thread safe.
class CommandQueue : angle::NonCopyable
{
  public:
    CommandQueue();
    ~CommandQueue();

    angle::Result init(Context *context, const DeviceQueueMap &queueMap);
    void destroy(Context *context);

    void handleDeviceLost(RendererVk *renderer);

    // These public APIs are inherently thread safe. Thread unsafe methods must be protected
    // methods that are only accessed via the ThreadSafeCommandQueue API.
    egl::ContextPriority getDriverPriority(egl::ContextPriority priority) const
    {
        return mQueueMap.getDevicePriority(priority);
    }
    uint32_t getDeviceQueueIndex() const { return mQueueMap.getIndex(); }

    VkQueue getQueue(egl::ContextPriority priority) const { return mQueueMap[priority]; }

    Serial getLastSubmittedSerial(SerialIndex index) const { return mLastSubmittedSerials[index]; }

    // Returns true if the ResourceUse has no queue serials still unfinished by ANGLE or Vulkan.
    bool hasResourceUseFinished(const ResourceUse &use) const
    {
        return use <= mLastCompletedSerials;
    }
    bool hasQueueSerialFinished(const QueueSerial &queueSerial) const
    {
        return queueSerial <= mLastCompletedSerials;
    }
    // Returns true if the ResourceUse has no queue serials not yet submitted to Vulkan.
    bool hasResourceUseSubmitted(const ResourceUse &use) const
    {
        return use <= mLastSubmittedSerials;
    }
    bool hasQueueSerialSubmitted(const QueueSerial &queueSerial) const
    {
        return queueSerial <= mLastSubmittedSerials;
    }
    // Wait until the desired serial has been completed.
    angle::Result finishResourceUse(Context *context, const ResourceUse &use, uint64_t timeout);
    angle::Result finishQueueSerial(Context *context,
                                    const QueueSerial &queueSerial,
                                    uint64_t timeout);
    angle::Result waitIdle(Context *context, uint64_t timeout);
    angle::Result waitForResourceUseToFinishWithUserTimeout(Context *context,
                                                            const ResourceUse &use,
                                                            uint64_t timeout,
                                                            VkResult *result);
    bool isBusy(RendererVk *renderer) const;

    angle::Result submitCommands(Context *context,
                                 ProtectionType protectionType,
                                 egl::ContextPriority priority,
                                 VkSemaphore signalSemaphore,
                                 SharedExternalFence &&externalFence,
                                 const QueueSerial &submitQueueSerial);

    angle::Result queueSubmitOneOff(Context *context,
                                    ProtectionType protectionType,
                                    egl::ContextPriority contextPriority,
                                    VkCommandBuffer commandBufferHandle,
                                    VkSemaphore waitSemaphore,
                                    VkPipelineStageFlags waitSemaphoreStageMask,
                                    SubmitPolicy submitPolicy,
                                    const QueueSerial &submitQueueSerial);

    // Errors from present are not considered fatal.
    void queuePresent(egl::ContextPriority contextPriority,
                      const VkPresentInfoKHR &presentInfo,
                      SwapchainStatus *swapchainStatus);

    angle::Result checkCompletedCommands(Context *context)
    {
        std::lock_guard<std::mutex> lock(mMutex);
        return checkCompletedCommandsLocked(context);
    }

    bool hasFinishedCommands() const { return !mFinishedCommandBatches.empty(); }

    angle::Result checkAndCleanupCompletedCommands(Context *context)
    {
        ANGLE_TRY(checkCompletedCommands(context));

        if (!mFinishedCommandBatches.empty())
        {
            ANGLE_TRY(retireFinishedCommandsAndCleanupGarbage(context));
        }

        return angle::Result::Continue;
    }

    void flushWaitSemaphores(ProtectionType protectionType,
                             egl::ContextPriority priority,
                             std::vector<VkSemaphore> &&waitSemaphores,
                             std::vector<VkPipelineStageFlags> &&waitSemaphoreStageMasks);
    angle::Result flushOutsideRPCommands(Context *context,
                                         ProtectionType protectionType,
                                         egl::ContextPriority priority,
                                         OutsideRenderPassCommandBufferHelper **outsideRPCommands);
    angle::Result flushRenderPassCommands(Context *context,
                                          ProtectionType protectionType,
                                          egl::ContextPriority priority,
                                          const RenderPass &renderPass,
                                          RenderPassCommandBufferHelper **renderPassCommands);

    const angle::VulkanPerfCounters getPerfCounters() const;
    void resetPerFramePerfCounters();

    // Retire finished commands and clean up garbage immediately, or request async clean up if
    // enabled.
    angle::Result retireFinishedCommandsAndCleanupGarbage(Context *context);
    angle::Result retireFinishedCommands(Context *context)
    {
        std::lock_guard<std::mutex> lock(mMutex);
        return retireFinishedCommandsLocked(context);
    }
    angle::Result postSubmitCheck(Context *context);

    // Similar to finishOneCommandBatchAndCleanupImpl(), but returns immediately if no command
    // exists in the queue.
    angle::Result finishOneCommandBatchAndCleanup(Context *context,
                                                  uint64_t timeout,
                                                  bool *anyFinished);

    // All these private APIs are called with the mutex locked, so we must not take the lock again.
  private:
    // Check the first command buffer in mInFlightCommands and update mLastCompletedSerials if
    // finished.
    angle::Result checkOneCommandBatch(Context *context, bool *finished);
    // Similar to checkOneCommandBatch, except we will wait for it to finish.
    angle::Result finishOneCommandBatchAndCleanupImpl(Context *context, uint64_t timeout);
    // Walk mFinishedCommands, reset and recycle all command buffers.
    angle::Result retireFinishedCommandsLocked(Context *context);
    // Walk mInFlightCommands, check and update mLastCompletedSerials for all commands that are
    // finished.
    angle::Result checkCompletedCommandsLocked(Context *context);

    angle::Result queueSubmit(Context *context,
                              std::unique_lock<std::mutex> &&dequeueLock,
                              egl::ContextPriority contextPriority,
                              const VkSubmitInfo &submitInfo,
                              DeviceScoped<CommandBatch> &commandBatch,
                              const QueueSerial &submitQueueSerial);

    angle::Result ensurePrimaryCommandBufferValid(Context *context,
                                                  ProtectionType protectionType,
                                                  egl::ContextPriority priority);

    using CommandsStateMap =
        angle::PackedEnumMap<egl::ContextPriority,
                             angle::PackedEnumMap<ProtectionType, CommandsState>>;
    using PrimaryCommandPoolMap = angle::PackedEnumMap<ProtectionType, PersistentCommandPool>;

    angle::Result initCommandPool(Context *context, ProtectionType protectionType)
    {
        PersistentCommandPool &commandPool = mPrimaryCommandPoolMap[protectionType];
        return commandPool.init(context, protectionType, mQueueMap.getIndex());
    }

    // Protects multi-threaded access to mInFlightCommands.pop and ensures ordering of submission.
    mutable std::mutex mMutex;
    // Protects multi-threaded access to mInFlightCommands.push and also relays the lock for
    // mMutex, so that we can release mMutex during the potentially lengthy vkQueueSubmit and
    // vkQueuePresent calls.
    std::mutex mQueueSubmitMutex;
    CommandBatchQueue mInFlightCommands;
    // Temporary storage for finished command batches that should be reset.
    angle::FixedQueue<CommandBatch, kMaxFinishedCommandsLimit> mFinishedCommandBatches;

    CommandsStateMap mCommandsStateMap;
    // Keeps a free list of reusable primary command buffers.
    PrimaryCommandPoolMap mPrimaryCommandPoolMap;

    // Queue serial management.
    AtomicQueueSerialFixedArray mLastSubmittedSerials;
    // This queue serial can be read/written from different threads, so we need to use atomic
    // operations to access the underlying value. Since we only do loads/stores on this value, it
    // should be just a normal uint64_t load/store on most platforms.
    AtomicQueueSerialFixedArray mLastCompletedSerials;

    // QueueMap
    DeviceQueueMap mQueueMap;

    FenceRecycler mFenceRecycler;

    angle::VulkanPerfCounters mPerfCounters;
};
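
// Illustrative sketch of the synchronous submit-and-wait flow (hypothetical caller; |timeoutNs|
// is an assumed timeout value):
//
//     ANGLE_TRY(commandQueue.submitCommands(context, protectionType, priority, signalSemaphore,
//                                           std::move(externalFence), submitQueueSerial));
//     ANGLE_TRY(commandQueue.finishQueueSerial(context, submitQueueSerial, timeoutNs));
//     ANGLE_TRY(commandQueue.checkAndCleanupCompletedCommands(context));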

// CommandProcessor is used to dispatch work to the GPU when the asyncCommandQueue feature is
// enabled. Issuing the |destroy| command will cause the worker thread to clean up its resources
// and shut down. This command is sent when the renderer instance shuts down. Tasks are defined by
// the CommandQueue interface.

class CommandProcessor : public Context
{
  public:
    CommandProcessor(RendererVk *renderer, CommandQueue *commandQueue);
    ~CommandProcessor() override;

    // Context
    void handleError(VkResult result,
                     const char *file,
                     const char *function,
                     unsigned int line) override;

    angle::Result init();

    void destroy(Context *context);

    void handleDeviceLost(RendererVk *renderer);

    angle::Result enqueueSubmitCommands(Context *context,
                                        ProtectionType protectionType,
                                        egl::ContextPriority priority,
                                        VkSemaphore signalSemaphore,
                                        SharedExternalFence &&externalFence,
                                        const QueueSerial &submitQueueSerial);

    void requestCommandsAndGarbageCleanup();

    angle::Result enqueueSubmitOneOffCommands(Context *context,
                                              ProtectionType protectionType,
                                              egl::ContextPriority contextPriority,
                                              VkCommandBuffer commandBufferHandle,
                                              VkSemaphore waitSemaphore,
                                              VkPipelineStageFlags waitSemaphoreStageMask,
                                              SubmitPolicy submitPolicy,
                                              const QueueSerial &submitQueueSerial);
    void enqueuePresent(egl::ContextPriority contextPriority,
                        const VkPresentInfoKHR &presentInfo,
                        SwapchainStatus *swapchainStatus);

    angle::Result enqueueFlushWaitSemaphores(
        ProtectionType protectionType,
        egl::ContextPriority priority,
        std::vector<VkSemaphore> &&waitSemaphores,
        std::vector<VkPipelineStageFlags> &&waitSemaphoreStageMasks);
    angle::Result enqueueFlushOutsideRPCommands(
        Context *context,
        ProtectionType protectionType,
        egl::ContextPriority priority,
        OutsideRenderPassCommandBufferHelper **outsideRPCommands);
    angle::Result enqueueFlushRenderPassCommands(
        Context *context,
        ProtectionType protectionType,
        egl::ContextPriority priority,
        const RenderPass &renderPass,
        RenderPassCommandBufferHelper **renderPassCommands);

    // Wait until the desired serial has been submitted.
    angle::Result waitForQueueSerialToBeSubmitted(vk::Context *context,
                                                  const QueueSerial &queueSerial)
    {
        const ResourceUse use(queueSerial);
        return waitForResourceUseToBeSubmitted(context, use);
    }
    angle::Result waitForResourceUseToBeSubmitted(vk::Context *context, const ResourceUse &use);
    // Wait for the worker thread to submit all outstanding work.
    angle::Result waitForAllWorkToBeSubmitted(Context *context);
    // Wait for an enqueued present to be submitted.
    angle::Result waitForPresentToBeSubmitted(SwapchainStatus *swapchainStatus);

    bool isBusy(RendererVk *renderer) const
    {
        std::lock_guard<std::mutex> enqueueLock(mTaskEnqueueMutex);
        return !mTaskQueue.empty() || mCommandQueue->isBusy(renderer);
    }

    bool hasResourceUseEnqueued(const ResourceUse &use) const
    {
        return use <= mLastEnqueuedSerials;
    }
    bool hasQueueSerialEnqueued(const QueueSerial &queueSerial) const
    {
        return queueSerial <= mLastEnqueuedSerials;
    }
    Serial getLastEnqueuedSerial(SerialIndex index) const { return mLastEnqueuedSerials[index]; }

  private:
    bool hasPendingError() const
    {
        std::lock_guard<std::mutex> queueLock(mErrorMutex);
        return !mErrors.empty();
    }
    angle::Result checkAndPopPendingError(Context *errorHandlingContext);

    // Entry point for the command processor thread; calls processTasksImpl to do the work.
    // Called by RendererVk::initializeDevice on the main thread.
    void processTasks();

    // Called asynchronously from the main thread to queue work that is then processed by the
    // worker thread.
    angle::Result queueCommand(CommandProcessorTask &&task);

    // Command processor thread, called by processTasks. The loop waits for work to
    // be submitted from a separate thread.
    angle::Result processTasksImpl(bool *exitThread);

    // Command processor thread, processes a single task.
    angle::Result processTask(CommandProcessorTask *task);

    VkResult present(egl::ContextPriority priority,
                     const VkPresentInfoKHR &presentInfo,
                     SwapchainStatus *swapchainStatus);

    // The mutex that serializes dequeuing from mTaskQueue and submitting to mCommandQueue, so
    // that there is only one mTaskQueue consumer at a time.
    std::mutex mTaskDequeueMutex;

    CommandProcessorTaskQueue mTaskQueue;
    mutable std::mutex mTaskEnqueueMutex;
    // Signals the worker thread when work is available.
    std::condition_variable mWorkAvailableCondition;
    CommandQueue *const mCommandQueue;

    // Tracks the last serial that was enqueued to mTaskQueue. Note: this may be different from
    // (always equal to or smaller than) mLastSubmittedQueueSerial in CommandQueue, since
    // submission from CommandProcessor to CommandQueue occurs in a separate thread.
    AtomicQueueSerialFixedArray mLastEnqueuedSerials;

    mutable std::mutex mErrorMutex;
    std::queue<Error> mErrors;

    // Command queue worker thread.
    std::thread mTaskThread;
    bool mTaskThreadShouldExit;
    std::atomic<bool> mNeedCommandsAndGarbageCleanup;
};
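
// Illustrative sketch of the asynchronous path (hypothetical caller; in ANGLE this is driven by
// the renderer when the asyncCommandQueue feature is enabled):
//
//     ANGLE_TRY(commandProcessor.enqueueSubmitCommands(context, protectionType, priority,
//                                                      signalSemaphore, std::move(externalFence),
//                                                      submitQueueSerial));
//     // The worker thread performs the actual vkQueueSubmit; block only if required:
//     ANGLE_TRY(commandProcessor.waitForQueueSerialToBeSubmitted(context, submitQueueSerial));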
}  // namespace vk

}  // namespace rx

#endif  // LIBANGLE_RENDERER_VULKAN_COMMAND_PROCESSOR_H_