1 //
2 // Copyright 2020 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // CommandProcessor.cpp:
7 //    Implements the class methods for CommandProcessor.
8 //
9 
10 #include "libANGLE/renderer/vulkan/CommandProcessor.h"
11 #include "libANGLE/renderer/vulkan/RendererVk.h"
12 #include "libANGLE/trace.h"
13 
14 namespace rx
15 {
16 namespace vk
17 {
18 namespace
19 {
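// kInFlightCommandsLimit caps the number of in-flight command batches; CommandQueue::submitFrame
// throttles the CPU by finishing the oldest batches once this limit is exceeded.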
20 constexpr size_t kInFlightCommandsLimit = 100u;
21 constexpr bool kOutputVmaStatsString    = false;
22 
23 void InitializeSubmitInfo(VkSubmitInfo *submitInfo,
24                           const vk::PrimaryCommandBuffer &commandBuffer,
25                           const std::vector<VkSemaphore> &waitSemaphores,
26                           const std::vector<VkPipelineStageFlags> &waitSemaphoreStageMasks,
27                           const vk::Semaphore *signalSemaphore)
28 {
29     // Verify that the submitInfo has been zero'd out.
30     ASSERT(submitInfo->signalSemaphoreCount == 0);
31     ASSERT(waitSemaphores.size() == waitSemaphoreStageMasks.size());
32     submitInfo->sType              = VK_STRUCTURE_TYPE_SUBMIT_INFO;
33     submitInfo->commandBufferCount = commandBuffer.valid() ? 1 : 0;
34     submitInfo->pCommandBuffers    = commandBuffer.ptr();
35     submitInfo->waitSemaphoreCount = static_cast<uint32_t>(waitSemaphores.size());
36     submitInfo->pWaitSemaphores    = waitSemaphores.data();
37     submitInfo->pWaitDstStageMask  = waitSemaphoreStageMasks.data();
38 
39     if (signalSemaphore)
40     {
41         submitInfo->signalSemaphoreCount = 1;
42         submitInfo->pSignalSemaphores    = signalSemaphore->ptr();
43     }
44 }
45 
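// Returns true when the batches are ordered by strictly increasing serial.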
46 bool CommandsHaveValidOrdering(const std::vector<vk::CommandBatch> &commands)
47 {
48     Serial currentSerial;
49     for (const vk::CommandBatch &commandBatch : commands)
50     {
51         if (commandBatch.serial <= currentSerial)
52         {
53             return false;
54         }
55         currentSerial = commandBatch.serial;
56     }
57 
58     return true;
59 }
60 }  // namespace
61 
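// Hands out a recycled fence when one is available; otherwise creates a new one.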
62 angle::Result FenceRecycler::newSharedFence(vk::Context *context,
63                                             vk::Shared<vk::Fence> *sharedFenceOut)
64 {
65     bool gotRecycledFence = false;
66     vk::Fence fence;
67     {
68         std::lock_guard<std::mutex> lock(mMutex);
69         if (!mRecyler.empty())
70         {
71             mRecyler.fetch(&fence);
72             gotRecycledFence = true;
73         }
74     }
75 
76     VkDevice device(context->getDevice());
77     if (gotRecycledFence)
78     {
79         ANGLE_VK_TRY(context, fence.reset(device));
80     }
81     else
82     {
83         VkFenceCreateInfo fenceCreateInfo = {};
84         fenceCreateInfo.sType             = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
85         fenceCreateInfo.flags             = 0;
86         ANGLE_VK_TRY(context, fence.init(device, fenceCreateInfo));
87     }
88     sharedFenceOut->assign(device, std::move(fence));
89     return angle::Result::Continue;
90 }
91 
92 void FenceRecycler::destroy(vk::Context *context)
93 {
94     std::lock_guard<std::mutex> lock(mMutex);
95     mRecyler.destroy(context->getDevice());
96 }
97 
98 // CommandProcessorTask implementation
99 void CommandProcessorTask::initTask()
100 {
101     mTask                        = CustomTask::Invalid;
102     mRenderPass                  = nullptr;
103     mCommandBuffer               = nullptr;
104     mSemaphore                   = nullptr;
105     mOneOffFence                 = nullptr;
106     mPresentInfo                 = {};
107     mPresentInfo.pResults        = nullptr;
108     mPresentInfo.pSwapchains     = nullptr;
109     mPresentInfo.pImageIndices   = nullptr;
110     mPresentInfo.pNext           = nullptr;
111     mPresentInfo.pWaitSemaphores = nullptr;
112     mOneOffCommandBufferVk       = VK_NULL_HANDLE;
113     mPriority                    = egl::ContextPriority::Medium;
114     mHasProtectedContent         = false;
115 }
116 
117 void CommandProcessorTask::initProcessCommands(bool hasProtectedContent,
118                                                CommandBufferHelper *commandBuffer,
119                                                const RenderPass *renderPass)
120 {
121     mTask                = CustomTask::ProcessCommands;
122     mCommandBuffer       = commandBuffer;
123     mRenderPass          = renderPass;
124     mHasProtectedContent = hasProtectedContent;
125 }
126 
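// Deep-copies the present info into task-owned storage (swapchain handle, image index, wait
// semaphore, and any VkPresentRegionsKHR in the pNext chain) so it stays valid until the worker
// thread performs the present.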
127 void CommandProcessorTask::copyPresentInfo(const VkPresentInfoKHR &other)
128 {
129     if (other.sType == 0)
130     {
131         return;
132     }
133 
134     mPresentInfo.sType = other.sType;
135     mPresentInfo.pNext = other.pNext;
136 
137     if (other.swapchainCount > 0)
138     {
139         ASSERT(other.swapchainCount == 1);
140         mPresentInfo.swapchainCount = 1;
141         mSwapchain                  = other.pSwapchains[0];
142         mPresentInfo.pSwapchains    = &mSwapchain;
143         mImageIndex                 = other.pImageIndices[0];
144         mPresentInfo.pImageIndices  = &mImageIndex;
145     }
146 
147     if (other.waitSemaphoreCount > 0)
148     {
149         ASSERT(other.waitSemaphoreCount == 1);
150         mPresentInfo.waitSemaphoreCount = 1;
151         mWaitSemaphore                  = other.pWaitSemaphores[0];
152         mPresentInfo.pWaitSemaphores    = &mWaitSemaphore;
153     }
154 
155     mPresentInfo.pResults = other.pResults;
156 
157     void *pNext = const_cast<void *>(other.pNext);
158     while (pNext != nullptr)
159     {
160         VkStructureType sType = *reinterpret_cast<VkStructureType *>(pNext);
161         switch (sType)
162         {
163             case VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR:
164             {
165                 const VkPresentRegionsKHR *presentRegions =
166                     reinterpret_cast<VkPresentRegionsKHR *>(pNext);
167                 mPresentRegion = *presentRegions->pRegions;
168                 mRects.resize(mPresentRegion.rectangleCount);
169                 for (uint32_t i = 0; i < mPresentRegion.rectangleCount; i++)
170                 {
171                     mRects[i] = presentRegions->pRegions->pRectangles[i];
172                 }
173                 mPresentRegion.pRectangles = mRects.data();
174 
175                 mPresentRegions.sType          = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR;
176                 mPresentRegions.pNext          = presentRegions->pNext;
177                 mPresentRegions.swapchainCount = 1;
178                 mPresentRegions.pRegions       = &mPresentRegion;
179                 mPresentInfo.pNext             = &mPresentRegions;
180                 pNext                          = const_cast<void *>(presentRegions->pNext);
181                 break;
182             }
183             default:
184                 ERR() << "Unknown sType: " << sType << " in VkPresentInfoKHR.pNext chain";
185                 UNREACHABLE();
186                 break;
187         }
188     }
189 }
190 
191 void CommandProcessorTask::initPresent(egl::ContextPriority priority,
192                                        const VkPresentInfoKHR &presentInfo)
193 {
194     mTask     = CustomTask::Present;
195     mPriority = priority;
196     copyPresentInfo(presentInfo);
197 }
198 
199 void CommandProcessorTask::initFinishToSerial(Serial serial)
200 {
201     // Note: sometimes the serial is not valid and that's okay, the finish will early exit in the
202     // TaskProcessor::finishToSerial
203     mTask   = CustomTask::FinishToSerial;
204     mSerial = serial;
205 }
206 
207 void CommandProcessorTask::initFlushAndQueueSubmit(
208     const std::vector<VkSemaphore> &waitSemaphores,
209     const std::vector<VkPipelineStageFlags> &waitSemaphoreStageMasks,
210     const Semaphore *semaphore,
211     bool hasProtectedContent,
212     egl::ContextPriority priority,
213     GarbageList &&currentGarbage,
214     Serial submitQueueSerial)
215 {
216     mTask                    = CustomTask::FlushAndQueueSubmit;
217     mWaitSemaphores          = waitSemaphores;
218     mWaitSemaphoreStageMasks = waitSemaphoreStageMasks;
219     mSemaphore               = semaphore;
220     mGarbage                 = std::move(currentGarbage);
221     mPriority                = priority;
222     mHasProtectedContent     = hasProtectedContent;
223     mSerial                  = submitQueueSerial;
224 }
225 
226 void CommandProcessorTask::initOneOffQueueSubmit(VkCommandBuffer commandBufferHandle,
227                                                  bool hasProtectedContent,
228                                                  egl::ContextPriority priority,
229                                                  const Fence *fence,
230                                                  Serial submitQueueSerial)
231 {
232     mTask                  = CustomTask::OneOffQueueSubmit;
233     mOneOffCommandBufferVk = commandBufferHandle;
234     mOneOffFence           = fence;
235     mPriority              = priority;
236     mHasProtectedContent   = hasProtectedContent;
237     mSerial                = submitQueueSerial;
238 }
239 
240 CommandProcessorTask &CommandProcessorTask::operator=(CommandProcessorTask &&rhs)
241 {
242     if (this == &rhs)
243     {
244         return *this;
245     }
246 
247     std::swap(mRenderPass, rhs.mRenderPass);
248     std::swap(mCommandBuffer, rhs.mCommandBuffer);
249     std::swap(mTask, rhs.mTask);
250     std::swap(mWaitSemaphores, rhs.mWaitSemaphores);
251     std::swap(mWaitSemaphoreStageMasks, rhs.mWaitSemaphoreStageMasks);
252     std::swap(mSemaphore, rhs.mSemaphore);
253     std::swap(mOneOffFence, rhs.mOneOffFence);
254     std::swap(mGarbage, rhs.mGarbage);
255     std::swap(mSerial, rhs.mSerial);
256     std::swap(mPriority, rhs.mPriority);
257     std::swap(mHasProtectedContent, rhs.mHasProtectedContent);
258     std::swap(mOneOffCommandBufferVk, rhs.mOneOffCommandBufferVk);
259 
260     copyPresentInfo(rhs.mPresentInfo);
261 
262     // clear rhs now that everything has moved.
263     rhs.initTask();
264 
265     return *this;
266 }
267 
268 // CommandBatch implementation.
269 CommandBatch::CommandBatch() = default;
270 
271 CommandBatch::~CommandBatch() = default;
272 
273 CommandBatch::CommandBatch(CommandBatch &&other)
274 {
275     *this = std::move(other);
276 }
277 
278 CommandBatch &CommandBatch::operator=(CommandBatch &&other)
279 {
280     std::swap(primaryCommands, other.primaryCommands);
281     std::swap(commandPool, other.commandPool);
282     std::swap(fence, other.fence);
283     std::swap(serial, other.serial);
284     std::swap(hasProtectedContent, other.hasProtectedContent);
285     return *this;
286 }
287 
288 void CommandBatch::destroy(VkDevice device)
289 {
290     primaryCommands.destroy(device);
291     commandPool.destroy(device);
292     fence.reset(device);
293     hasProtectedContent = false;
294 }
295 
296 // CommandProcessor implementation.
297 void CommandProcessor::handleError(VkResult errorCode,
298                                    const char *file,
299                                    const char *function,
300                                    unsigned int line)
301 {
302     ASSERT(errorCode != VK_SUCCESS);
303 
304     std::stringstream errorStream;
305     errorStream << "Internal Vulkan error (" << errorCode << "): " << VulkanResultString(errorCode)
306                 << ".";
307 
308     if (errorCode == VK_ERROR_DEVICE_LOST)
309     {
310         WARN() << errorStream.str();
311         handleDeviceLost(mRenderer);
312     }
313 
314     std::lock_guard<std::mutex> queueLock(mErrorMutex);
315     Error error = {errorCode, file, function, line};
316     mErrors.emplace(error);
317 }
318 
319 CommandProcessor::CommandProcessor(RendererVk *renderer)
320     : Context(renderer), mWorkerThreadIdle(false)
321 {
322     std::lock_guard<std::mutex> queueLock(mErrorMutex);
323     while (!mErrors.empty())
324     {
325         mErrors.pop();
326     }
327 }
328 
329 CommandProcessor::~CommandProcessor() = default;
330 
331 angle::Result CommandProcessor::checkAndPopPendingError(Context *errorHandlingContext)
332 {
333     std::lock_guard<std::mutex> queueLock(mErrorMutex);
334     if (mErrors.empty())
335     {
336         return angle::Result::Continue;
337     }
338     else
339     {
340         Error err = mErrors.front();
341         mErrors.pop();
342         errorHandlingContext->handleError(err.errorCode, err.file, err.function, err.line);
343         return angle::Result::Stop;
344     }
345 }
346 
347 void CommandProcessor::queueCommand(CommandProcessorTask &&task)
348 {
349     ANGLE_TRACE_EVENT0("gpu.angle", "CommandProcessor::queueCommand");
350     // Grab the worker mutex so that we put things on the queue in the same order as we give out
351     // serials.
352     std::lock_guard<std::mutex> queueLock(mWorkerMutex);
353 
354     mTasks.emplace(std::move(task));
355     mWorkAvailableCondition.notify_one();
356 }
357 
358 void CommandProcessor::processTasks()
359 {
360     while (true)
361     {
362         bool exitThread      = false;
363         angle::Result result = processTasksImpl(&exitThread);
364         if (exitThread)
365         {
366             // We are doing a controlled exit of the thread, break out of the while loop.
367             break;
368         }
369         if (result != angle::Result::Continue)
370         {
371             // TODO: https://issuetracker.google.com/issues/170311829 - follow-up on error handling
372             // ContextVk::commandProcessorSyncErrorsAndQueueCommand and WindowSurfaceVk::destroy
373             // do error processing, is anything required here? Don't think so, mostly need to
374             // continue the worker thread until it's been told to exit.
375             UNREACHABLE();
376         }
377     }
378 }
379 
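// Worker loop: waits for tasks, processes them in queue order, and sets *exitThread once an
// Exit task has been handled.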
380 angle::Result CommandProcessor::processTasksImpl(bool *exitThread)
381 {
382     while (true)
383     {
384         std::unique_lock<std::mutex> lock(mWorkerMutex);
385         if (mTasks.empty())
386         {
387             mWorkerThreadIdle = true;
388             mWorkerIdleCondition.notify_all();
389             // Only wake if notified and command queue is not empty
390             mWorkAvailableCondition.wait(lock, [this] { return !mTasks.empty(); });
391         }
392         mWorkerThreadIdle = false;
393         CommandProcessorTask task(std::move(mTasks.front()));
394         mTasks.pop();
395         lock.unlock();
396 
397         ANGLE_TRY(processTask(&task));
398         if (task.getTaskCommand() == CustomTask::Exit)
399         {
400 
401             *exitThread = true;
402             lock.lock();
403             mWorkerThreadIdle = true;
404             mWorkerIdleCondition.notify_one();
405             return angle::Result::Continue;
406         }
407     }
408 
409     UNREACHABLE();
410     return angle::Result::Stop;
411 }
412 
413 angle::Result CommandProcessor::processTask(CommandProcessorTask *task)
414 {
415     switch (task->getTaskCommand())
416     {
417         case CustomTask::Exit:
418         {
419             ANGLE_TRY(mCommandQueue.finishToSerial(this, Serial::Infinite(),
420                                                    mRenderer->getMaxFenceWaitTimeNs()));
421             // Shutting down, so clean up.
422             mCommandQueue.destroy(this);
423             mCommandPool.destroy(mRenderer->getDevice());
424             break;
425         }
426         case CustomTask::FlushAndQueueSubmit:
427         {
428             ANGLE_TRACE_EVENT0("gpu.angle", "processTask::FlushAndQueueSubmit");
429             // End command buffer
430 
431             // Call submitFrame()
432             ANGLE_TRY(mCommandQueue.submitFrame(
433                 this, task->hasProtectedContent(), task->getPriority(), task->getWaitSemaphores(),
434                 task->getWaitSemaphoreStageMasks(), task->getSemaphore(),
435                 std::move(task->getGarbage()), &mCommandPool, task->getQueueSerial()));
436 
437             ASSERT(task->getGarbage().empty());
438             break;
439         }
440         case CustomTask::OneOffQueueSubmit:
441         {
442             ANGLE_TRACE_EVENT0("gpu.angle", "processTask::OneOffQueueSubmit");
443 
444             ANGLE_TRY(mCommandQueue.queueSubmitOneOff(
445                 this, task->hasProtectedContent(), task->getPriority(),
446                 task->getOneOffCommandBufferVk(), task->getOneOffFence(),
447                 SubmitPolicy::EnsureSubmitted, task->getQueueSerial()));
448             ANGLE_TRY(mCommandQueue.checkCompletedCommands(this));
449             break;
450         }
451         case CustomTask::FinishToSerial:
452         {
453             ANGLE_TRY(mCommandQueue.finishToSerial(this, task->getQueueSerial(),
454                                                    mRenderer->getMaxFenceWaitTimeNs()));
455             break;
456         }
457         case CustomTask::Present:
458         {
459             VkResult result = present(task->getPriority(), task->getPresentInfo());
460             if (ANGLE_UNLIKELY(result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR))
461             {
462                 // We get to ignore these as they are not fatal
463             }
464             else if (ANGLE_UNLIKELY(result != VK_SUCCESS))
465             {
466                 // Save the error so that we can handle it.
467                 // Don't leave processing loop, don't consider errors from present to be fatal.
468                 // TODO: https://issuetracker.google.com/issues/170329600 - This needs to improve to
469                 // properly parallelize present
470                 handleError(result, __FILE__, __FUNCTION__, __LINE__);
471             }
472             break;
473         }
474         case CustomTask::ProcessCommands:
475         {
476             ASSERT(!task->getCommandBuffer()->empty());
477 
478             CommandBufferHelper *commandBuffer = task->getCommandBuffer();
479             if (task->getRenderPass())
480             {
481                 ANGLE_TRY(mCommandQueue.flushRenderPassCommands(
482                     this, task->hasProtectedContent(), *task->getRenderPass(), &commandBuffer));
483             }
484             else
485             {
486                 ANGLE_TRY(mCommandQueue.flushOutsideRPCommands(this, task->hasProtectedContent(),
487                                                                &commandBuffer));
488             }
489             ASSERT(task->getCommandBuffer()->empty());
490             mRenderer->recycleCommandBufferHelper(task->getCommandBuffer());
491             break;
492         }
493         case CustomTask::CheckCompletedCommands:
494         {
495             ANGLE_TRY(mCommandQueue.checkCompletedCommands(this));
496             break;
497         }
498         default:
499             UNREACHABLE();
500             break;
501     }
502 
503     return angle::Result::Continue;
504 }
505 
506 angle::Result CommandProcessor::checkCompletedCommands(Context *context)
507 {
508     ANGLE_TRY(checkAndPopPendingError(context));
509 
510     CommandProcessorTask checkCompletedTask;
511     checkCompletedTask.initTask(CustomTask::CheckCompletedCommands);
512     queueCommand(std::move(checkCompletedTask));
513 
514     return angle::Result::Continue;
515 }
516 
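// Blocks until the worker thread is idle and its task queue is empty, then forwards any errors
// the worker accumulated to the given context.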
517 angle::Result CommandProcessor::waitForWorkComplete(Context *context)
518 {
519     ANGLE_TRACE_EVENT0("gpu.angle", "CommandProcessor::waitForWorkComplete");
520     std::unique_lock<std::mutex> lock(mWorkerMutex);
521     mWorkerIdleCondition.wait(lock, [this] { return (mTasks.empty() && mWorkerThreadIdle); });
522     // Worker thread is idle and command queue is empty so good to continue
523 
524     // Sync any errors to the context
525     bool shouldStop = hasPendingError();
526     while (hasPendingError())
527     {
528         (void)checkAndPopPendingError(context);
529     }
530     return shouldStop ? angle::Result::Stop : angle::Result::Continue;
531 }
532 
533 angle::Result CommandProcessor::init(Context *context, const DeviceQueueMap &queueMap)
534 {
535     ANGLE_TRY(mCommandQueue.init(context, queueMap));
536 
537     mTaskThread = std::thread(&CommandProcessor::processTasks, this);
538 
539     return angle::Result::Continue;
540 }
541 
542 void CommandProcessor::destroy(Context *context)
543 {
544     CommandProcessorTask endTask;
545     endTask.initTask(CustomTask::Exit);
546     queueCommand(std::move(endTask));
547     (void)waitForWorkComplete(context);
548     if (mTaskThread.joinable())
549     {
550         mTaskThread.join();
551     }
552 }
553 
554 Serial CommandProcessor::getLastCompletedQueueSerial() const
555 {
556     std::lock_guard<std::mutex> lock(mQueueSerialMutex);
557     return mCommandQueue.getLastCompletedQueueSerial();
558 }
559 
560 Serial CommandProcessor::getLastSubmittedQueueSerial() const
561 {
562     std::lock_guard<std::mutex> lock(mQueueSerialMutex);
563     return mCommandQueue.getLastSubmittedQueueSerial();
564 }
565 
566 Serial CommandProcessor::getCurrentQueueSerial() const
567 {
568     std::lock_guard<std::mutex> lock(mQueueSerialMutex);
569     return mCommandQueue.getCurrentQueueSerial();
570 }
571 
572 Serial CommandProcessor::reserveSubmitSerial()
573 {
574     std::lock_guard<std::mutex> lock(mQueueSerialMutex);
575     return mCommandQueue.reserveSubmitSerial();
576 }
577 
578 // Wait until all commands up to and including serial have been processed
579 angle::Result CommandProcessor::finishToSerial(Context *context, Serial serial, uint64_t timeout)
580 {
581     ANGLE_TRACE_EVENT0("gpu.angle", "CommandProcessor::finishToSerial");
582 
583     ANGLE_TRY(checkAndPopPendingError(context));
584 
585     CommandProcessorTask task;
586     task.initFinishToSerial(serial);
587     queueCommand(std::move(task));
588 
589     // Wait until the worker is idle. At that point we know that the finishToSerial command has
590     // completed executing, including any associated state cleanup.
591     return waitForWorkComplete(context);
592 }
593 
594 void CommandProcessor::handleDeviceLost(RendererVk *renderer)
595 {
596     ANGLE_TRACE_EVENT0("gpu.angle", "CommandProcessor::handleDeviceLost");
597     std::unique_lock<std::mutex> lock(mWorkerMutex);
598     mWorkerIdleCondition.wait(lock, [this] { return (mTasks.empty() && mWorkerThreadIdle); });
599 
600     // Worker thread is idle and command queue is empty so good to continue
601     mCommandQueue.handleDeviceLost(renderer);
602 }
603 
604 angle::Result CommandProcessor::finishAllWork(Context *context)
605 {
606     ANGLE_TRACE_EVENT0("gpu.angle", "CommandProcessor::finishAllWork");
607     // Wait for GPU work to finish
608     return finishToSerial(context, Serial::Infinite(), mRenderer->getMaxFenceWaitTimeNs());
609 }
610 
611 VkResult CommandProcessor::getLastAndClearPresentResult(VkSwapchainKHR swapchain)
612 {
613     std::unique_lock<std::mutex> lock(mSwapchainStatusMutex);
614     if (mSwapchainStatus.find(swapchain) == mSwapchainStatus.end())
615     {
616         // Wake when required swapchain status becomes available
617         mSwapchainStatusCondition.wait(lock, [this, swapchain] {
618             return mSwapchainStatus.find(swapchain) != mSwapchainStatus.end();
619         });
620     }
621     VkResult result = mSwapchainStatus[swapchain];
622     mSwapchainStatus.erase(swapchain);
623     return result;
624 }
625 
626 VkResult CommandProcessor::present(egl::ContextPriority priority,
627                                    const VkPresentInfoKHR &presentInfo)
628 {
629     std::lock_guard<std::mutex> lock(mSwapchainStatusMutex);
630     ANGLE_TRACE_EVENT0("gpu.angle", "vkQueuePresentKHR");
631     VkResult result = mCommandQueue.queuePresent(priority, presentInfo);
632 
633     // Verify that we are presenting one and only one swapchain
634     ASSERT(presentInfo.swapchainCount == 1);
635     ASSERT(presentInfo.pResults == nullptr);
636     mSwapchainStatus[presentInfo.pSwapchains[0]] = result;
637 
638     mSwapchainStatusCondition.notify_all();
639 
640     return result;
641 }
642 
643 angle::Result CommandProcessor::submitFrame(
644     Context *context,
645     bool hasProtectedContent,
646     egl::ContextPriority priority,
647     const std::vector<VkSemaphore> &waitSemaphores,
648     const std::vector<VkPipelineStageFlags> &waitSemaphoreStageMasks,
649     const Semaphore *signalSemaphore,
650     GarbageList &&currentGarbage,
651     CommandPool *commandPool,
652     Serial submitQueueSerial)
653 {
654     ANGLE_TRY(checkAndPopPendingError(context));
655 
656     CommandProcessorTask task;
657     task.initFlushAndQueueSubmit(waitSemaphores, waitSemaphoreStageMasks, signalSemaphore,
658                                  hasProtectedContent, priority, std::move(currentGarbage),
659                                  submitQueueSerial);
660 
661     queueCommand(std::move(task));
662 
663     return angle::Result::Continue;
664 }
665 
666 angle::Result CommandProcessor::queueSubmitOneOff(Context *context,
667                                                   bool hasProtectedContent,
668                                                   egl::ContextPriority contextPriority,
669                                                   VkCommandBuffer commandBufferHandle,
670                                                   const Fence *fence,
671                                                   SubmitPolicy submitPolicy,
672                                                   Serial submitQueueSerial)
673 {
674     ANGLE_TRY(checkAndPopPendingError(context));
675 
676     CommandProcessorTask task;
677     task.initOneOffQueueSubmit(commandBufferHandle, hasProtectedContent, contextPriority, fence,
678                                submitQueueSerial);
679     queueCommand(std::move(task));
680     if (submitPolicy == SubmitPolicy::EnsureSubmitted)
681     {
682         // Caller has synchronization requirement to have work in GPU pipe when returning from this
683         // function.
684         ANGLE_TRY(waitForWorkComplete(context));
685     }
686 
687     return angle::Result::Continue;
688 }
689 
690 VkResult CommandProcessor::queuePresent(egl::ContextPriority contextPriority,
691                                         const VkPresentInfoKHR &presentInfo)
692 {
693     CommandProcessorTask task;
694     task.initPresent(contextPriority, presentInfo);
695 
696     ANGLE_TRACE_EVENT0("gpu.angle", "CommandProcessor::queuePresent");
697     queueCommand(std::move(task));
698 
699     // Always return success, when we call acquireNextImage we'll check the return code. This
700     // allows the app to continue working until we really need to know the return code from
701     // present.
702     return VK_SUCCESS;
703 }
704 
705 angle::Result CommandProcessor::waitForSerialWithUserTimeout(vk::Context *context,
706                                                              Serial serial,
707                                                              uint64_t timeout,
708                                                              VkResult *result)
709 {
710     // If finishToSerial times out we generate an error. Therefore we use a large timeout.
711     // TODO: https://issuetracker.google.com/170312581 - Wait with timeout.
712     return finishToSerial(context, serial, mRenderer->getMaxFenceWaitTimeNs());
713 }
714 
715 angle::Result CommandProcessor::flushOutsideRPCommands(Context *context,
716                                                        bool hasProtectedContent,
717                                                        CommandBufferHelper **outsideRPCommands)
718 {
719     ANGLE_TRY(checkAndPopPendingError(context));
720 
721     (*outsideRPCommands)->markClosed();
722     CommandProcessorTask task;
723     task.initProcessCommands(hasProtectedContent, *outsideRPCommands, nullptr);
724     queueCommand(std::move(task));
725     *outsideRPCommands = mRenderer->getCommandBufferHelper(false);
726 
727     return angle::Result::Continue;
728 }
729 
730 angle::Result CommandProcessor::flushRenderPassCommands(Context *context,
731                                                         bool hasProtectedContent,
732                                                         const RenderPass &renderPass,
733                                                         CommandBufferHelper **renderPassCommands)
734 {
735     ANGLE_TRY(checkAndPopPendingError(context));
736 
737     (*renderPassCommands)->markClosed();
738     CommandProcessorTask task;
739     task.initProcessCommands(hasProtectedContent, *renderPassCommands, &renderPass);
740     queueCommand(std::move(task));
741     *renderPassCommands = mRenderer->getCommandBufferHelper(true);
742 
743     return angle::Result::Continue;
744 }
745 
746 // CommandQueue implementation.
747 CommandQueue::CommandQueue() : mCurrentQueueSerial(mQueueSerialFactory.generate()) {}
748 
749 CommandQueue::~CommandQueue() = default;
750 
751 void CommandQueue::destroy(Context *context)
752 {
753     // Force all commands to finish by flushing all queues.
754     for (VkQueue queue : mQueueMap)
755     {
756         if (queue != VK_NULL_HANDLE)
757         {
758             vkQueueWaitIdle(queue);
759         }
760     }
761 
762     RendererVk *renderer = context->getRenderer();
763 
764     mLastCompletedQueueSerial = Serial::Infinite();
765     (void)clearAllGarbage(renderer);
766 
767     mPrimaryCommands.destroy(renderer->getDevice());
768     mPrimaryCommandPool.destroy(renderer->getDevice());
769 
770     if (mProtectedCommandPool.valid())
771     {
772         mProtectedCommands.destroy(renderer->getDevice());
773         mProtectedCommandPool.destroy(renderer->getDevice());
774     }
775 
776     mFenceRecycler.destroy(context);
777 
778     ASSERT(mInFlightCommands.empty() && mGarbageQueue.empty());
779 }
780 
781 angle::Result CommandQueue::init(Context *context, const vk::DeviceQueueMap &queueMap)
782 {
783     // Initialize the command pool now that we know the queue family index.
784     ANGLE_TRY(mPrimaryCommandPool.init(context, false, queueMap.getIndex()));
785     mQueueMap = queueMap;
786 
787     if (queueMap.isProtected())
788     {
789         ANGLE_TRY(mProtectedCommandPool.init(context, true, queueMap.getIndex()));
790     }
791 
792     return angle::Result::Continue;
793 }
794 
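// Checks in-flight batches in submission order and retires every batch whose fence has signaled.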
795 angle::Result CommandQueue::checkCompletedCommands(Context *context)
796 {
797     ANGLE_TRACE_EVENT0("gpu.angle", "CommandQueue::checkCompletedCommandsNoLock");
798     RendererVk *renderer = context->getRenderer();
799     VkDevice device      = renderer->getDevice();
800 
801     int finishedCount = 0;
802 
803     for (CommandBatch &batch : mInFlightCommands)
804     {
805         VkResult result = batch.fence.get().getStatus(device);
806         if (result == VK_NOT_READY)
807         {
808             break;
809         }
810         ANGLE_VK_TRY(context, result);
811         ++finishedCount;
812     }
813 
814     if (finishedCount == 0)
815     {
816         return angle::Result::Continue;
817     }
818 
819     return retireFinishedCommands(context, finishedCount);
820 }
821 
822 angle::Result CommandQueue::retireFinishedCommands(Context *context, size_t finishedCount)
823 {
824     ASSERT(finishedCount > 0);
825 
826     RendererVk *renderer = context->getRenderer();
827     VkDevice device      = renderer->getDevice();
828 
829     for (size_t commandIndex = 0; commandIndex < finishedCount; ++commandIndex)
830     {
831         CommandBatch &batch = mInFlightCommands[commandIndex];
832 
833         mLastCompletedQueueSerial = batch.serial;
834         mFenceRecycler.resetSharedFence(&batch.fence);
835         ANGLE_TRACE_EVENT0("gpu.angle", "command buffer recycling");
836         batch.commandPool.destroy(device);
837         PersistentCommandPool &commandPool = getCommandPool(batch.hasProtectedContent);
838         ANGLE_TRY(commandPool.collect(context, std::move(batch.primaryCommands)));
839     }
840 
841     if (finishedCount > 0)
842     {
843         auto beginIter = mInFlightCommands.begin();
844         mInFlightCommands.erase(beginIter, beginIter + finishedCount);
845     }
846 
847     size_t freeIndex = 0;
848     for (; freeIndex < mGarbageQueue.size(); ++freeIndex)
849     {
850         GarbageAndSerial &garbageList = mGarbageQueue[freeIndex];
851         if (garbageList.getSerial() < mLastCompletedQueueSerial)
852         {
853             for (GarbageObject &garbage : garbageList.get())
854             {
855                 garbage.destroy(renderer);
856             }
857         }
858         else
859         {
860             break;
861         }
862     }
863 
864     // Remove the entries from the garbage list - they should be ready to go.
865     if (freeIndex > 0)
866     {
867         mGarbageQueue.erase(mGarbageQueue.begin(), mGarbageQueue.begin() + freeIndex);
868     }
869 
870     return angle::Result::Continue;
871 }
872 
873 angle::Result CommandQueue::releaseToCommandBatch(Context *context,
874                                                   bool hasProtectedContent,
875                                                   PrimaryCommandBuffer &&commandBuffer,
876                                                   CommandPool *commandPool,
877                                                   CommandBatch *batch)
878 {
879     ANGLE_TRACE_EVENT0("gpu.angle", "CommandQueue::releaseToCommandBatch");
880 
881     RendererVk *renderer = context->getRenderer();
882     VkDevice device      = renderer->getDevice();
883 
884     batch->primaryCommands = std::move(commandBuffer);
885 
886     if (commandPool->valid())
887     {
888         batch->commandPool = std::move(*commandPool);
889         // Recreate CommandPool
890         VkCommandPoolCreateInfo poolInfo = {};
891         poolInfo.sType                   = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
892         poolInfo.flags                   = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
893         poolInfo.queueFamilyIndex        = mQueueMap.getIndex();
894         if (hasProtectedContent)
895         {
896             poolInfo.flags |= VK_COMMAND_POOL_CREATE_PROTECTED_BIT;
897         }
898         batch->hasProtectedContent = hasProtectedContent;
899         ANGLE_VK_TRY(context, commandPool->init(device, poolInfo));
900     }
901 
902     return angle::Result::Continue;
903 }
904 
905 void CommandQueue::clearAllGarbage(RendererVk *renderer)
906 {
907     for (GarbageAndSerial &garbageList : mGarbageQueue)
908     {
909         for (GarbageObject &garbage : garbageList.get())
910         {
911             garbage.destroy(renderer);
912         }
913     }
914     mGarbageQueue.clear();
915 }
916 
917 void CommandQueue::handleDeviceLost(RendererVk *renderer)
918 {
919     ANGLE_TRACE_EVENT0("gpu.angle", "CommandQueue::handleDeviceLost");
920 
921     VkDevice device = renderer->getDevice();
922 
923     for (CommandBatch &batch : mInFlightCommands)
924     {
925         // On device loss we need to wait for fence to be signaled before destroying it
926         VkResult status = batch.fence.get().wait(device, renderer->getMaxFenceWaitTimeNs());
927         // If the wait times out, it is probably not possible to recover from lost device
928         ASSERT(status == VK_SUCCESS || status == VK_ERROR_DEVICE_LOST);
929 
930         // On device loss, simply destroy the CommandBuffer; it will be fully cleared later
931         // by CommandPool::destroy.
932         batch.primaryCommands.destroy(device);
933 
934         batch.commandPool.destroy(device);
935         batch.fence.reset(device);
936     }
937     mInFlightCommands.clear();
938 }
939 
940 bool CommandQueue::allInFlightCommandsAreAfterSerial(Serial serial)
941 {
942     return mInFlightCommands.empty() || mInFlightCommands[0].serial > serial;
943 }
944 
945 angle::Result CommandQueue::finishToSerial(Context *context, Serial finishSerial, uint64_t timeout)
946 {
947     if (mInFlightCommands.empty())
948     {
949         return angle::Result::Continue;
950     }
951 
952     ANGLE_TRACE_EVENT0("gpu.angle", "CommandQueue::finishToSerial");
953 
954     // Find the serial in the list. The serials should be in order.
955     ASSERT(CommandsHaveValidOrdering(mInFlightCommands));
956 
957     size_t finishedCount = 0;
958     while (finishedCount < mInFlightCommands.size() &&
959            mInFlightCommands[finishedCount].serial <= finishSerial)
960     {
961         finishedCount++;
962     }
963 
964     if (finishedCount == 0)
965     {
966         return angle::Result::Continue;
967     }
968 
969     const CommandBatch &batch = mInFlightCommands[finishedCount - 1];
970 
971     // Wait for it to finish
972     VkDevice device = context->getDevice();
973     VkResult status = batch.fence.get().wait(device, timeout);
974 
975     ANGLE_VK_TRY(context, status);
976 
977     // Clean up finished batches.
978     ANGLE_TRY(retireFinishedCommands(context, finishedCount));
979     ASSERT(allInFlightCommandsAreAfterSerial(finishSerial));
980 
981     return angle::Result::Continue;
982 }
983 
984 Serial CommandQueue::reserveSubmitSerial()
985 {
986     Serial returnSerial = mCurrentQueueSerial;
987     mCurrentQueueSerial = mQueueSerialFactory.generate();
988     return returnSerial;
989 }
990 
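// Ends the current primary command buffer, submits it with the given semaphores and a new fence,
// tracks the batch in mInFlightCommands, and throttles the CPU when too many batches are pending.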
991 angle::Result CommandQueue::submitFrame(
992     Context *context,
993     bool hasProtectedContent,
994     egl::ContextPriority priority,
995     const std::vector<VkSemaphore> &waitSemaphores,
996     const std::vector<VkPipelineStageFlags> &waitSemaphoreStageMasks,
997     const Semaphore *signalSemaphore,
998     GarbageList &&currentGarbage,
999     CommandPool *commandPool,
1000     Serial submitQueueSerial)
1001 {
1002     // Start an empty primary buffer if we have an empty submit.
1003     PrimaryCommandBuffer &commandBuffer = getCommandBuffer(hasProtectedContent);
1004     ANGLE_TRY(ensurePrimaryCommandBufferValid(context, hasProtectedContent));
1005     ANGLE_VK_TRY(context, commandBuffer.end());
1006 
1007     VkSubmitInfo submitInfo = {};
1008     InitializeSubmitInfo(&submitInfo, commandBuffer, waitSemaphores, waitSemaphoreStageMasks,
1009                          signalSemaphore);
1010 
1011     VkProtectedSubmitInfo protectedSubmitInfo = {};
1012     if (hasProtectedContent)
1013     {
1014         protectedSubmitInfo.sType           = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
1015         protectedSubmitInfo.pNext           = nullptr;
1016         protectedSubmitInfo.protectedSubmit = true;
1017         submitInfo.pNext                    = &protectedSubmitInfo;
1018     }
1019 
1020     ANGLE_TRACE_EVENT0("gpu.angle", "CommandQueue::submitFrame");
1021 
1022     RendererVk *renderer = context->getRenderer();
1023     VkDevice device      = renderer->getDevice();
1024 
1025     DeviceScoped<CommandBatch> scopedBatch(device);
1026     CommandBatch &batch = scopedBatch.get();
1027 
1028     ANGLE_TRY(mFenceRecycler.newSharedFence(context, &batch.fence));
1029     batch.serial              = submitQueueSerial;
1030     batch.hasProtectedContent = hasProtectedContent;
1031 
1032     ANGLE_TRY(queueSubmit(context, priority, submitInfo, &batch.fence.get(), batch.serial));
1033 
1034     if (!currentGarbage.empty())
1035     {
1036         mGarbageQueue.emplace_back(std::move(currentGarbage), batch.serial);
1037     }
1038 
1039     // Store the primary CommandBuffer and command pool used for secondary CommandBuffers
1040     // in the in-flight list.
1041     if (hasProtectedContent)
1042     {
1043         ANGLE_TRY(releaseToCommandBatch(context, hasProtectedContent, std::move(mProtectedCommands),
1044                                         commandPool, &batch));
1045     }
1046     else
1047     {
1048         ANGLE_TRY(releaseToCommandBatch(context, hasProtectedContent, std::move(mPrimaryCommands),
1049                                         commandPool, &batch));
1050     }
1051     mInFlightCommands.emplace_back(scopedBatch.release());
1052 
1053     ANGLE_TRY(checkCompletedCommands(context));
1054 
1055     // The CPU should be throttled to keep mInFlightCommands from growing too fast. This is
1056     // important for off-screen scenarios.
1057     if (mInFlightCommands.size() > kInFlightCommandsLimit)
1058     {
1059         size_t numCommandsToFinish = mInFlightCommands.size() - kInFlightCommandsLimit;
1060         Serial finishSerial        = mInFlightCommands[numCommandsToFinish].serial;
1061         ANGLE_TRY(finishToSerial(context, finishSerial, renderer->getMaxFenceWaitTimeNs()));
1062     }
1063 
1064     return angle::Result::Continue;
1065 }
1066 
1067 angle::Result CommandQueue::waitForSerialWithUserTimeout(vk::Context *context,
1068                                                          Serial serial,
1069                                                          uint64_t timeout,
1070                                                          VkResult *result)
1071 {
1072     // No in-flight work. This indicates the serial is already complete.
1073     if (mInFlightCommands.empty())
1074     {
1075         *result = VK_SUCCESS;
1076         return angle::Result::Continue;
1077     }
1078 
1079     // Serial is already complete.
1080     if (serial < mInFlightCommands[0].serial)
1081     {
1082         *result = VK_SUCCESS;
1083         return angle::Result::Continue;
1084     }
1085 
1086     size_t batchIndex = 0;
1087     while (batchIndex != mInFlightCommands.size() && mInFlightCommands[batchIndex].serial < serial)
1088     {
1089         batchIndex++;
1090     }
1091 
1092     // Serial is not yet submitted. This is undefined behaviour, so we can do anything.
1093     if (batchIndex >= mInFlightCommands.size())
1094     {
1095         WARN() << "Waiting on an unsubmitted serial.";
1096         *result = VK_TIMEOUT;
1097         return angle::Result::Continue;
1098     }
1099 
1100     ASSERT(serial == mInFlightCommands[batchIndex].serial);
1101 
1102     vk::Fence &fence = mInFlightCommands[batchIndex].fence.get();
1103     ASSERT(fence.valid());
1104     *result = fence.wait(context->getDevice(), timeout);
1105 
1106     // Don't trigger an error on timeout.
1107     if (*result != VK_TIMEOUT)
1108     {
1109         ANGLE_VK_TRY(context, *result);
1110     }
1111 
1112     return angle::Result::Continue;
1113 }
1114 
1115 angle::Result CommandQueue::ensurePrimaryCommandBufferValid(Context *context,
1116                                                             bool hasProtectedContent)
1117 {
1118     PersistentCommandPool &commandPool  = getCommandPool(hasProtectedContent);
1119     PrimaryCommandBuffer &commandBuffer = getCommandBuffer(hasProtectedContent);
1120 
1121     if (commandBuffer.valid())
1122     {
1123         return angle::Result::Continue;
1124     }
1125 
1126     ANGLE_TRY(commandPool.allocate(context, &commandBuffer));
1127     VkCommandBufferBeginInfo beginInfo = {};
1128     beginInfo.sType                    = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1129     beginInfo.flags                    = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
1130     beginInfo.pInheritanceInfo         = nullptr;
1131     ANGLE_VK_TRY(context, commandBuffer.begin(beginInfo));
1132 
1133     return angle::Result::Continue;
1134 }
1135 
1136 angle::Result CommandQueue::flushOutsideRPCommands(Context *context,
1137                                                    bool hasProtectedContent,
1138                                                    CommandBufferHelper **outsideRPCommands)
1139 {
1140     ANGLE_TRY(ensurePrimaryCommandBufferValid(context, hasProtectedContent));
1141     PrimaryCommandBuffer &commandBuffer = getCommandBuffer(hasProtectedContent);
1142     return (*outsideRPCommands)
1143         ->flushToPrimary(context->getRenderer()->getFeatures(), &commandBuffer, nullptr);
1144 }
1145 
1146 angle::Result CommandQueue::flushRenderPassCommands(Context *context,
1147                                                     bool hasProtectedContent,
1148                                                     const RenderPass &renderPass,
1149                                                     CommandBufferHelper **renderPassCommands)
1150 {
1151     ANGLE_TRY(ensurePrimaryCommandBufferValid(context, hasProtectedContent));
1152     PrimaryCommandBuffer &commandBuffer = getCommandBuffer(hasProtectedContent);
1153     return (*renderPassCommands)
1154         ->flushToPrimary(context->getRenderer()->getFeatures(), &commandBuffer, &renderPass);
1155 }
1156 
1157 angle::Result CommandQueue::queueSubmitOneOff(Context *context,
1158                                               bool hasProtectedContent,
1159                                               egl::ContextPriority contextPriority,
1160                                               VkCommandBuffer commandBufferHandle,
1161                                               const Fence *fence,
1162                                               SubmitPolicy submitPolicy,
1163                                               Serial submitQueueSerial)
1164 {
1165     VkSubmitInfo submitInfo = {};
1166     submitInfo.sType        = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1167 
1168     VkProtectedSubmitInfo protectedSubmitInfo = {};
1169     if (hasProtectedContent)
1170     {
1171         protectedSubmitInfo.sType           = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
1172         protectedSubmitInfo.pNext           = nullptr;
1173         protectedSubmitInfo.protectedSubmit = true;
1174         submitInfo.pNext                    = &protectedSubmitInfo;
1175     }
1176 
1177     if (commandBufferHandle != VK_NULL_HANDLE)
1178     {
1179         submitInfo.commandBufferCount = 1;
1180         submitInfo.pCommandBuffers    = &commandBufferHandle;
1181     }
1182 
1183     return queueSubmit(context, contextPriority, submitInfo, fence, submitQueueSerial);
1184 }
1185 
1186 angle::Result CommandQueue::queueSubmit(Context *context,
1187                                         egl::ContextPriority contextPriority,
1188                                         const VkSubmitInfo &submitInfo,
1189                                         const Fence *fence,
1190                                         Serial submitQueueSerial)
1191 {
1192     ANGLE_TRACE_EVENT0("gpu.angle", "CommandQueue::queueSubmit");
1193 
1194     RendererVk *renderer = context->getRenderer();
1195 
1196     if (kOutputVmaStatsString)
1197     {
1198         renderer->outputVmaStatString();
1199     }
1200 
1201     VkFence fenceHandle = fence ? fence->getHandle() : VK_NULL_HANDLE;
1202     VkQueue queue       = getQueue(contextPriority);
1203     ANGLE_VK_TRY(context, vkQueueSubmit(queue, 1, &submitInfo, fenceHandle));
1204     mLastSubmittedQueueSerial = submitQueueSerial;
1205 
1206     // Now that we've submitted work, clean up RendererVk garbage
1207     return renderer->cleanupGarbage(mLastCompletedQueueSerial);
1208 }
1209 
1210 VkResult CommandQueue::queuePresent(egl::ContextPriority contextPriority,
1211                                     const VkPresentInfoKHR &presentInfo)
1212 {
1213     VkQueue queue = getQueue(contextPriority);
1214     return vkQueuePresentKHR(queue, &presentInfo);
1215 }
1216 
1217 Serial CommandQueue::getLastSubmittedQueueSerial() const
1218 {
1219     return mLastSubmittedQueueSerial;
1220 }
1221 
1222 Serial CommandQueue::getLastCompletedQueueSerial() const
1223 {
1224     return mLastCompletedQueueSerial;
1225 }
1226 
1227 Serial CommandQueue::getCurrentQueueSerial() const
1228 {
1229     return mCurrentQueueSerial;
1230 }
1231 
1232 // QueuePriorities:
1233 constexpr float kVulkanQueuePriorityLow    = 0.0;
1234 constexpr float kVulkanQueuePriorityMedium = 0.4;
1235 constexpr float kVulkanQueuePriorityHigh   = 1.0;
1236 
1237 const float QueueFamily::kQueuePriorities[static_cast<uint32_t>(egl::ContextPriority::EnumCount)] =
1238     {kVulkanQueuePriorityMedium, kVulkanQueuePriorityHigh, kVulkanQueuePriorityLow};
1239 
1240 egl::ContextPriority DeviceQueueMap::getDevicePriority(egl::ContextPriority priority) const
1241 {
1242     return mPriorities[priority];
1243 }
1244 
1245 DeviceQueueMap::~DeviceQueueMap() {}
1246 
1247 DeviceQueueMap &DeviceQueueMap::operator=(const DeviceQueueMap &other)
1248 {
1249     ASSERT(this != &other);
1250     if ((this != &other) && other.valid())
1251     {
1252         mIndex                                    = other.mIndex;
1253         mIsProtected                              = other.mIsProtected;
1254         mPriorities[egl::ContextPriority::Low]    = other.mPriorities[egl::ContextPriority::Low];
1255         mPriorities[egl::ContextPriority::Medium] = other.mPriorities[egl::ContextPriority::Medium];
1256         mPriorities[egl::ContextPriority::High]   = other.mPriorities[egl::ContextPriority::High];
1257         *static_cast<angle::PackedEnumMap<egl::ContextPriority, VkQueue> *>(this) = other;
1258     }
1259     return *this;
1260 }
1261 
1262 void QueueFamily::getDeviceQueue(VkDevice device,
1263                                  bool makeProtected,
1264                                  uint32_t queueIndex,
1265                                  VkQueue *queue)
1266 {
1267     if (makeProtected)
1268     {
1269         VkDeviceQueueInfo2 queueInfo2 = {};
1270         queueInfo2.sType              = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2;
1271         queueInfo2.flags              = VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT;
1272         queueInfo2.queueFamilyIndex   = mIndex;
1273         queueInfo2.queueIndex         = queueIndex;
1274 
1275         vkGetDeviceQueue2(device, &queueInfo2, queue);
1276     }
1277     else
1278     {
1279         vkGetDeviceQueue(device, mIndex, queueIndex, queue);
1280     }
1281 }
1282 
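// Maps the Medium/High/Low context priorities onto up to three device queues; when fewer queues
// are available, the remaining priorities alias the Medium-priority queue.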
1283 DeviceQueueMap QueueFamily::initializeQueueMap(VkDevice device,
1284                                                bool makeProtected,
1285                                                uint32_t queueIndex,
1286                                                uint32_t queueCount)
1287 {
1288     // QueueIndexing:
1289     constexpr uint32_t kQueueIndexMedium = 0;
1290     constexpr uint32_t kQueueIndexHigh   = 1;
1291     constexpr uint32_t kQueueIndexLow    = 2;
1292 
1293     ASSERT(queueCount);
1294     ASSERT((queueIndex + queueCount) <= mProperties.queueCount);
1295     DeviceQueueMap queueMap(mIndex, makeProtected);
1296 
1297     getDeviceQueue(device, makeProtected, queueIndex + kQueueIndexMedium,
1298                    &queueMap[egl::ContextPriority::Medium]);
1299     queueMap.mPriorities[egl::ContextPriority::Medium] = egl::ContextPriority::Medium;
1300 
1301     // If at least 2 queues, High has its own queue
1302     if (queueCount > 1)
1303     {
1304         getDeviceQueue(device, makeProtected, queueIndex + kQueueIndexHigh,
1305                        &queueMap[egl::ContextPriority::High]);
1306         queueMap.mPriorities[egl::ContextPriority::High] = egl::ContextPriority::High;
1307     }
1308     else
1309     {
1310         queueMap[egl::ContextPriority::High]             = queueMap[egl::ContextPriority::Medium];
1311         queueMap.mPriorities[egl::ContextPriority::High] = egl::ContextPriority::Medium;
1312     }
1313     // If at least 3 queues, Low has its own queue. Adjust Low priority.
1314     if (queueCount > 2)
1315     {
1316         getDeviceQueue(device, makeProtected, queueIndex + kQueueIndexLow,
1317                        &queueMap[egl::ContextPriority::Low]);
1318         queueMap.mPriorities[egl::ContextPriority::Low] = egl::ContextPriority::Low;
1319     }
1320     else
1321     {
1322         queueMap[egl::ContextPriority::Low]             = queueMap[egl::ContextPriority::Medium];
1323         queueMap.mPriorities[egl::ContextPriority::Low] = egl::ContextPriority::Medium;
1324     }
1325     return queueMap;
1326 }
1327 
1328 void QueueFamily::initialize(const VkQueueFamilyProperties &queueFamilyProperties, uint32_t index)
1329 {
1330     mProperties = queueFamilyProperties;
1331     mIndex      = index;
1332 }
1333 
1334 uint32_t QueueFamily::FindIndex(const std::vector<VkQueueFamilyProperties> &queueFamilyProperties,
1335                                 VkQueueFlags flags,
1336                                 int32_t matchNumber,
1337                                 uint32_t *matchCount)
1338 {
1339     uint32_t index = QueueFamily::kInvalidIndex;
1340     uint32_t count = 0;
1341 
1342     for (uint32_t familyIndex = 0; familyIndex < queueFamilyProperties.size(); ++familyIndex)
1343     {
1344         const auto &queueInfo = queueFamilyProperties[familyIndex];
1345         if ((queueInfo.queueFlags & flags) == flags)
1346         {
1347             ASSERT(queueInfo.queueCount > 0);
1348             count++;
1349             if ((index == QueueFamily::kInvalidIndex) && (matchNumber-- == 0))
1350             {
1351                 index = familyIndex;
1352             }
1353         }
1354     }
1355     if (matchCount)
1356     {
1357         *matchCount = count;
1358     }
1359 
1360     return index;
1361 }
1362 
1363 }  // namespace vk
1364 }  // namespace rx
1365