//
// Copyright 2020 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// CommandProcessor.h:
//    A class to process and submit Vulkan command buffers that can be
//    used in an asynchronous worker thread.
//

#ifndef LIBANGLE_RENDERER_VULKAN_COMMAND_PROCESSOR_H_
#define LIBANGLE_RENDERER_VULKAN_COMMAND_PROCESSOR_H_

#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>

#include "common/FixedQueue.h"
#include "common/SimpleMutex.h"
#include "common/vulkan/vk_headers.h"
#include "libANGLE/renderer/vulkan/PersistentCommandPool.h"
#include "libANGLE/renderer/vulkan/vk_helpers.h"

namespace rx
{
class CommandProcessor;

namespace vk
{
class ExternalFence;
using SharedExternalFence = std::shared_ptr<ExternalFence>;

constexpr size_t kMaxCommandProcessorTasksLimit = 16u;
constexpr size_t kInFlightCommandsLimit         = 50u;
constexpr size_t kMaxFinishedCommandsLimit      = 64u;

enum class SubmitPolicy
{
    AllowDeferred,
    EnsureSubmitted,
};

struct Error
{
    VkResult errorCode;
    const char *file;
    const char *function;
    uint32_t line;
};

class FenceRecycler;
// An RAII class that manages a refcounted VkFence object with auto-release and recycling.
class SharedFence final
{
  public:
    SharedFence();
    SharedFence(const SharedFence &other);
    SharedFence(SharedFence &&other);
    ~SharedFence();
    // Copy assignment adds a reference to the underlying object.
    SharedFence &operator=(const SharedFence &other);
    // Move assignment moves the reference from |other| to this object.
    SharedFence &operator=(SharedFence &&other);

    // Initialize with a VkFence, either fetched from the recycler or newly created.
    VkResult init(VkDevice device, FenceRecycler *recycler);
    // Destroy the fence immediately (without recycling).
    void destroy(VkDevice device);
    // Release the VkFence back to the recycler.
    void release();
    // Returns true if the underlying VkFence is valid.
    operator bool() const;
    const Fence &get() const
    {
        ASSERT(mRefCountedFence != nullptr && mRefCountedFence->isReferenced());
        return mRefCountedFence->get();
    }

    // The following APIs may be called without holding a lock: since the fence is refcounted and
    // this object holds a reference to the VkFence, no other thread can destroy the VkFence.
    VkResult getStatus(VkDevice device) const;
    VkResult wait(VkDevice device, uint64_t timeout) const;

  private:
    RefCounted<Fence> *mRefCountedFence;
    FenceRecycler *mRecycler;
};
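
// Illustrative SharedFence lifecycle (a sketch, not part of this header; |device|, |recycler|
// and |timeout| are assumed to be a valid VkDevice, FenceRecycler and timeout value owned by
// the caller):
//
//   SharedFence fence;
//   if (fence.init(device, &recycler) == VK_SUCCESS)
//   {
//       // ... submit work that signals the fence ...
//       VkResult result = fence.wait(device, timeout);
//       fence.release();  // Returns the VkFence to the recycler.
//   }
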
class FenceRecycler
{
  public:
    FenceRecycler() {}
    ~FenceRecycler() {}
    void destroy(Context *context);

    void fetch(VkDevice device, Fence *fenceOut);
    void recycle(Fence &&fence);

  private:
    angle::SimpleMutex mMutex;
    Recycler<Fence> mRecycler;
};

struct SwapchainStatus
{
    std::atomic<bool> isPending;
    VkResult lastPresentResult = VK_NOT_READY;
};

enum class CustomTask
{
    Invalid = 0,
    // Flushes wait semaphores
    FlushWaitSemaphores,
    // Process SecondaryCommandBuffer commands into the primary CommandBuffer.
    ProcessOutsideRenderPassCommands,
    ProcessRenderPassCommands,
    // End the current command buffer and submit commands to the queue
    FlushAndQueueSubmit,
    // Submit custom command buffer, excludes some state management
    OneOffQueueSubmit,
    // Execute QueuePresent
    Present,
};

// CommandProcessorTask interface
class CommandProcessorTask
{
  public:
    CommandProcessorTask() { initTask(); }
    ~CommandProcessorTask()
    {
        // Render passes are cached in RenderPassCache. The handle stored in the task references a
        // render pass that is managed by that cache.
        mRenderPass.release();
    }

    void initTask();

    void initFlushWaitSemaphores(ProtectionType protectionType,
                                 egl::ContextPriority priority,
                                 std::vector<VkSemaphore> &&waitSemaphores,
                                 std::vector<VkPipelineStageFlags> &&waitSemaphoreStageMasks);

    void initOutsideRenderPassProcessCommands(ProtectionType protectionType,
                                              egl::ContextPriority priority,
                                              OutsideRenderPassCommandBufferHelper *commandBuffer);

    void initRenderPassProcessCommands(ProtectionType protectionType,
                                       egl::ContextPriority priority,
                                       RenderPassCommandBufferHelper *commandBuffer,
                                       const RenderPass *renderPass,
                                       VkFramebuffer framebufferOverride);

    void initPresent(egl::ContextPriority priority,
                     const VkPresentInfoKHR &presentInfo,
                     SwapchainStatus *swapchainStatus);

    void initFlushAndQueueSubmit(VkSemaphore semaphore,
                                 SharedExternalFence &&externalFence,
                                 ProtectionType protectionType,
                                 egl::ContextPriority priority,
                                 const QueueSerial &submitQueueSerial);

    void initOneOffQueueSubmit(VkCommandBuffer commandBufferHandle,
                               ProtectionType protectionType,
                               egl::ContextPriority priority,
                               VkSemaphore waitSemaphore,
                               VkPipelineStageFlags waitSemaphoreStageMask,
                               const QueueSerial &submitQueueSerial);

    CommandProcessorTask &operator=(CommandProcessorTask &&rhs);

    CommandProcessorTask(CommandProcessorTask &&other) : CommandProcessorTask()
    {
        *this = std::move(other);
    }

    const QueueSerial &getSubmitQueueSerial() const { return mSubmitQueueSerial; }
    CustomTask getTaskCommand() { return mTask; }
    std::vector<VkSemaphore> &getWaitSemaphores() { return mWaitSemaphores; }
    std::vector<VkPipelineStageFlags> &getWaitSemaphoreStageMasks()
    {
        return mWaitSemaphoreStageMasks;
    }
    VkSemaphore getSemaphore() const { return mSemaphore; }
    SharedExternalFence &getExternalFence() { return mExternalFence; }
    egl::ContextPriority getPriority() const { return mPriority; }
    ProtectionType getProtectionType() const { return mProtectionType; }
    VkCommandBuffer getOneOffCommandBuffer() const { return mOneOffCommandBuffer; }
    VkSemaphore getOneOffWaitSemaphore() const { return mOneOffWaitSemaphore; }
    VkPipelineStageFlags getOneOffWaitSemaphoreStageMask() const
    {
        return mOneOffWaitSemaphoreStageMask;
    }
    const VkPresentInfoKHR &getPresentInfo() const { return mPresentInfo; }
    SwapchainStatus *getSwapchainStatus() const { return mSwapchainStatus; }
    const RenderPass &getRenderPass() const { return mRenderPass; }
    VkFramebuffer getFramebufferOverride() const { return mFramebufferOverride; }
    OutsideRenderPassCommandBufferHelper *getOutsideRenderPassCommandBuffer() const
    {
        return mOutsideRenderPassCommandBuffer;
    }
    RenderPassCommandBufferHelper *getRenderPassCommandBuffer() const
    {
        return mRenderPassCommandBuffer;
    }

  private:
    void copyPresentInfo(const VkPresentInfoKHR &other);

    CustomTask mTask;

    // Wait semaphores
    std::vector<VkSemaphore> mWaitSemaphores;
    std::vector<VkPipelineStageFlags> mWaitSemaphoreStageMasks;

    // ProcessCommands
    OutsideRenderPassCommandBufferHelper *mOutsideRenderPassCommandBuffer;
    RenderPassCommandBufferHelper *mRenderPassCommandBuffer;
    RenderPass mRenderPass;
    VkFramebuffer mFramebufferOverride;

    // Flush data
    VkSemaphore mSemaphore;
    SharedExternalFence mExternalFence;

    // Flush command data
    QueueSerial mSubmitQueueSerial;

    // Present command data
    VkPresentInfoKHR mPresentInfo;
    VkSwapchainKHR mSwapchain;
    VkSemaphore mWaitSemaphore;
    uint32_t mImageIndex;
    // Used by Present if supportsIncrementalPresent is enabled
    VkPresentRegionKHR mPresentRegion;
    VkPresentRegionsKHR mPresentRegions;
    std::vector<VkRectLayerKHR> mRects;

    VkSwapchainPresentFenceInfoEXT mPresentFenceInfo;
    VkFence mPresentFence;

    VkSwapchainPresentModeInfoEXT mPresentModeInfo;
    VkPresentModeKHR mPresentMode;

    SwapchainStatus *mSwapchainStatus;

    // Used by OneOffQueueSubmit
    VkCommandBuffer mOneOffCommandBuffer;
    VkSemaphore mOneOffWaitSemaphore;
    VkPipelineStageFlags mOneOffWaitSemaphoreStageMask;

    // Flush, Present & QueueWaitIdle data
    egl::ContextPriority mPriority;
    ProtectionType mProtectionType;
};
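
// Illustrative task construction (a sketch, not part of this header; |semaphore|, |fence| and
// |serial| are assumed to be a valid VkSemaphore, SharedExternalFence and QueueSerial owned by
// the caller):
//
//   CommandProcessorTask task;
//   task.initFlushAndQueueSubmit(semaphore, std::move(fence), ProtectionType::Unprotected,
//                                egl::ContextPriority::Medium, serial);
//   // The task is then moved into the processor's FixedQueue and consumed by the worker
//   // thread.
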
using CommandProcessorTaskQueue = angle::FixedQueue<CommandProcessorTask>;

struct CommandBatch final : angle::NonCopyable
{
    CommandBatch();
    ~CommandBatch();
    CommandBatch(CommandBatch &&other);
    CommandBatch &operator=(CommandBatch &&other);

    void destroy(VkDevice device);

    bool hasFence() const;
    void releaseFence();
    void destroyFence(VkDevice device);
    VkFence getFenceHandle() const;
    VkResult getFenceStatus(VkDevice device) const;
    VkResult waitFence(VkDevice device, uint64_t timeout) const;
    VkResult waitFenceUnlocked(VkDevice device,
                               uint64_t timeout,
                               std::unique_lock<angle::SimpleMutex> *lock) const;

    PrimaryCommandBuffer primaryCommands;
    SecondaryCommandBufferCollector secondaryCommands;
    SharedFence fence;
    SharedExternalFence externalFence;
    QueueSerial queueSerial;
    ProtectionType protectionType;
};
using CommandBatchQueue = angle::FixedQueue<CommandBatch>;

class DeviceQueueMap;

class QueueFamily final : angle::NonCopyable
{
  public:
    static const uint32_t kInvalidIndex = std::numeric_limits<uint32_t>::max();

    static uint32_t FindIndex(const std::vector<VkQueueFamilyProperties> &queueFamilyProperties,
                              VkQueueFlags flags,
                              int32_t matchNumber,  // 0 = first match, 1 = second match ...
                              uint32_t *matchCount);
    static const uint32_t kQueueCount = static_cast<uint32_t>(egl::ContextPriority::EnumCount);
    static const float kQueuePriorities[static_cast<uint32_t>(egl::ContextPriority::EnumCount)];

    QueueFamily() : mProperties{}, mQueueFamilyIndex(kInvalidIndex) {}
    ~QueueFamily() {}

    void initialize(const VkQueueFamilyProperties &queueFamilyProperties,
                    uint32_t queueFamilyIndex);
    bool valid() const { return (mQueueFamilyIndex != kInvalidIndex); }
    uint32_t getQueueFamilyIndex() const { return mQueueFamilyIndex; }
    const VkQueueFamilyProperties *getProperties() const { return &mProperties; }
    bool isGraphics() const { return ((mProperties.queueFlags & VK_QUEUE_GRAPHICS_BIT) > 0); }
    bool isCompute() const { return ((mProperties.queueFlags & VK_QUEUE_COMPUTE_BIT) > 0); }
    bool supportsProtected() const
    {
        return ((mProperties.queueFlags & VK_QUEUE_PROTECTED_BIT) > 0);
    }
    uint32_t getDeviceQueueCount() const { return mProperties.queueCount; }

  private:
    VkQueueFamilyProperties mProperties;
    uint32_t mQueueFamilyIndex;
};
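
// Illustrative queue family selection (a sketch; |queueFamilyProperties| is assumed to come
// from vkGetPhysicalDeviceQueueFamilyProperties, and |queueFamily| is a QueueFamily owned by
// the caller):
//
//   uint32_t matchCount = 0;
//   uint32_t index      = QueueFamily::FindIndex(queueFamilyProperties,
//                                                VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
//                                                0, &matchCount);
//   if (index != QueueFamily::kInvalidIndex)
//   {
//       queueFamily.initialize(queueFamilyProperties[index], index);
//   }
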
class DeviceQueueMap final
{
  public:
    DeviceQueueMap() : mQueueFamilyIndex(QueueFamily::kInvalidIndex), mIsProtected(false) {}
    ~DeviceQueueMap();

    void initialize(VkDevice device,
                    const QueueFamily &queueFamily,
                    bool makeProtected,
                    uint32_t queueIndex,
                    uint32_t queueCount);
    void destroy();

    bool valid() const { return (mQueueFamilyIndex != QueueFamily::kInvalidIndex); }
    uint32_t getQueueFamilyIndex() const { return mQueueFamilyIndex; }
    bool isProtected() const { return mIsProtected; }
    egl::ContextPriority getDevicePriority(egl::ContextPriority priority) const
    {
        return mQueueAndIndices[priority].devicePriority;
    }
    DeviceQueueIndex getDeviceQueueIndex(egl::ContextPriority priority) const
    {
        return DeviceQueueIndex(mQueueFamilyIndex, mQueueAndIndices[priority].index);
    }
    const VkQueue &getQueue(egl::ContextPriority priority) const
    {
        return mQueueAndIndices[priority].queue;
    }

  private:
    uint32_t mQueueFamilyIndex;
    bool mIsProtected;
    struct QueueAndIndex
    {
        // The actual priority that is used
        egl::ContextPriority devicePriority;
        VkQueue queue;
        // The queueIndex used for vkGetDeviceQueue
        uint32_t index;
    };
    angle::PackedEnumMap<egl::ContextPriority, QueueAndIndex> mQueueAndIndices;
};
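
// Illustrative queue retrieval by priority (a sketch; |queueMap| is assumed to have been
// initialized during device creation):
//
//   VkQueue queue = queueMap.getQueue(egl::ContextPriority::High);
//   // If fewer distinct device queues exist than priorities, several priorities may map to
//   // the same VkQueue; getDevicePriority() reports the priority actually in effect.
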
// Note: all public APIs of the CommandQueue class must be thread safe.
class CommandQueue : angle::NonCopyable
{
  public:
    CommandQueue();
    ~CommandQueue();

    angle::Result init(Context *context,
                       const QueueFamily &queueFamily,
                       bool enableProtectedContent,
                       uint32_t queueCount);

    void destroy(Context *context);

    void handleDeviceLost(Renderer *renderer);

    // These public APIs are inherently thread safe. Thread-unsafe methods must be protected
    // methods that are only accessed via the ThreadSafeCommandQueue API.
    egl::ContextPriority getDriverPriority(egl::ContextPriority priority) const
    {
        return mQueueMap.getDevicePriority(priority);
    }

    DeviceQueueIndex getDeviceQueueIndex(egl::ContextPriority priority) const
    {
        return mQueueMap.getDeviceQueueIndex(priority);
    }

    VkQueue getQueue(egl::ContextPriority priority) const { return mQueueMap.getQueue(priority); }

    Serial getLastSubmittedSerial(SerialIndex index) const { return mLastSubmittedSerials[index]; }

    // Returns true if all queue serials of the ResourceUse have finished execution.
    bool hasResourceUseFinished(const ResourceUse &use) const
    {
        return use <= mLastCompletedSerials;
    }
    bool hasQueueSerialFinished(const QueueSerial &queueSerial) const
    {
        return queueSerial <= mLastCompletedSerials;
    }
    // Returns true if all queue serials of the ResourceUse have been submitted to Vulkan.
    bool hasResourceUseSubmitted(const ResourceUse &use) const
    {
        return use <= mLastSubmittedSerials;
    }
    bool hasQueueSerialSubmitted(const QueueSerial &queueSerial) const
    {
        return queueSerial <= mLastSubmittedSerials;
    }

    // Wait until the desired serial has been completed.
    angle::Result finishResourceUse(Context *context, const ResourceUse &use, uint64_t timeout);
    angle::Result finishQueueSerial(Context *context,
                                    const QueueSerial &queueSerial,
                                    uint64_t timeout);
    angle::Result waitIdle(Context *context, uint64_t timeout);
    angle::Result waitForResourceUseToFinishWithUserTimeout(Context *context,
                                                            const ResourceUse &use,
                                                            uint64_t timeout,
                                                            VkResult *result);
    bool isBusy(Renderer *renderer) const;

    angle::Result submitCommands(Context *context,
                                 ProtectionType protectionType,
                                 egl::ContextPriority priority,
                                 VkSemaphore signalSemaphore,
                                 SharedExternalFence &&externalFence,
                                 const QueueSerial &submitQueueSerial);

    angle::Result queueSubmitOneOff(Context *context,
                                    ProtectionType protectionType,
                                    egl::ContextPriority contextPriority,
                                    VkCommandBuffer commandBufferHandle,
                                    VkSemaphore waitSemaphore,
                                    VkPipelineStageFlags waitSemaphoreStageMask,
                                    SubmitPolicy submitPolicy,
                                    const QueueSerial &submitQueueSerial);
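
    // Illustrative submit-then-finish flow (a sketch; |serial| is assumed to be a QueueSerial
    // reserved by the caller for this submission):
    //
    //   ANGLE_TRY(commandQueue.submitCommands(context, ProtectionType::Unprotected,
    //                                         egl::ContextPriority::Medium, signalSemaphore,
    //                                         std::move(externalFence), serial));
    //   // Block until the GPU has finished the batch:
    //   ANGLE_TRY(commandQueue.finishQueueSerial(context, serial, timeout));
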
    // Errors from present are not considered fatal.
    void queuePresent(egl::ContextPriority contextPriority,
                      const VkPresentInfoKHR &presentInfo,
                      SwapchainStatus *swapchainStatus);

    angle::Result checkCompletedCommands(Context *context)
    {
        std::lock_guard<angle::SimpleMutex> lock(mMutex);
        return checkCompletedCommandsLocked(context);
    }

    bool hasFinishedCommands() const { return !mFinishedCommandBatches.empty(); }

    angle::Result checkAndCleanupCompletedCommands(Context *context)
    {
        ANGLE_TRY(checkCompletedCommands(context));

        if (!mFinishedCommandBatches.empty())
        {
            ANGLE_TRY(retireFinishedCommandsAndCleanupGarbage(context));
        }

        return angle::Result::Continue;
    }

    void flushWaitSemaphores(ProtectionType protectionType,
                             egl::ContextPriority priority,
                             std::vector<VkSemaphore> &&waitSemaphores,
                             std::vector<VkPipelineStageFlags> &&waitSemaphoreStageMasks);
    angle::Result flushOutsideRPCommands(Context *context,
                                         ProtectionType protectionType,
                                         egl::ContextPriority priority,
                                         OutsideRenderPassCommandBufferHelper **outsideRPCommands);
    angle::Result flushRenderPassCommands(Context *context,
                                          ProtectionType protectionType,
                                          egl::ContextPriority priority,
                                          const RenderPass &renderPass,
                                          VkFramebuffer framebufferOverride,
                                          RenderPassCommandBufferHelper **renderPassCommands);

    const angle::VulkanPerfCounters getPerfCounters() const;
    void resetPerFramePerfCounters();

    // Retire finished commands and clean up garbage immediately, or request async cleanup if
    // enabled.
    angle::Result retireFinishedCommandsAndCleanupGarbage(Context *context);
    angle::Result retireFinishedCommands(Context *context)
    {
        std::lock_guard<angle::SimpleMutex> lock(mMutex);
        return retireFinishedCommandsLocked(context);
    }
    angle::Result postSubmitCheck(Context *context);

    // Similar to finishOneCommandBatchAndCleanupImpl(), but returns early if no commands exist
    // in the queue.
    angle::Result finishOneCommandBatchAndCleanup(Context *context,
                                                  uint64_t timeout,
                                                  bool *anyFinished);
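
    // Illustrative incremental-finish loop (a sketch, e.g. for throttling when too many
    // batches are in flight; the budget condition is hypothetical):
    //
    //   bool anyFinished = true;
    //   while (anyFinished && /* still over the in-flight budget */)
    //   {
    //       ANGLE_TRY(commandQueue.finishOneCommandBatchAndCleanup(context, timeout,
    //                                                              &anyFinished));
    //   }
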
    // All private APIs are called with the mutex already locked, so they must not take the lock
    // again.
  private:
    // Check the first command buffer in mInFlightCommands and update mLastCompletedSerials if
    // finished.
    angle::Result checkOneCommandBatch(Context *context, bool *finished);
    // Similar to checkOneCommandBatch, except it waits for the batch to finish.
    angle::Result finishOneCommandBatchAndCleanupImpl(Context *context, uint64_t timeout);
    // Walk mFinishedCommands, reset and recycle all command buffers.
    angle::Result retireFinishedCommandsLocked(Context *context);
    // Walk mInFlightCommands, check and update mLastCompletedSerials for all commands that are
    // finished.
    angle::Result checkCompletedCommandsLocked(Context *context);

    angle::Result queueSubmit(Context *context,
                              std::unique_lock<angle::SimpleMutex> &&dequeueLock,
                              egl::ContextPriority contextPriority,
                              const VkSubmitInfo &submitInfo,
                              DeviceScoped<CommandBatch> &commandBatch,
                              const QueueSerial &submitQueueSerial);

    angle::Result ensurePrimaryCommandBufferValid(Context *context,
                                                  ProtectionType protectionType,
                                                  egl::ContextPriority priority);

    using CommandsStateMap =
        angle::PackedEnumMap<egl::ContextPriority,
                             angle::PackedEnumMap<ProtectionType, CommandsState>>;
    using PrimaryCommandPoolMap = angle::PackedEnumMap<ProtectionType, PersistentCommandPool>;

    angle::Result initCommandPool(Context *context, ProtectionType protectionType)
    {
        PersistentCommandPool &commandPool = mPrimaryCommandPoolMap[protectionType];
        return commandPool.init(context, protectionType, mQueueMap.getQueueFamilyIndex());
    }

    // Protects multi-threaded access to mInFlightCommands.pop and ensures ordering of submission.
    mutable angle::SimpleMutex mMutex;
    // Protects multi-threaded access to mInFlightCommands.push and also relays the lock for
    // mMutex, so that mMutex can be released during the potentially lengthy vkQueueSubmit and
    // vkQueuePresent calls.
    angle::SimpleMutex mQueueSubmitMutex;
    CommandBatchQueue mInFlightCommands;
    // Temporary storage for finished command batches that should be reset.
    CommandBatchQueue mFinishedCommandBatches;

    CommandsStateMap mCommandsStateMap;
    // Keeps a free list of reusable primary command buffers.
    PrimaryCommandPoolMap mPrimaryCommandPoolMap;

    // Queue serial management.
    AtomicQueueSerialFixedArray mLastSubmittedSerials;
    // This queue serial can be read/written from different threads, so we need to use atomic
    // operations to access the underlying value. Since we only do load/store on this value, it
    // should be just a normal uint64_t load/store on most platforms.
    AtomicQueueSerialFixedArray mLastCompletedSerials;

    // QueueMap
    DeviceQueueMap mQueueMap;

    FenceRecycler mFenceRecycler;

    angle::VulkanPerfCounters mPerfCounters;
};
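
// Illustrative non-fatal wait with a user-provided timeout (a sketch; |use| and |timeout| are
// assumed to be a ResourceUse and timeout owned by the caller):
//
//   VkResult status = VK_SUCCESS;
//   ANGLE_TRY(commandQueue.waitForResourceUseToFinishWithUserTimeout(context, use, timeout,
//                                                                    &status));
//   if (status == VK_TIMEOUT)
//   {
//       // The wait timed out; the resource is still in use by the GPU.
//   }
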
// CommandProcessor is used to dispatch work to the GPU when the asyncCommandQueue feature is
// enabled. Issuing the |destroy| command will cause the worker thread to clean up its resources
// and shut down. This command is sent when the renderer instance shuts down. Tasks are defined by
// the CommandQueue interface.

class CommandProcessor : public Context
{
  public:
    CommandProcessor(Renderer *renderer, CommandQueue *commandQueue);
    ~CommandProcessor() override;

    // Context
    void handleError(VkResult result,
                     const char *file,
                     const char *function,
                     unsigned int line) override;

    angle::Result init();

    void destroy(Context *context);

    void handleDeviceLost(Renderer *renderer);

    angle::Result enqueueSubmitCommands(Context *context,
                                        ProtectionType protectionType,
                                        egl::ContextPriority priority,
                                        VkSemaphore signalSemaphore,
                                        SharedExternalFence &&externalFence,
                                        const QueueSerial &submitQueueSerial);

    void requestCommandsAndGarbageCleanup();

    angle::Result enqueueSubmitOneOffCommands(Context *context,
                                              ProtectionType protectionType,
                                              egl::ContextPriority contextPriority,
                                              VkCommandBuffer commandBufferHandle,
                                              VkSemaphore waitSemaphore,
                                              VkPipelineStageFlags waitSemaphoreStageMask,
                                              SubmitPolicy submitPolicy,
                                              const QueueSerial &submitQueueSerial);
    void enqueuePresent(egl::ContextPriority contextPriority,
                        const VkPresentInfoKHR &presentInfo,
                        SwapchainStatus *swapchainStatus);

    angle::Result enqueueFlushWaitSemaphores(
        ProtectionType protectionType,
        egl::ContextPriority priority,
        std::vector<VkSemaphore> &&waitSemaphores,
        std::vector<VkPipelineStageFlags> &&waitSemaphoreStageMasks);
    angle::Result enqueueFlushOutsideRPCommands(
        Context *context,
        ProtectionType protectionType,
        egl::ContextPriority priority,
        OutsideRenderPassCommandBufferHelper **outsideRPCommands);
    angle::Result enqueueFlushRenderPassCommands(
        Context *context,
        ProtectionType protectionType,
        egl::ContextPriority priority,
        const RenderPass &renderPass,
        VkFramebuffer framebufferOverride,
        RenderPassCommandBufferHelper **renderPassCommands);

    // Wait until the desired serial has been submitted.
    angle::Result waitForQueueSerialToBeSubmitted(vk::Context *context,
                                                  const QueueSerial &queueSerial)
    {
        const ResourceUse use(queueSerial);
        return waitForResourceUseToBeSubmitted(context, use);
    }
    angle::Result waitForResourceUseToBeSubmitted(vk::Context *context, const ResourceUse &use);
    // Wait for the worker thread to submit all outstanding work.
    angle::Result waitForAllWorkToBeSubmitted(Context *context);
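
    // Illustrative async enqueue-then-wait flow (a sketch; the worker thread performs the
    // actual vkQueueSubmit):
    //
    //   ANGLE_TRY(processor.enqueueSubmitCommands(context, protectionType, priority,
    //                                             signalSemaphore, std::move(externalFence),
    //                                             serial));
    //   // Before the CPU relies on the submission being visible to Vulkan:
    //   ANGLE_TRY(processor.waitForQueueSerialToBeSubmitted(context, serial));
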
    // Wait for an enqueued present to be submitted.
    angle::Result waitForPresentToBeSubmitted(SwapchainStatus *swapchainStatus);

    bool isBusy(Renderer *renderer) const
    {
        std::lock_guard<std::mutex> enqueueLock(mTaskEnqueueMutex);
        return !mTaskQueue.empty() || mCommandQueue->isBusy(renderer);
    }

    bool hasResourceUseEnqueued(const ResourceUse &use) const
    {
        return use <= mLastEnqueuedSerials;
    }
    bool hasQueueSerialEnqueued(const QueueSerial &queueSerial) const
    {
        return queueSerial <= mLastEnqueuedSerials;
    }
    Serial getLastEnqueuedSerial(SerialIndex index) const { return mLastEnqueuedSerials[index]; }

    std::thread::id getThreadId() const { return mTaskThread.get_id(); }

  private:
    bool hasPendingError() const
    {
        std::lock_guard<angle::SimpleMutex> queueLock(mErrorMutex);
        return !mErrors.empty();
    }
    angle::Result checkAndPopPendingError(Context *errorHandlingContext);

    // Entry point for the command processor thread; calls processTasksImpl to do the work.
    // Called by Renderer::initializeDevice on the main thread.
    void processTasks();

    // Called asynchronously from the main thread to queue work that is then processed by the
    // worker thread.
    angle::Result queueCommand(CommandProcessorTask &&task);

    // Command processor thread, called by processTasks. The loop waits for work to be submitted
    // from a separate thread.
    angle::Result processTasksImpl(bool *exitThread);

    // Command processor thread: process a single task.
    angle::Result processTask(CommandProcessorTask *task);

    VkResult present(egl::ContextPriority priority,
                     const VkPresentInfoKHR &presentInfo,
                     SwapchainStatus *swapchainStatus);

    // Serializes dequeuing from mTaskQueue and submitting to mCommandQueue, so that there is
    // only one mTaskQueue consumer at a time.
    angle::SimpleMutex mTaskDequeueMutex;

    CommandProcessorTaskQueue mTaskQueue;
    mutable std::mutex mTaskEnqueueMutex;
    // Signal the worker thread when work is available.
    std::condition_variable mWorkAvailableCondition;
    CommandQueue *const mCommandQueue;

    // Tracks the last serial that was enqueued to mTaskQueue. Note: this may differ from (and is
    // always less than or equal to) mLastSubmittedQueueSerial in CommandQueue, since submission
    // from CommandProcessor to CommandQueue occurs in a separate thread.
    AtomicQueueSerialFixedArray mLastEnqueuedSerials;

    mutable angle::SimpleMutex mErrorMutex;
    std::queue<Error> mErrors;

    // Command queue worker thread.
    std::thread mTaskThread;
    bool mTaskThreadShouldExit;
    std::atomic<bool> mNeedCommandsAndGarbageCleanup;
};
}  // namespace vk

}  // namespace rx

#endif  // LIBANGLE_RENDERER_VULKAN_COMMAND_PROCESSOR_H_