1 //
2 // Copyright 2016 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // vk_utils:
7 //    Helper functions for the Vulkan Renderer.
8 //
9 
10 #ifndef LIBANGLE_RENDERER_VULKAN_VK_UTILS_H_
11 #define LIBANGLE_RENDERER_VULKAN_VK_UTILS_H_
12 
13 #include <atomic>
14 #include <limits>
15 #include <queue>
16 
17 #include "GLSLANG/ShaderLang.h"
18 #include "common/FixedVector.h"
19 #include "common/Optional.h"
20 #include "common/PackedEnums.h"
21 #include "common/SimpleMutex.h"
22 #include "common/WorkerThread.h"
23 #include "common/backtrace_utils.h"
24 #include "common/debug.h"
25 #include "libANGLE/Error.h"
26 #include "libANGLE/Observer.h"
27 #include "libANGLE/angletypes.h"
28 #include "libANGLE/renderer/serial_utils.h"
29 #include "libANGLE/renderer/vulkan/SecondaryCommandBuffer.h"
30 #include "libANGLE/renderer/vulkan/SecondaryCommandPool.h"
31 #include "libANGLE/renderer/vulkan/VulkanSecondaryCommandBuffer.h"
32 #include "libANGLE/renderer/vulkan/vk_wrapper.h"
33 #include "platform/autogen/FeaturesVk_autogen.h"
34 #include "vulkan/vulkan_fuchsia_ext.h"
35 
36 #define ANGLE_GL_OBJECTS_X(PROC) \
37     PROC(Buffer)                 \
38     PROC(Context)                \
39     PROC(Framebuffer)            \
40     PROC(MemoryObject)           \
41     PROC(Overlay)                \
42     PROC(Program)                \
43     PROC(ProgramExecutable)      \
44     PROC(ProgramPipeline)        \
45     PROC(Query)                  \
46     PROC(Renderbuffer)           \
47     PROC(Sampler)                \
48     PROC(Semaphore)              \
49     PROC(Texture)                \
50     PROC(TransformFeedback)      \
51     PROC(VertexArray)
52 
53 #define ANGLE_PRE_DECLARE_OBJECT(OBJ) class OBJ;
54 
55 namespace egl
56 {
57 class Display;
58 class Image;
59 class ShareGroup;
60 }  // namespace egl
61 
62 namespace gl
63 {
64 class MockOverlay;
65 class ProgramExecutable;
66 struct RasterizerState;
67 struct SwizzleState;
68 struct VertexAttribute;
69 class VertexBinding;
70 
71 ANGLE_GL_OBJECTS_X(ANGLE_PRE_DECLARE_OBJECT)
72 }  // namespace gl
73 
74 #define ANGLE_PRE_DECLARE_VK_OBJECT(OBJ) class OBJ##Vk;
75 
76 namespace rx
77 {
78 class DisplayVk;
79 class ImageVk;
80 class ProgramExecutableVk;
81 class RenderbufferVk;
82 class RenderTargetVk;
83 class RenderPassCache;
84 class ShareGroupVk;
85 }  // namespace rx
86 
87 namespace angle
88 {
89 egl::Error ToEGL(Result result, EGLint errorCode);
90 }  // namespace angle
91 
92 namespace rx
93 {
94 ANGLE_GL_OBJECTS_X(ANGLE_PRE_DECLARE_VK_OBJECT)
95 
96 const char *VulkanResultString(VkResult result);
97 
98 constexpr size_t kMaxVulkanLayers = 20;
99 using VulkanLayerVector           = angle::FixedVector<const char *, kMaxVulkanLayers>;
100 
101 // Verify that validation layers are available.
102 bool GetAvailableValidationLayers(const std::vector<VkLayerProperties> &layerProps,
103                                   bool mustHaveLayers,
104                                   VulkanLayerVector *enabledLayerNames);
105 
106 enum class TextureDimension
107 {
108     TEX_2D,
109     TEX_CUBE,
110     TEX_3D,
111     TEX_2D_ARRAY,
112 };
113 
114 enum class BufferUsageType
115 {
116     Static      = 0,
117     Dynamic     = 1,
118     InvalidEnum = 2,
119     EnumCount   = InvalidEnum,
120 };
121 
122 // A maximum offset of 4096 covers almost every Vulkan driver on desktop (80%) and mobile (99%). The
123 // next highest values to meet native drivers are 16 bits or 32 bits.
124 constexpr uint32_t kAttributeOffsetMaxBits = 15;
125 constexpr uint32_t kInvalidMemoryTypeIndex = UINT32_MAX;
126 constexpr uint32_t kInvalidMemoryHeapIndex = UINT32_MAX;
127 
128 namespace vk
129 {
130 class Renderer;
131 
132 // Used for memory allocation tracking.
133 enum class MemoryAllocationType;
134 
135 enum class MemoryHostVisibility
136 {
137     NonVisible,
138     Visible
139 };
140 
141 // Encapsulate the graphics family index and the VkQueue index (as seen in the vkGetDeviceQueue
142 // API arguments) into one integer that we can easily pass around without introducing extra overhead.
143 class DeviceQueueIndex final
144 {
145   public:
146     constexpr DeviceQueueIndex()
147         : mFamilyIndex(kInvalidQueueFamilyIndex), mQueueIndex(kInvalidQueueIndex)
148     {}
149     constexpr DeviceQueueIndex(uint32_t familyIndex)
150         : mFamilyIndex((int8_t)familyIndex), mQueueIndex(kInvalidQueueIndex)
151     {
152         ASSERT(static_cast<uint32_t>(mFamilyIndex) == familyIndex);
153     }
154     DeviceQueueIndex(uint32_t familyIndex, uint32_t queueIndex)
155         : mFamilyIndex((int8_t)familyIndex), mQueueIndex((int8_t)queueIndex)
156     {
157         // Ensure we don't actually truncate the useful bits.
158         ASSERT(static_cast<uint32_t>(mFamilyIndex) == familyIndex);
159         ASSERT(static_cast<uint32_t>(mQueueIndex) == queueIndex);
160     }
161     DeviceQueueIndex(const DeviceQueueIndex &other) { *this = other; }
162 
163     DeviceQueueIndex &operator=(const DeviceQueueIndex &other)
164     {
165         mValue = other.mValue;
166         return *this;
167     }
168 
169     constexpr uint32_t familyIndex() const { return mFamilyIndex; }
170     constexpr uint32_t queueIndex() const { return mQueueIndex; }
171 
172     bool operator==(const DeviceQueueIndex &other) const { return mValue == other.mValue; }
173     bool operator!=(const DeviceQueueIndex &other) const { return mValue != other.mValue; }
174 
175   private:
176     static constexpr int8_t kInvalidQueueFamilyIndex = -1;
177     static constexpr int8_t kInvalidQueueIndex       = -1;
178     // The expectation is that these indices are small numbers that could easily fit into int8_t.
179     // int8_t is used instead of uint8_t because we need to handle VK_QUEUE_FAMILY_FOREIGN_EXT and
180     // VK_QUEUE_FAMILY_EXTERNAL properly, which are essentially negative values.
181     union
182     {
183         struct
184         {
185             int8_t mFamilyIndex;
186             int8_t mQueueIndex;
187         };
188         uint16_t mValue;
189     };
190 };
191 static constexpr DeviceQueueIndex kInvalidDeviceQueueIndex = DeviceQueueIndex();
192 static constexpr DeviceQueueIndex kForeignDeviceQueueIndex =
193     DeviceQueueIndex(VK_QUEUE_FAMILY_FOREIGN_EXT);
194 static constexpr DeviceQueueIndex kExternalDeviceQueueIndex =
195     DeviceQueueIndex(VK_QUEUE_FAMILY_EXTERNAL);
196 static_assert(kForeignDeviceQueueIndex.familyIndex() == VK_QUEUE_FAMILY_FOREIGN_EXT);
197 static_assert(kExternalDeviceQueueIndex.familyIndex() == VK_QUEUE_FAMILY_EXTERNAL);
198 static_assert(kInvalidDeviceQueueIndex.familyIndex() == VK_QUEUE_FAMILY_IGNORED);
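
// A minimal usage sketch (illustrative; the index values and the surrounding device setup are
// assumptions, not taken from this file): pack a queue family/queue pair once and pass it around
// as a single value.
//
//     DeviceQueueIndex graphicsQueue(0 /* familyIndex */, 0 /* queueIndex */);
//     VkQueue queue = VK_NULL_HANDLE;
//     vkGetDeviceQueue(device, graphicsQueue.familyIndex(), graphicsQueue.queueIndex(), &queue);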
199 
200 // A packed attachment index for interfacing with the Vulkan API.
201 class PackedAttachmentIndex final
202 {
203   public:
204     explicit constexpr PackedAttachmentIndex(uint32_t index) : mAttachmentIndex(index) {}
205     constexpr PackedAttachmentIndex(const PackedAttachmentIndex &other)            = default;
206     constexpr PackedAttachmentIndex &operator=(const PackedAttachmentIndex &other) = default;
207 
208     constexpr uint32_t get() const { return mAttachmentIndex; }
209     PackedAttachmentIndex &operator++()
210     {
211         ++mAttachmentIndex;
212         return *this;
213     }
214     constexpr bool operator==(const PackedAttachmentIndex &other) const
215     {
216         return mAttachmentIndex == other.mAttachmentIndex;
217     }
218     constexpr bool operator!=(const PackedAttachmentIndex &other) const
219     {
220         return mAttachmentIndex != other.mAttachmentIndex;
221     }
222     constexpr bool operator<(const PackedAttachmentIndex &other) const
223     {
224         return mAttachmentIndex < other.mAttachmentIndex;
225     }
226 
227   private:
228     uint32_t mAttachmentIndex;
229 };
230 using PackedAttachmentCount                                    = PackedAttachmentIndex;
231 static constexpr PackedAttachmentIndex kAttachmentIndexInvalid = PackedAttachmentIndex(-1);
232 static constexpr PackedAttachmentIndex kAttachmentIndexZero    = PackedAttachmentIndex(0);
233 
234 // Prepend ptr to the pNext chain at chainStart
235 template <typename VulkanStruct1, typename VulkanStruct2>
236 void AddToPNextChain(VulkanStruct1 *chainStart, VulkanStruct2 *ptr)
237 {
238     // Catch bugs where this function is called with `&pointer` instead of `pointer`.
239     static_assert(!std::is_pointer<VulkanStruct1>::value);
240     static_assert(!std::is_pointer<VulkanStruct2>::value);
241 
242     ASSERT(ptr->pNext == nullptr);
243 
244     VkBaseOutStructure *localPtr = reinterpret_cast<VkBaseOutStructure *>(chainStart);
245     ptr->pNext                   = localPtr->pNext;
246     localPtr->pNext              = reinterpret_cast<VkBaseOutStructure *>(ptr);
247 }
248 
249 // Append ptr to the end of the chain
250 template <typename VulkanStruct1, typename VulkanStruct2>
251 void AppendToPNextChain(VulkanStruct1 *chainStart, VulkanStruct2 *ptr)
252 {
253     static_assert(!std::is_pointer<VulkanStruct1>::value);
254     static_assert(!std::is_pointer<VulkanStruct2>::value);
255 
256     if (!ptr)
257     {
258         return;
259     }
260 
261     VkBaseOutStructure *endPtr = reinterpret_cast<VkBaseOutStructure *>(chainStart);
262     while (endPtr->pNext)
263     {
264         endPtr = endPtr->pNext;
265     }
266     endPtr->pNext = reinterpret_cast<VkBaseOutStructure *>(ptr);
267 }
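
// A minimal usage sketch (illustrative; the particular feature structs are assumptions): chain an
// extension struct onto a query struct before calling into Vulkan.
//
//     VkPhysicalDeviceFeatures2 features2 = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2};
//     VkPhysicalDeviceProtectedMemoryFeatures protectedMemory = {
//         VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES};
//     AddToPNextChain(&features2, &protectedMemory);
//     vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);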
268 
269 class QueueSerialIndexAllocator final
270 {
271   public:
272     QueueSerialIndexAllocator() : mLargestIndexEverAllocated(kInvalidQueueSerialIndex)
273     {
274         // Start with every index free.
275         mFreeIndexBitSetArray.set();
276         ASSERT(mFreeIndexBitSetArray.all());
277     }
278     SerialIndex allocate()
279     {
280         std::lock_guard<angle::SimpleMutex> lock(mMutex);
281         if (mFreeIndexBitSetArray.none())
282         {
283             ERR() << "Run out of queue serial index. All " << kMaxQueueSerialIndexCount
284                   << " indices are used.";
285             return kInvalidQueueSerialIndex;
286         }
287         SerialIndex index = static_cast<SerialIndex>(mFreeIndexBitSetArray.first());
288         ASSERT(index < kMaxQueueSerialIndexCount);
289         mFreeIndexBitSetArray.reset(index);
290         mLargestIndexEverAllocated = (~mFreeIndexBitSetArray).last();
291         return index;
292     }
293 
294     void release(SerialIndex index)
295     {
296         std::lock_guard<angle::SimpleMutex> lock(mMutex);
297         ASSERT(index <= mLargestIndexEverAllocated);
298         ASSERT(!mFreeIndexBitSetArray.test(index));
299         mFreeIndexBitSetArray.set(index);
300         // mLargestIndexEverAllocated is an optimization. Even after an index is released,
301         // resources may still hold serials that use it, so do not decrement
302         // mLargestIndexEverAllocated here. The only downside is that we may take a slightly less
303         // optimal code path in GetBatchCountUpToSerials.
304     }
305 
306     size_t getLargestIndexEverAllocated() const
307     {
308         return mLargestIndexEverAllocated.load(std::memory_order_consume);
309     }
310 
311   private:
312     angle::BitSetArray<kMaxQueueSerialIndexCount> mFreeIndexBitSetArray;
313     std::atomic<size_t> mLargestIndexEverAllocated;
314     angle::SimpleMutex mMutex;
315 };
316 
317 class [[nodiscard]] ScopedQueueSerialIndex final : angle::NonCopyable
318 {
319   public:
320     ScopedQueueSerialIndex() : mIndex(kInvalidQueueSerialIndex), mIndexAllocator(nullptr) {}
321     ~ScopedQueueSerialIndex()
322     {
323         if (mIndex != kInvalidQueueSerialIndex)
324         {
325             ASSERT(mIndexAllocator != nullptr);
326             mIndexAllocator->release(mIndex);
327         }
328     }
329 
330     void init(SerialIndex index, QueueSerialIndexAllocator *indexAllocator)
331     {
332         ASSERT(mIndex == kInvalidQueueSerialIndex);
333         ASSERT(index != kInvalidQueueSerialIndex);
334         ASSERT(indexAllocator != nullptr);
335         mIndex          = index;
336         mIndexAllocator = indexAllocator;
337     }
338 
339     SerialIndex get() const { return mIndex; }
340 
341   private:
342     SerialIndex mIndex;
343     QueueSerialIndexAllocator *mIndexAllocator;
344 };
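
// A minimal usage sketch (illustrative): allocate a serial index and let the scoped wrapper return
// it to the allocator when it goes out of scope.
//
//     QueueSerialIndexAllocator indexAllocator;
//     ScopedQueueSerialIndex scopedIndex;
//     SerialIndex index = indexAllocator.allocate();
//     if (index != kInvalidQueueSerialIndex)
//     {
//         scopedIndex.init(index, &indexAllocator);
//     }
//     // scopedIndex.get() can now be used to build QueueSerials until the scope ends.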
345 
346 class RefCountedEventsGarbageRecycler;
347 // Abstracts error handling. Implemented by ContextVk for GL, DisplayVk for EGL, worker threads,
348 // CLContextVk etc.
349 class ErrorContext : angle::NonCopyable
350 {
351   public:
352     ErrorContext(Renderer *renderer);
353     virtual ~ErrorContext();
354 
355     virtual void handleError(VkResult result,
356                              const char *file,
357                              const char *function,
358                              unsigned int line) = 0;
359     VkDevice getDevice() const;
360     Renderer *getRenderer() const { return mRenderer; }
361     const angle::FeaturesVk &getFeatures() const;
362 
363     const angle::VulkanPerfCounters &getPerfCounters() const { return mPerfCounters; }
364     angle::VulkanPerfCounters &getPerfCounters() { return mPerfCounters; }
365     const DeviceQueueIndex &getDeviceQueueIndex() const { return mDeviceQueueIndex; }
366 
367   protected:
368     Renderer *const mRenderer;
369     DeviceQueueIndex mDeviceQueueIndex;
370     angle::VulkanPerfCounters mPerfCounters;
371 };
372 
373 // Abstract global operations that are handled differently between EGL and OpenCL.
374 class GlobalOps : angle::NonCopyable
375 {
376   public:
377     virtual ~GlobalOps() = default;
378 
379     virtual void putBlob(const angle::BlobCacheKey &key, const angle::MemoryBuffer &value) = 0;
380     virtual bool getBlob(const angle::BlobCacheKey &key, angle::BlobCacheValue *valueOut)  = 0;
381 
382     virtual std::shared_ptr<angle::WaitableEvent> postMultiThreadWorkerTask(
383         const std::shared_ptr<angle::Closure> &task) = 0;
384 
385     virtual void notifyDeviceLost() = 0;
386 };
387 
388 class RenderPassDesc;
389 
390 #if ANGLE_USE_CUSTOM_VULKAN_OUTSIDE_RENDER_PASS_CMD_BUFFERS
391 using OutsideRenderPassCommandBuffer = priv::SecondaryCommandBuffer;
392 #else
393 using OutsideRenderPassCommandBuffer = VulkanSecondaryCommandBuffer;
394 #endif
395 #if ANGLE_USE_CUSTOM_VULKAN_RENDER_PASS_CMD_BUFFERS
396 using RenderPassCommandBuffer = priv::SecondaryCommandBuffer;
397 #else
398 using RenderPassCommandBuffer = VulkanSecondaryCommandBuffer;
399 #endif
400 
401 struct SecondaryCommandPools
402 {
403     SecondaryCommandPool outsideRenderPassPool;
404     SecondaryCommandPool renderPassPool;
405 };
406 
407 VkImageAspectFlags GetDepthStencilAspectFlags(const angle::Format &format);
408 VkImageAspectFlags GetFormatAspectFlags(const angle::Format &format);
409 
410 template <typename T>
411 struct ImplTypeHelper;
412 
413 // clang-format off
414 #define ANGLE_IMPL_TYPE_HELPER_GL(OBJ) \
415 template<>                             \
416 struct ImplTypeHelper<gl::OBJ>         \
417 {                                      \
418     using ImplType = OBJ##Vk;          \
419 };
420 // clang-format on
421 
422 ANGLE_GL_OBJECTS_X(ANGLE_IMPL_TYPE_HELPER_GL)
423 
424 template <>
425 struct ImplTypeHelper<gl::MockOverlay>
426 {
427     using ImplType = OverlayVk;
428 };
429 
430 template <>
431 struct ImplTypeHelper<egl::Display>
432 {
433     using ImplType = DisplayVk;
434 };
435 
436 template <>
437 struct ImplTypeHelper<egl::Image>
438 {
439     using ImplType = ImageVk;
440 };
441 
442 template <>
443 struct ImplTypeHelper<egl::ShareGroup>
444 {
445     using ImplType = ShareGroupVk;
446 };
447 
448 template <typename T>
449 using GetImplType = typename ImplTypeHelper<T>::ImplType;
450 
451 template <typename T>
452 GetImplType<T> *GetImpl(const T *glObject)
453 {
454     return GetImplAs<GetImplType<T>>(glObject);
455 }
456 
457 template <typename T>
458 GetImplType<T> *SafeGetImpl(const T *glObject)
459 {
460     return SafeGetImplAs<GetImplType<T>>(glObject);
461 }
462 
463 template <>
464 inline OverlayVk *GetImpl(const gl::MockOverlay *glObject)
465 {
466     return nullptr;
467 }
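
// A minimal usage sketch (illustrative): the helpers above map front-end objects to their Vulkan
// backend implementations, for example:
//
//     TextureVk *textureVk = vk::GetImpl(glTexture);    // const gl::Texture * -> TextureVk *
//     ContextVk *contextVk = vk::GetImpl(glContext);    // const gl::Context * -> ContextVk *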
468 
469 // Reference to a deleted object. The object is due to be destroyed at some point in the future.
470 // |mHandleType| determines the type of the object and which destroy function should be called.
471 class GarbageObject
472 {
473   public:
474     GarbageObject();
475     GarbageObject(GarbageObject &&other);
476     GarbageObject &operator=(GarbageObject &&rhs);
477 
478     bool valid() const { return mHandle != VK_NULL_HANDLE; }
479     void destroy(Renderer *renderer);
480 
481     template <typename DerivedT, typename HandleT>
482     static GarbageObject Get(WrappedObject<DerivedT, HandleT> *object)
483     {
484         // Using c-style cast here to avoid conditional compile for MSVC 32-bit
485         //  which fails to compile with reinterpret_cast, requiring static_cast.
486         return GarbageObject(HandleTypeHelper<DerivedT>::kHandleType,
487                              (GarbageHandle)(object->release()));
488     }
489 
490   private:
491     VK_DEFINE_NON_DISPATCHABLE_HANDLE(GarbageHandle)
492     GarbageObject(HandleType handleType, GarbageHandle handle);
493 
494     HandleType mHandleType;
495     GarbageHandle mHandle;
496 };
497 
498 template <typename T>
499 GarbageObject GetGarbage(T *obj)
500 {
501     return GarbageObject::Get(obj);
502 }
503 
504 // A list of garbage objects. Has no object lifetime information.
505 using GarbageObjects = std::vector<GarbageObject>;
506 
507 class MemoryProperties final : angle::NonCopyable
508 {
509   public:
510     MemoryProperties();
511 
512     void init(VkPhysicalDevice physicalDevice);
513     bool hasLazilyAllocatedMemory() const;
514     VkResult findCompatibleMemoryIndex(Renderer *renderer,
515                                        const VkMemoryRequirements &memoryRequirements,
516                                        VkMemoryPropertyFlags requestedMemoryPropertyFlags,
517                                        bool isExternalMemory,
518                                        VkMemoryPropertyFlags *memoryPropertyFlagsOut,
519                                        uint32_t *indexOut) const;
520     void destroy();
521 
522     uint32_t getHeapIndexForMemoryType(uint32_t memoryType) const
523     {
524         if (memoryType == kInvalidMemoryTypeIndex)
525         {
526             return kInvalidMemoryHeapIndex;
527         }
528 
529         ASSERT(memoryType < getMemoryTypeCount());
530         return mMemoryProperties.memoryTypes[memoryType].heapIndex;
531     }
532 
533     VkDeviceSize getHeapSizeForMemoryType(uint32_t memoryType) const
534     {
535         uint32_t heapIndex = mMemoryProperties.memoryTypes[memoryType].heapIndex;
536         return mMemoryProperties.memoryHeaps[heapIndex].size;
537     }
538 
539     const VkMemoryType &getMemoryType(uint32_t i) const { return mMemoryProperties.memoryTypes[i]; }
540 
541     uint32_t getMemoryHeapCount() const { return mMemoryProperties.memoryHeapCount; }
542     uint32_t getMemoryTypeCount() const { return mMemoryProperties.memoryTypeCount; }
543 
544   private:
545     VkPhysicalDeviceMemoryProperties mMemoryProperties;
546 };
547 
548 // Similar to StagingImage, for Buffers.
549 class StagingBuffer final : angle::NonCopyable
550 {
551   public:
552     StagingBuffer();
553     void release(ContextVk *contextVk);
554     void collectGarbage(Renderer *renderer, const QueueSerial &queueSerial);
555     void destroy(Renderer *renderer);
556 
557     angle::Result init(ErrorContext *context, VkDeviceSize size, StagingUsage usage);
558 
559     Buffer &getBuffer() { return mBuffer; }
560     const Buffer &getBuffer() const { return mBuffer; }
561     size_t getSize() const { return mSize; }
562 
563   private:
564     Buffer mBuffer;
565     Allocation mAllocation;
566     size_t mSize;
567 };
568 
569 angle::Result InitMappableAllocation(ErrorContext *context,
570                                      const Allocator &allocator,
571                                      Allocation *allocation,
572                                      VkDeviceSize size,
573                                      int value,
574                                      VkMemoryPropertyFlags memoryPropertyFlags);
575 
576 VkResult AllocateBufferMemory(ErrorContext *context,
577                               vk::MemoryAllocationType memoryAllocationType,
578                               VkMemoryPropertyFlags requestedMemoryPropertyFlags,
579                               VkMemoryPropertyFlags *memoryPropertyFlagsOut,
580                               const void *extraAllocationInfo,
581                               Buffer *buffer,
582                               uint32_t *memoryTypeIndexOut,
583                               DeviceMemory *deviceMemoryOut,
584                               VkDeviceSize *sizeOut);
585 
586 VkResult AllocateImageMemory(ErrorContext *context,
587                              vk::MemoryAllocationType memoryAllocationType,
588                              VkMemoryPropertyFlags memoryPropertyFlags,
589                              VkMemoryPropertyFlags *memoryPropertyFlagsOut,
590                              const void *extraAllocationInfo,
591                              Image *image,
592                              uint32_t *memoryTypeIndexOut,
593                              DeviceMemory *deviceMemoryOut,
594                              VkDeviceSize *sizeOut);
595 
596 VkResult AllocateImageMemoryWithRequirements(ErrorContext *context,
597                                              vk::MemoryAllocationType memoryAllocationType,
598                                              VkMemoryPropertyFlags memoryPropertyFlags,
599                                              const VkMemoryRequirements &memoryRequirements,
600                                              const void *extraAllocationInfo,
601                                              const VkBindImagePlaneMemoryInfoKHR *extraBindInfo,
602                                              Image *image,
603                                              uint32_t *memoryTypeIndexOut,
604                                              DeviceMemory *deviceMemoryOut);
605 
606 VkResult AllocateBufferMemoryWithRequirements(ErrorContext *context,
607                                               MemoryAllocationType memoryAllocationType,
608                                               VkMemoryPropertyFlags memoryPropertyFlags,
609                                               const VkMemoryRequirements &memoryRequirements,
610                                               const void *extraAllocationInfo,
611                                               Buffer *buffer,
612                                               VkMemoryPropertyFlags *memoryPropertyFlagsOut,
613                                               uint32_t *memoryTypeIndexOut,
614                                               DeviceMemory *deviceMemoryOut);
615 
616 gl::TextureType Get2DTextureType(uint32_t layerCount, GLint samples);
617 
618 enum class RecordingMode
619 {
620     Start,
621     Append,
622 };
623 
624 // Helper class to handle RAII patterns for initialization. Requires that T have a destroy method
625 // that takes a VkDevice and returns void.
626 template <typename T>
627 class [[nodiscard]] DeviceScoped final : angle::NonCopyable
628 {
629   public:
630     explicit DeviceScoped(VkDevice device) : mDevice(device) {}
631     DeviceScoped(DeviceScoped &&other) : mDevice(other.mDevice), mVar(std::move(other.mVar)) {}
632     ~DeviceScoped() { mVar.destroy(mDevice); }
633 
634     const T &get() const { return mVar; }
635     T &get() { return mVar; }
636 
637     T &&release() { return std::move(mVar); }
638 
639   private:
640     VkDevice mDevice;
641     T mVar;
642 };
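
// A minimal usage sketch (illustrative; the wrapped type and its init() signature are
// assumptions): keep the object scoped until initialization succeeds, then transfer ownership out.
//
//     DeviceScoped<Semaphore> scopedSemaphore(device);
//     VkResult result = scopedSemaphore.get().init(device);
//     if (result == VK_SUCCESS)
//     {
//         mSemaphore = scopedSemaphore.release();
//     }
//     // On failure (or early return), ~DeviceScoped destroys the semaphore with |device|.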
643 
644 template <typename T>
645 class [[nodiscard]] AllocatorScoped final : angle::NonCopyable
646 {
647   public:
648     AllocatorScoped(const Allocator &allocator) : mAllocator(allocator) {}
649     ~AllocatorScoped() { mVar.destroy(mAllocator); }
650 
651     const T &get() const { return mVar; }
652     T &get() { return mVar; }
653 
654     T &&release() { return std::move(mVar); }
655 
656   private:
657     const Allocator &mAllocator;
658     T mVar;
659 };
660 
661 // Similar to DeviceScoped, but releases objects instead of destroying them. Requires that T have a
662 // release method that takes a ContextVk * and returns void.
663 template <typename T>
664 class [[nodiscard]] ContextScoped final : angle::NonCopyable
665 {
666   public:
667     ContextScoped(ContextVk *contextVk) : mContextVk(contextVk) {}
668     ~ContextScoped() { mVar.release(mContextVk); }
669 
670     const T &get() const { return mVar; }
671     T &get() { return mVar; }
672 
673     T &&release() { return std::move(mVar); }
674 
675   private:
676     ContextVk *mContextVk;
677     T mVar;
678 };
679 
680 template <typename T>
681 class [[nodiscard]] RendererScoped final : angle::NonCopyable
682 {
683   public:
684     RendererScoped(Renderer *renderer) : mRenderer(renderer) {}
685     ~RendererScoped() { mVar.release(mRenderer); }
686 
687     const T &get() const { return mVar; }
688     T &get() { return mVar; }
689 
690     T &&release() { return std::move(mVar); }
691 
692   private:
693     Renderer *mRenderer;
694     T mVar;
695 };
696 
697 // This is a very simple RefCount class that has no autoreleasing.
698 template <typename T>
699 class RefCounted : angle::NonCopyable
700 {
701   public:
702     RefCounted() : mRefCount(0) {}
703     template <class... Args>
704     explicit RefCounted(Args &&...args) : mRefCount(0), mObject(std::forward<Args>(args)...)
705     {}
706     explicit RefCounted(T &&newObject) : mRefCount(0), mObject(std::move(newObject)) {}
707     ~RefCounted() { ASSERT(mRefCount == 0 && !mObject.valid()); }
708 
709     RefCounted(RefCounted &&copy) : mRefCount(copy.mRefCount), mObject(std::move(copy.mObject))
710     {
711         ASSERT(this != &copy);
712         copy.mRefCount = 0;
713     }
714 
715     RefCounted &operator=(RefCounted &&rhs)
716     {
717         std::swap(mRefCount, rhs.mRefCount);
718         mObject = std::move(rhs.mObject);
719         return *this;
720     }
721 
722     void addRef()
723     {
724         ASSERT(mRefCount != std::numeric_limits<uint32_t>::max());
725         mRefCount++;
726     }
727 
728     void releaseRef()
729     {
730         ASSERT(isReferenced());
731         mRefCount--;
732     }
733 
734     uint32_t getAndReleaseRef()
735     {
736         ASSERT(isReferenced());
737         return mRefCount--;
738     }
739 
740     bool isReferenced() const { return mRefCount != 0; }
741     uint32_t getRefCount() const { return mRefCount; }
742     bool isLastReferenceCount() const { return mRefCount == 1; }
743 
744     T &get() { return mObject; }
745     const T &get() const { return mObject; }
746 
747     // A debug function, used in assertions, to validate that the reference count is as expected.
748     bool isRefCountAsExpected(uint32_t expectedRefCount) { return mRefCount == expectedRefCount; }
749 
750   private:
751     uint32_t mRefCount;
752     T mObject;
753 };
754 
755 // Atomic version of RefCounted.  Used in the descriptor set and pipeline layout caches, which are
756 // accessed by link jobs.  No std::move is allowed due to the atomic ref count.
757 template <typename T>
758 class AtomicRefCounted : angle::NonCopyable
759 {
760   public:
761     AtomicRefCounted() : mRefCount(0) {}
762     explicit AtomicRefCounted(T &&newObject) : mRefCount(0), mObject(std::move(newObject)) {}
763     ~AtomicRefCounted() { ASSERT(mRefCount == 0 && !mObject.valid()); }
764 
765     void addRef()
766     {
767         ASSERT(mRefCount != std::numeric_limits<uint32_t>::max());
768         mRefCount.fetch_add(1, std::memory_order_relaxed);
769     }
770 
771     // Warning: this method does not perform any synchronization, so it cannot be used together
772     // with a following `!isReferenced()` call to check whether the object is no longer accessed
773     // by other threads. Use `getAndReleaseRef()` instead when synchronization is required.
774     void releaseRef()
775     {
776         ASSERT(isReferenced());
777         mRefCount.fetch_sub(1, std::memory_order_relaxed);
778     }
779 
780     // Performs acquire-release memory synchronization. When result is "1", the object is
781     // guaranteed to be no longer in use by other threads, and may be safely destroyed or updated.
782     // Warning: do not mix this method and the unsynchronized `releaseRef()` call.
783     unsigned int getAndReleaseRef()
784     {
785         ASSERT(isReferenced());
786         return mRefCount.fetch_sub(1, std::memory_order_acq_rel);
787     }
788 
789     // Making decisions based on the reference count is not thread safe, so it should not be used
790     // in release builds.
791 #if defined(ANGLE_ENABLE_ASSERTS)
792     // Warning: method does not perform any synchronization.  See `releaseRef()` for details.
793     // Method may be only used after external synchronization.
794     bool isReferenced() const { return mRefCount.load(std::memory_order_relaxed) != 0; }
795     uint32_t getRefCount() const { return mRefCount.load(std::memory_order_relaxed); }
796     // This is used by SharedPtr::unique, so needs strong ordering.
797     bool isLastReferenceCount() const { return mRefCount.load(std::memory_order_acquire) == 1; }
798 #else
799     // These still compile but should never actually be called.
800     bool isReferenced() const
801     {
802         UNREACHABLE();
803         return false;
804     }
805     uint32_t getRefCount() const
806     {
807         UNREACHABLE();
808         return 0;
809     }
810     bool isLastReferenceCount() const
811     {
812         UNREACHABLE();
813         return false;
814     }
815 #endif
816 
817     T &get() { return mObject; }
818     const T &get() const { return mObject; }
819 
820   private:
821     std::atomic_uint mRefCount;
822     T mObject;
823 };
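
// A minimal usage sketch (illustrative; |layout| stands for a hypothetical
// AtomicRefCounted<DescriptorSetLayout> shared between threads): use the acquire-release variant
// when the caller may destroy the object.
//
//     if (layout->getAndReleaseRef() == 1)
//     {
//         // No other thread can still be using the object; it is safe to destroy it.
//         layout->get().destroy(device);
//         delete layout;
//     }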
824 
825 // This is intended to have the same interface as std::shared_ptr, except that it must be used in
826 // a thread-safe environment.
827 template <typename>
828 class WeakPtr;
829 template <typename T, class RefCountedStorage = RefCounted<T>>
830 class SharedPtr final
831 {
832   public:
833     SharedPtr() : mRefCounted(nullptr), mDevice(VK_NULL_HANDLE) {}
834     SharedPtr(VkDevice device, T &&object) : mDevice(device)
835     {
836         mRefCounted = new RefCountedStorage(std::move(object));
837         mRefCounted->addRef();
838     }
839     SharedPtr(VkDevice device, const WeakPtr<T> &other)
840         : mRefCounted(other.mRefCounted), mDevice(device)
841     {
842         if (mRefCounted)
843         {
844             // There must already be another SharedPtr holding onto the underlying object when
845             // the WeakPtr is valid.
846             ASSERT(mRefCounted->isReferenced());
847             mRefCounted->addRef();
848         }
849     }
850     ~SharedPtr() { reset(); }
851 
852     SharedPtr(const SharedPtr &other) : mRefCounted(nullptr), mDevice(VK_NULL_HANDLE)
853     {
854         *this = other;
855     }
856 
857     SharedPtr(SharedPtr &&other) : mRefCounted(nullptr), mDevice(VK_NULL_HANDLE)
858     {
859         *this = std::move(other);
860     }
861 
862     template <class... Args>
863     static SharedPtr<T, RefCountedStorage> MakeShared(VkDevice device, Args &&...args)
864     {
865         SharedPtr<T, RefCountedStorage> newObject;
866         newObject.mRefCounted = new RefCountedStorage(std::forward<Args>(args)...);
867         newObject.mRefCounted->addRef();
868         newObject.mDevice = device;
869         return newObject;
870     }
871 
872     void reset()
873     {
874         if (mRefCounted)
875         {
876             releaseRef();
877             mRefCounted = nullptr;
878             mDevice     = VK_NULL_HANDLE;
879         }
880     }
881 
882     SharedPtr &operator=(SharedPtr &&other)
883     {
884         if (mRefCounted)
885         {
886             releaseRef();
887         }
888         mRefCounted       = other.mRefCounted;
889         mDevice           = other.mDevice;
890         other.mRefCounted = nullptr;
891         other.mDevice     = VK_NULL_HANDLE;
892         return *this;
893     }
894 
895     SharedPtr &operator=(const SharedPtr &other)
896     {
897         if (mRefCounted)
898         {
899             releaseRef();
900         }
901         mRefCounted = other.mRefCounted;
902         mDevice     = other.mDevice;
903         if (mRefCounted)
904         {
905             mRefCounted->addRef();
906         }
907         return *this;
908     }
909 
910     operator bool() const { return mRefCounted != nullptr; }
911 
912     T &operator*() const
913     {
914         ASSERT(mRefCounted != nullptr);
915         return mRefCounted->get();
916     }
917 
918     T *operator->() const { return get(); }
919 
920     T *get() const
921     {
922         ASSERT(mRefCounted != nullptr);
923         return &mRefCounted->get();
924     }
925 
926     bool unique() const
927     {
928         ASSERT(mRefCounted != nullptr);
929         return mRefCounted->isLastReferenceCount();
930     }
931 
932     bool owner_equal(const SharedPtr<T> &other) const { return mRefCounted == other.mRefCounted; }
933 
934     uint32_t getRefCount() const { return mRefCounted->getRefCount(); }
935 
936   private:
937     void releaseRef()
938     {
939         ASSERT(mRefCounted != nullptr);
940         unsigned int refCount = mRefCounted->getAndReleaseRef();
941         if (refCount == 1)
942         {
943             mRefCounted->get().destroy(mDevice);
944             SafeDelete(mRefCounted);
945         }
946     }
947 
948     friend class WeakPtr<T>;
949     RefCountedStorage *mRefCounted;
950     VkDevice mDevice;
951 };
952 
953 template <typename T>
954 using AtomicSharedPtr = SharedPtr<T, AtomicRefCounted<T>>;
955 
956 // This is intended to have the same interface as std::weak_ptr.
957 template <typename T>
958 class WeakPtr final
959 {
960   public:
961     using RefCountedStorage = RefCounted<T>;
962 
963     WeakPtr() : mRefCounted(nullptr) {}
964 
965     WeakPtr(const SharedPtr<T> &other) : mRefCounted(other.mRefCounted) {}
966 
967     void reset() { mRefCounted = nullptr; }
968 
969     operator bool() const
970     {
971         // There must be another SharedPtr holding onto the underlying object when the WeakPtr is
972         // valid.
973         ASSERT(mRefCounted == nullptr || mRefCounted->isReferenced());
974         return mRefCounted != nullptr;
975     }
976 
977     T *operator->() const { return get(); }
978 
979     T *get() const
980     {
981         ASSERT(mRefCounted != nullptr);
982         ASSERT(mRefCounted->isReferenced());
983         return &mRefCounted->get();
984     }
985 
986     long use_count() const
987     {
988         ASSERT(mRefCounted != nullptr);
989         // There must be another SharedPtr holding onto the underlying object when the WeakPtr is
990         // valid.
991         ASSERT(mRefCounted->isReferenced());
992         return mRefCounted->getRefCount();
993     }
994     bool owner_equal(const SharedPtr<T> &other) const
995     {
996         // There must be another SharedPtr holding onto the underlying object when the WeakPtr is
997         // valid.
998         ASSERT(mRefCounted == nullptr || mRefCounted->isReferenced());
999         return mRefCounted == other.mRefCounted;
1000     }
1001 
1002   private:
1003     friend class SharedPtr<T>;
1004     RefCountedStorage *mRefCounted;
1005 };
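
// A minimal usage sketch (illustrative; Semaphore is just an example of a type with valid() and
// destroy(VkDevice)): SharedPtr owns and eventually destroys the object with the device, while
// WeakPtr observes it without extending its lifetime.
//
//     SharedPtr<Semaphore> shared = SharedPtr<Semaphore>::MakeShared(device);
//     WeakPtr<Semaphore> weak = shared;
//     ASSERT(weak && shared.unique());
//     shared.reset();    // destroys the Semaphore; |weak| must not be dereferenced afterwards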
1006 
1007 // Helper class to share ref-counted Vulkan objects.  Requires that T have a destroy method
1008 // that takes a VkDevice and returns void.
1009 template <typename T>
1010 class Shared final : angle::NonCopyable
1011 {
1012   public:
1013     Shared() : mRefCounted(nullptr) {}
1014     ~Shared() { ASSERT(mRefCounted == nullptr); }
1015 
1016     Shared(Shared &&other) { *this = std::move(other); }
1017     Shared &operator=(Shared &&other)
1018     {
1019         ASSERT(this != &other);
1020         mRefCounted       = other.mRefCounted;
1021         other.mRefCounted = nullptr;
1022         return *this;
1023     }
1024 
1025     void set(VkDevice device, RefCounted<T> *refCounted)
1026     {
1027         if (mRefCounted)
1028         {
1029             mRefCounted->releaseRef();
1030             if (!mRefCounted->isReferenced())
1031             {
1032                 mRefCounted->get().destroy(device);
1033                 SafeDelete(mRefCounted);
1034             }
1035         }
1036 
1037         mRefCounted = refCounted;
1038 
1039         if (mRefCounted)
1040         {
1041             mRefCounted->addRef();
1042         }
1043     }
1044 
1045     void setUnreferenced(RefCounted<T> *refCounted)
1046     {
1047         ASSERT(!mRefCounted);
1048         ASSERT(refCounted);
1049 
1050         mRefCounted = refCounted;
1051         mRefCounted->addRef();
1052     }
1053 
1054     void assign(VkDevice device, T &&newObject)
1055     {
1056         set(device, new RefCounted<T>(std::move(newObject)));
1057     }
1058 
1059     void copy(VkDevice device, const Shared<T> &other) { set(device, other.mRefCounted); }
1060 
1061     void copyUnreferenced(const Shared<T> &other) { setUnreferenced(other.mRefCounted); }
1062 
1063     void reset(VkDevice device) { set(device, nullptr); }
1064 
1065     template <typename RecyclerT>
1066     void resetAndRecycle(RecyclerT *recycler)
1067     {
1068         if (mRefCounted)
1069         {
1070             mRefCounted->releaseRef();
1071             if (!mRefCounted->isReferenced())
1072             {
1073                 ASSERT(mRefCounted->get().valid());
1074                 recycler->recycle(std::move(mRefCounted->get()));
1075                 SafeDelete(mRefCounted);
1076             }
1077 
1078             mRefCounted = nullptr;
1079         }
1080     }
1081 
1082     template <typename OnRelease>
1083     void resetAndRelease(OnRelease *onRelease)
1084     {
1085         if (mRefCounted)
1086         {
1087             mRefCounted->releaseRef();
1088             if (!mRefCounted->isReferenced())
1089             {
1090                 ASSERT(mRefCounted->get().valid());
1091                 (*onRelease)(std::move(mRefCounted->get()));
1092                 SafeDelete(mRefCounted);
1093             }
1094 
1095             mRefCounted = nullptr;
1096         }
1097     }
1098 
1099     bool isReferenced() const
1100     {
1101         // If reference is zero, the object should have been deleted.  I.e. if the object is not
1102         // nullptr, it should have a reference.
1103         ASSERT(!mRefCounted || mRefCounted->isReferenced());
1104         return mRefCounted != nullptr;
1105     }
1106 
1107     T &get()
1108     {
1109         ASSERT(mRefCounted && mRefCounted->isReferenced());
1110         return mRefCounted->get();
1111     }
1112     const T &get() const
1113     {
1114         ASSERT(mRefCounted && mRefCounted->isReferenced());
1115         return mRefCounted->get();
1116     }
1117 
1118   private:
1119     RefCounted<T> *mRefCounted;
1120 };
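
// A minimal usage sketch (illustrative; Fence is just an example of a type with valid() and
// destroy(VkDevice)): Shared<T> ref-counts an object created elsewhere.
//
//     Shared<Fence> sharedFence;
//     sharedFence.assign(device, std::move(fence));    // takes ownership, refcount becomes 1
//     Shared<Fence> other;
//     other.copy(device, sharedFence);                 // refcount becomes 2
//     other.reset(device);
//     sharedFence.reset(device);                       // last reference released; fence destroyed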
1121 
1122 template <typename T, typename StorageT = std::deque<T>>
1123 class Recycler final : angle::NonCopyable
1124 {
1125   public:
1126     Recycler() = default;
1127     Recycler(StorageT &&storage) { mObjectFreeList = std::move(storage); }
1128 
1129     void recycle(T &&garbageObject)
1130     {
1131         // Recycling invalid objects is pointless and potentially a bug.
1132         ASSERT(garbageObject.valid());
1133         mObjectFreeList.emplace_back(std::move(garbageObject));
1134     }
1135 
1136     void recycle(StorageT &&garbageObjects)
1137     {
1138         // Recycling invalid objects is pointless and potentially a bug.
1139         ASSERT(!garbageObjects.empty());
1140         mObjectFreeList.insert(mObjectFreeList.end(), garbageObjects.begin(), garbageObjects.end());
1141         ASSERT(garbageObjects.empty());
1142     }
1143 
1144     void refill(StorageT &&garbageObjects)
1145     {
1146         ASSERT(!garbageObjects.empty());
1147         ASSERT(mObjectFreeList.empty());
1148         mObjectFreeList.swap(garbageObjects);
1149     }
1150 
1151     void fetch(T *outObject)
1152     {
1153         ASSERT(!empty());
1154         *outObject = std::move(mObjectFreeList.back());
1155         mObjectFreeList.pop_back();
1156     }
1157 
1158     void destroy(VkDevice device)
1159     {
1160         while (!mObjectFreeList.empty())
1161         {
1162             T &object = mObjectFreeList.back();
1163             object.destroy(device);
1164             mObjectFreeList.pop_back();
1165         }
1166     }
1167 
1168     bool empty() const { return mObjectFreeList.empty(); }
1169 
1170   private:
1171     StorageT mObjectFreeList;
1172 };
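
// A minimal usage sketch (illustrative): reuse objects (anything with valid() and
// destroy(VkDevice)) instead of recreating them.
//
//     Recycler<CommandPool> recycler;
//     recycler.recycle(std::move(oldPool));    // |oldPool| must still be valid
//     CommandPool pool;
//     if (!recycler.empty())
//     {
//         recycler.fetch(&pool);
//     }
//     recycler.destroy(device);                // destroys anything left in the free list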
1173 
1174 ANGLE_ENABLE_STRUCT_PADDING_WARNINGS
1175 struct SpecializationConstants final
1176 {
1177     VkBool32 surfaceRotation;
1178     uint32_t dither;
1179 };
1180 ANGLE_DISABLE_STRUCT_PADDING_WARNINGS
1181 
1182 template <typename T>
1183 using SpecializationConstantMap = angle::PackedEnumMap<sh::vk::SpecializationConstantId, T>;
1184 
1185 using ShaderModulePtr = SharedPtr<ShaderModule>;
1186 using ShaderModuleMap = gl::ShaderMap<ShaderModulePtr>;
1187 
1188 angle::Result InitShaderModule(ErrorContext *context,
1189                                ShaderModulePtr *shaderModulePtr,
1190                                const uint32_t *shaderCode,
1191                                size_t shaderCodeSize);
1192 
1193 void MakeDebugUtilsLabel(GLenum source, const char *marker, VkDebugUtilsLabelEXT *label);
1194 
1195 constexpr size_t kUnpackedDepthIndex   = gl::IMPLEMENTATION_MAX_DRAW_BUFFERS;
1196 constexpr size_t kUnpackedStencilIndex = gl::IMPLEMENTATION_MAX_DRAW_BUFFERS + 1;
1197 constexpr uint32_t kUnpackedColorBuffersMask =
1198     angle::BitMask<uint32_t>(gl::IMPLEMENTATION_MAX_DRAW_BUFFERS);
1199 
1200 class ClearValuesArray final
1201 {
1202   public:
1203     ClearValuesArray();
1204     ~ClearValuesArray();
1205 
1206     ClearValuesArray(const ClearValuesArray &other);
1207     ClearValuesArray &operator=(const ClearValuesArray &rhs);
1208 
1209     void store(uint32_t index, VkImageAspectFlags aspectFlags, const VkClearValue &clearValue);
1210     void storeNoDepthStencil(uint32_t index, const VkClearValue &clearValue);
1211 
1212     void reset(size_t index)
1213     {
1214         mValues[index] = {};
1215         mEnabled.reset(index);
1216     }
1217 
1218     bool test(size_t index) const { return mEnabled.test(index); }
1219     bool testDepth() const { return mEnabled.test(kUnpackedDepthIndex); }
1220     bool testStencil() const { return mEnabled.test(kUnpackedStencilIndex); }
1221     gl::DrawBufferMask getColorMask() const;
1222 
1223     const VkClearValue &operator[](size_t index) const { return mValues[index]; }
1224 
1225     float getDepthValue() const { return mValues[kUnpackedDepthIndex].depthStencil.depth; }
1226     uint32_t getStencilValue() const { return mValues[kUnpackedStencilIndex].depthStencil.stencil; }
1227 
1228     const VkClearValue *data() const { return mValues.data(); }
1229     bool empty() const { return mEnabled.none(); }
1230     bool any() const { return mEnabled.any(); }
1231 
1232   private:
1233     gl::AttachmentArray<VkClearValue> mValues;
1234     gl::AttachmentsMask mEnabled;
1235 };
1236 
1237 // Defines Serials for Vulkan objects.
1238 #define ANGLE_VK_SERIAL_OP(X) \
1239     X(Buffer)                 \
1240     X(Image)                  \
1241     X(ImageOrBufferView)      \
1242     X(Sampler)
1243 
1244 #define ANGLE_DEFINE_VK_SERIAL_TYPE(Type)                                     \
1245     class Type##Serial                                                        \
1246     {                                                                         \
1247       public:                                                                 \
1248         constexpr Type##Serial() : mSerial(kInvalid) {}                       \
1249         constexpr explicit Type##Serial(uint32_t serial) : mSerial(serial) {} \
1250                                                                               \
1251         constexpr bool operator==(const Type##Serial &other) const            \
1252         {                                                                     \
1253             ASSERT(mSerial != kInvalid || other.mSerial != kInvalid);         \
1254             return mSerial == other.mSerial;                                  \
1255         }                                                                     \
1256         constexpr bool operator!=(const Type##Serial &other) const            \
1257         {                                                                     \
1258             ASSERT(mSerial != kInvalid || other.mSerial != kInvalid);         \
1259             return mSerial != other.mSerial;                                  \
1260         }                                                                     \
1261         constexpr uint32_t getValue() const                                   \
1262         {                                                                     \
1263             return mSerial;                                                   \
1264         }                                                                     \
1265         constexpr bool valid() const                                          \
1266         {                                                                     \
1267             return mSerial != kInvalid;                                       \
1268         }                                                                     \
1269                                                                               \
1270       private:                                                                \
1271         uint32_t mSerial;                                                     \
1272         static constexpr uint32_t kInvalid = 0;                               \
1273     };                                                                        \
1274     static constexpr Type##Serial kInvalid##Type##Serial = Type##Serial();
1275 
1276 ANGLE_VK_SERIAL_OP(ANGLE_DEFINE_VK_SERIAL_TYPE)
1277 
1278 #define ANGLE_DECLARE_GEN_VK_SERIAL(Type) Type##Serial generate##Type##Serial();
1279 
1280 class ResourceSerialFactory final : angle::NonCopyable
1281 {
1282   public:
1283     ResourceSerialFactory();
1284     ~ResourceSerialFactory();
1285 
1286     ANGLE_VK_SERIAL_OP(ANGLE_DECLARE_GEN_VK_SERIAL)
1287 
1288   private:
1289     uint32_t issueSerial();
1290 
1291     // Kept atomic so it can be accessed from multiple Context threads at once.
1292     std::atomic<uint32_t> mCurrentUniqueSerial;
1293 };
1294 
1295 #if defined(ANGLE_ENABLE_PERF_COUNTER_OUTPUT)
1296 constexpr bool kOutputCumulativePerfCounters = ANGLE_ENABLE_PERF_COUNTER_OUTPUT;
1297 #else
1298 constexpr bool kOutputCumulativePerfCounters = false;
1299 #endif
1300 
1301 // Performance and resource counters.
1302 struct RenderPassPerfCounters
1303 {
1304     // load/storeOps. Includes ops for resolve attachment. Maximum value = 2.
1305     uint8_t colorLoadOpClears;
1306     uint8_t colorLoadOpLoads;
1307     uint8_t colorLoadOpNones;
1308     uint8_t colorStoreOpStores;
1309     uint8_t colorStoreOpNones;
1310     uint8_t depthLoadOpClears;
1311     uint8_t depthLoadOpLoads;
1312     uint8_t depthLoadOpNones;
1313     uint8_t depthStoreOpStores;
1314     uint8_t depthStoreOpNones;
1315     uint8_t stencilLoadOpClears;
1316     uint8_t stencilLoadOpLoads;
1317     uint8_t stencilLoadOpNones;
1318     uint8_t stencilStoreOpStores;
1319     uint8_t stencilStoreOpNones;
1320     // Number of unresolve and resolve operations.  Maximum value for color =
1321     // gl::IMPLEMENTATION_MAX_DRAW_BUFFERS and for depth/stencil = 1 each.
1322     uint8_t colorAttachmentUnresolves;
1323     uint8_t colorAttachmentResolves;
1324     uint8_t depthAttachmentUnresolves;
1325     uint8_t depthAttachmentResolves;
1326     uint8_t stencilAttachmentUnresolves;
1327     uint8_t stencilAttachmentResolves;
1328     // Whether the depth/stencil attachment is using a read-only layout.
1329     uint8_t readOnlyDepthStencil;
1330 };
1331 
1332 // A Vulkan image level index.
1333 using LevelIndex = gl::LevelIndexWrapper<uint32_t>;
1334 
1335 // Ensure viewport is within Vulkan requirements
1336 void ClampViewport(VkViewport *viewport);
1337 
1338 constexpr bool IsDynamicDescriptor(VkDescriptorType descriptorType)
1339 {
1340     switch (descriptorType)
1341     {
1342         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1343         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1344             return true;
1345         default:
1346             return false;
1347     }
1348 }
1349 
1350 void ApplyPipelineCreationFeedback(ErrorContext *context,
1351                                    const VkPipelineCreationFeedback &feedback);
1352 
1353 angle::Result SetDebugUtilsObjectName(ContextVk *contextVk,
1354                                       VkObjectType objectType,
1355                                       uint64_t handle,
1356                                       const std::string &label);
1357 
1358 }  // namespace vk
1359 
1360 #if !defined(ANGLE_SHARED_LIBVULKAN)
1361 // Lazily load entry points for each extension as necessary.
1362 void InitDebugUtilsEXTFunctions(VkInstance instance);
1363 void InitTransformFeedbackEXTFunctions(VkDevice device);
1364 void InitRenderPass2KHRFunctions(VkDevice device);
1365 
1366 #    if defined(ANGLE_PLATFORM_FUCHSIA)
1367 // VK_FUCHSIA_imagepipe_surface
1368 void InitImagePipeSurfaceFUCHSIAFunctions(VkInstance instance);
1369 #    endif
1370 
1371 #    if defined(ANGLE_PLATFORM_ANDROID)
1372 // VK_ANDROID_external_memory_android_hardware_buffer
1373 void InitExternalMemoryHardwareBufferANDROIDFunctions(VkDevice device);
1374 #    endif
1375 
1376 // VK_KHR_external_semaphore_fd
1377 void InitExternalSemaphoreFdFunctions(VkDevice device);
1378 
1379 // VK_EXT_device_fault
1380 void InitDeviceFaultFunctions(VkDevice device);
1381 
1382 // VK_EXT_host_query_reset
1383 void InitHostQueryResetFunctions(VkDevice device);
1384 
1385 // VK_KHR_external_fence_fd
1386 void InitExternalFenceFdFunctions(VkDevice device);
1387 
1388 // VK_KHR_shared_presentable_image
1389 void InitGetSwapchainStatusKHRFunctions(VkDevice device);
1390 
1391 // VK_EXT_extended_dynamic_state
1392 void InitExtendedDynamicStateEXTFunctions(VkDevice device);
1393 
1394 // VK_EXT_extended_dynamic_state2
1395 void InitExtendedDynamicState2EXTFunctions(VkDevice device);
1396 
1397 // VK_EXT_vertex_input_dynamic_state
1398 void InitVertexInputDynamicStateEXTFunctions(VkDevice device);
1399 
1400 // VK_KHR_dynamic_rendering
1401 void InitDynamicRenderingFunctions(VkDevice device);
1402 
1403 // VK_KHR_dynamic_rendering_local_read
1404 void InitDynamicRenderingLocalReadFunctions(VkDevice device);
1405 
1406 // VK_KHR_fragment_shading_rate
1407 void InitFragmentShadingRateKHRInstanceFunction(VkInstance instance);
1408 void InitFragmentShadingRateKHRDeviceFunction(VkDevice device);
1409 
1410 // VK_GOOGLE_display_timing
1411 void InitGetPastPresentationTimingGoogleFunction(VkDevice device);
1412 
1413 // VK_EXT_host_image_copy
1414 void InitHostImageCopyFunctions(VkDevice device);
1415 
1416 // VK_KHR_Synchronization2
1417 void InitSynchronization2Functions(VkDevice device);
1418 
1419 #endif  // !defined(ANGLE_SHARED_LIBVULKAN)
1420 
1421 // Promoted to Vulkan 1.1
1422 void InitGetPhysicalDeviceProperties2KHRFunctionsFromCore();
1423 void InitExternalFenceCapabilitiesFunctionsFromCore();
1424 void InitExternalSemaphoreCapabilitiesFunctionsFromCore();
1425 void InitSamplerYcbcrKHRFunctionsFromCore();
1426 void InitGetMemoryRequirements2KHRFunctionsFromCore();
1427 void InitBindMemory2KHRFunctionsFromCore();
1428 
1429 GLenum CalculateGenerateMipmapFilter(ContextVk *contextVk, angle::FormatID formatID);
1430 
1431 namespace gl_vk
1432 {
1433 inline VkRect2D GetRect(const gl::Rectangle &source)
1434 {
1435     return {{source.x, source.y},
1436             {static_cast<uint32_t>(source.width), static_cast<uint32_t>(source.height)}};
1437 }
1438 VkFilter GetFilter(const GLenum filter);
1439 VkSamplerMipmapMode GetSamplerMipmapMode(const GLenum filter);
1440 VkSamplerAddressMode GetSamplerAddressMode(const GLenum wrap);
1441 VkPrimitiveTopology GetPrimitiveTopology(gl::PrimitiveMode mode);
1442 VkPolygonMode GetPolygonMode(const gl::PolygonMode polygonMode);
1443 VkCullModeFlagBits GetCullMode(const gl::RasterizerState &rasterState);
1444 VkFrontFace GetFrontFace(GLenum frontFace, bool invertCullFace);
1445 VkSampleCountFlagBits GetSamples(GLint sampleCount, bool limitSampleCountTo2);
1446 VkComponentSwizzle GetSwizzle(const GLenum swizzle);
1447 VkCompareOp GetCompareOp(const GLenum compareFunc);
1448 VkStencilOp GetStencilOp(const GLenum compareOp);
1449 VkLogicOp GetLogicOp(const GLenum logicOp);
1450 
1451 constexpr gl::ShaderMap<VkShaderStageFlagBits> kShaderStageMap = {
1452     {gl::ShaderType::Vertex, VK_SHADER_STAGE_VERTEX_BIT},
1453     {gl::ShaderType::TessControl, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT},
1454     {gl::ShaderType::TessEvaluation, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT},
1455     {gl::ShaderType::Fragment, VK_SHADER_STAGE_FRAGMENT_BIT},
1456     {gl::ShaderType::Geometry, VK_SHADER_STAGE_GEOMETRY_BIT},
1457     {gl::ShaderType::Compute, VK_SHADER_STAGE_COMPUTE_BIT},
1458 };
1459 
1460 void GetOffset(const gl::Offset &glOffset, VkOffset3D *vkOffset);
1461 void GetExtent(const gl::Extents &glExtent, VkExtent3D *vkExtent);
1462 VkImageType GetImageType(gl::TextureType textureType);
1463 VkImageViewType GetImageViewType(gl::TextureType textureType);
1464 VkColorComponentFlags GetColorComponentFlags(bool red, bool green, bool blue, bool alpha);
1465 VkShaderStageFlags GetShaderStageFlags(gl::ShaderBitSet activeShaders);
1466 
1467 void GetViewport(const gl::Rectangle &viewport,
1468                  float nearPlane,
1469                  float farPlane,
1470                  bool invertViewport,
1471                  bool upperLeftOrigin,
1472                  GLint renderAreaHeight,
1473                  VkViewport *viewportOut);
1474 
1475 void GetExtentsAndLayerCount(gl::TextureType textureType,
1476                              const gl::Extents &extents,
1477                              VkExtent3D *extentsOut,
1478                              uint32_t *layerCountOut);
1479 
1480 vk::LevelIndex GetLevelIndex(gl::LevelIndex levelGL, gl::LevelIndex baseLevel);
1481 
1482 VkImageTiling GetTilingMode(gl::TilingMode tilingMode);
1483 
1484 VkImageCompressionFixedRateFlagsEXT ConvertEGLFixedRateToVkFixedRate(
1485     const EGLenum eglCompressionRate,
1486     const angle::FormatID actualFormatID);
1487 }  // namespace gl_vk
1488 
1489 namespace vk_gl
1490 {
1491 // The Vulkan back-end will not support a sample count of 1, because of a Vulkan specification
1492 // restriction:
1493 //
1494 //   If the image was created with VkImageCreateInfo::samples equal to VK_SAMPLE_COUNT_1_BIT, the
1495 //   instruction must: have MS = 0.
1496 //
1497 // This restriction was tracked in http://anglebug.com/42262827 and Khronos-private Vulkan
1498 // specification issue https://gitlab.khronos.org/vulkan/vulkan/issues/1925.
1499 //
1500 // In addition, the Vulkan back-end will not support sample counts of 32 or 64, since there are no
1501 // standard sample locations for those sample counts.
1502 constexpr unsigned int kSupportedSampleCounts = (VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT |
1503                                                  VK_SAMPLE_COUNT_8_BIT | VK_SAMPLE_COUNT_16_BIT);
1504 
1505 // Find the set bits in |sampleCounts| and add the corresponding sample counts to |outSet|.
1506 void AddSampleCounts(VkSampleCountFlags sampleCounts, gl::SupportedSampleSet *outSet);
1507 // Return the maximum sample count with a bit set in |sampleCounts|.
1508 GLuint GetMaxSampleCount(VkSampleCountFlags sampleCounts);
1509 // Return a supported sample count that's at least as large as the requested one.
1510 GLuint GetSampleCount(VkSampleCountFlags supportedCounts, GLuint requestedCount);
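// Because VkSampleCountFlagBits values are numerically equal to the sample counts they
// represent (VK_SAMPLE_COUNT_4_BIT == 4, etc.), selection can use simple bit tests.  A sketch
// (assumed implementation) of GetSampleCount:
//
//     GLuint GetSampleCount(VkSampleCountFlags supportedCounts, GLuint requestedCount)
//     {
//         for (GLuint sampleCount = 2; sampleCount <= 16; sampleCount *= 2)
//         {
//             if (sampleCount >= requestedCount &&
//                 (supportedCounts & kSupportedSampleCounts & sampleCount) != 0)
//             {
//                 return sampleCount;
//             }
//         }
//         return 0;  // no supported count satisfies the request
//     }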
1511 
1512 gl::LevelIndex GetLevelIndex(vk::LevelIndex levelVk, gl::LevelIndex baseLevel);
1513 
1514 GLenum ConvertVkFixedRateToGLFixedRate(const VkImageCompressionFixedRateFlagsEXT vkCompressionRate);
1515 GLint ConvertCompressionFlagsToGLFixedRates(
1516     VkImageCompressionFixedRateFlagsEXT imageCompressionFixedRateFlags,
1517     GLsizei bufSize,
1518     GLint *rates);
1519 
1520 EGLenum ConvertVkFixedRateToEGLFixedRate(
1521     const VkImageCompressionFixedRateFlagsEXT vkCompressionRate);
1522 std::vector<EGLint> ConvertCompressionFlagsToEGLFixedRate(
1523     VkImageCompressionFixedRateFlagsEXT imageCompressionFixedRateFlags,
1524     size_t rateSize);
1525 }  // namespace vk_gl
1526 
1527 enum class RenderPassClosureReason
1528 {
1529     // Don't specify the reason (it should already be specified elsewhere)
1530     AlreadySpecifiedElsewhere,
1531 
1532     // Implicit closures due to flush/wait/etc.
1533     ContextDestruction,
1534     ContextChange,
1535     GLFlush,
1536     GLFinish,
1537     EGLSwapBuffers,
1538     EGLWaitClient,
1539     SurfaceUnMakeCurrent,
1540 
1541     // Closure due to switching rendering to another framebuffer.
1542     FramebufferBindingChange,
1543     FramebufferChange,
1544     NewRenderPass,
1545 
1546     // Incompatible use of resource in the same render pass
1547     BufferUseThenXfbWrite,
1548     XfbWriteThenVertexIndexBuffer,
1549     XfbWriteThenIndirectDrawBuffer,
1550     XfbResumeAfterDrawBasedClear,
1551     DepthStencilUseInFeedbackLoop,
1552     DepthStencilWriteAfterFeedbackLoop,
1553     PipelineBindWhileXfbActive,
1554     XfbWriteThenTextureBuffer,
1555 
1556     // Use of resource after render pass
1557     BufferWriteThenMap,
1558     BufferWriteThenOutOfRPRead,
1559     BufferUseThenOutOfRPWrite,
1560     ImageUseThenOutOfRPRead,
1561     ImageUseThenOutOfRPWrite,
1562     XfbWriteThenComputeRead,
1563     XfbWriteThenIndirectDispatchBuffer,
1564     ImageAttachmentThenComputeRead,
1565     GraphicsTextureImageAccessThenComputeAccess,
1566     GetQueryResult,
1567     BeginNonRenderPassQuery,
1568     EndNonRenderPassQuery,
1569     TimestampQuery,
1570     EndRenderPassQuery,
1571     GLReadPixels,
1572 
1573     // Synchronization
1574     BufferUseThenReleaseToExternal,
1575     ImageUseThenReleaseToExternal,
1576     BufferInUseWhenSynchronizedMap,
1577     GLMemoryBarrierThenStorageResource,
1578     StorageResourceUseThenGLMemoryBarrier,
1579     ExternalSemaphoreSignal,
1580     SyncObjectInit,
1581     SyncObjectWithFdInit,
1582     SyncObjectClientWait,
1583     SyncObjectServerWait,
1584     SyncObjectGetStatus,
1585     ForeignImageRelease,
1586 
1587 // Closures that ANGLE could have avoided, but does not, either for simplicity or to optimize
1588 // more common cases.
1589     XfbPause,
1590     FramebufferFetchEmulation,
1591     ColorBufferWithEmulatedAlphaInvalidate,
1592     GenerateMipmapOnCPU,
1593     CopyTextureOnCPU,
1594     TextureReformatToRenderable,
1595     DeviceLocalBufferMap,
1596     OutOfReservedQueueSerialForOutsideCommands,
1597 
1598     // UtilsVk
1599     GenerateMipmapWithDraw,
1600     PrepareForBlit,
1601     PrepareForImageCopy,
1602     TemporaryForClearTexture,
1603     TemporaryForImageClear,
1604     TemporaryForImageCopy,
1605     TemporaryForOverlayDraw,
1606 
1607     // LegacyDithering requires updating the render pass
1608     LegacyDithering,
1609 
1610     // In case of memory budget issues, pending garbage needs to be freed.
1611     ExcessivePendingGarbage,
1612     OutOfMemory,
1613 
1614     InvalidEnum,
1615     EnumCount = InvalidEnum,
1616 };
1617 
1618 // The scope of synchronization for a sync object.  Synchronization is done between the signaling
1619 // entity (src) and the entities waiting on the signal (dst):
1620 //
1621 // - For GL fence sync objects, src is the current context and dst is the host / the rest of the
1622 //   share group.
1623 // - For EGL fence sync objects, src is the current context and dst is the host / all other contexts.
1624 // - For EGL global fence sync objects (an ANGLE extension), src is every context that has
1625 //   previously submitted to the queue used by the current context, and dst is the host / all
1626 //   other contexts.
1627 enum class SyncFenceScope
1628 {
1629     CurrentContextToShareGroup,
1630     CurrentContextToAllContexts,
1631     AllContextsToAllContexts,
1632 };
1633 
1634 }  // namespace rx
1635 
1636 #define ANGLE_VK_TRY(context, command)                                                   \
1637     do                                                                                   \
1638     {                                                                                    \
1639         auto ANGLE_LOCAL_VAR = command;                                                  \
1640         if (ANGLE_UNLIKELY(ANGLE_LOCAL_VAR != VK_SUCCESS))                               \
1641         {                                                                                \
1642             (context)->handleError(ANGLE_LOCAL_VAR, __FILE__, ANGLE_FUNCTION, __LINE__); \
1643             return angle::Result::Stop;                                                  \
1644         }                                                                                \
1645     } while (0)
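// Usage sketch (the helper below is hypothetical, not part of this header): any expression that
// yields a VkResult can be wrapped so that a failure is routed to the context's error handler
// and the calling function returns angle::Result::Stop:
//
//     angle::Result CreateFenceExample(ContextVk *contextVk, VkDevice device, VkFence *fenceOut)
//     {
//         VkFenceCreateInfo createInfo = {};
//         createInfo.sType             = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
//         ANGLE_VK_TRY(contextVk, vkCreateFence(device, &createInfo, nullptr, fenceOut));
//         return angle::Result::Continue;
//     }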
1646 
1647 #define ANGLE_VK_CHECK(context, test, error) ANGLE_VK_TRY(context, test ? VK_SUCCESS : error)
1648 
1649 #define ANGLE_VK_CHECK_MATH(context, result) \
1650     ANGLE_VK_CHECK(context, result, VK_ERROR_VALIDATION_FAILED_EXT)
1651 
1652 #define ANGLE_VK_CHECK_ALLOC(context, result) \
1653     ANGLE_VK_CHECK(context, result, VK_ERROR_OUT_OF_HOST_MEMORY)
1654 
1655 #define ANGLE_VK_UNREACHABLE(context) \
1656     UNREACHABLE();                    \
1657     ANGLE_VK_CHECK(context, false, VK_ERROR_FEATURE_NOT_PRESENT)
1658 
1659 // Returns VkResult in the case of an error.
1660 #define VK_RESULT_TRY(command)                             \
1661     do                                                     \
1662     {                                                      \
1663         auto ANGLE_LOCAL_VAR = command;                    \
1664         if (ANGLE_UNLIKELY(ANGLE_LOCAL_VAR != VK_SUCCESS)) \
1665         {                                                  \
1666             return ANGLE_LOCAL_VAR;                        \
1667         }                                                  \
1668     } while (0)
1669 
1670 #define VK_RESULT_CHECK(test, error) VK_RESULT_TRY((test) ? VK_SUCCESS : (error))
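// Usage sketch (hypothetical helper) for code paths that propagate VkResult directly instead of
// angle::Result:
//
//     VkResult WaitIdleExample(VkDevice device)
//     {
//         VK_RESULT_TRY(vkDeviceWaitIdle(device));
//         return VK_SUCCESS;
//     }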
1671 
1672 // NVIDIA packs the driver version into bit fields of these widths:
1673 // Major: 10 bits
1674 // Minor: 8 bits
1675 // Sub-minor: 8 bits
1676 // Patch: 6 bits
1677 #define ANGLE_VK_VERSION_MAJOR_NVIDIA(version) (((uint32_t)(version) >> 22) & 0x3ff)
1678 #define ANGLE_VK_VERSION_MINOR_NVIDIA(version) (((uint32_t)(version) >> 14) & 0xff)
1679 #define ANGLE_VK_VERSION_SUB_MINOR_NVIDIA(version) (((uint32_t)(version) >> 6) & 0xff)
1680 #define ANGLE_VK_VERSION_PATCH_NVIDIA(version) ((uint32_t)(version) & 0x3f)
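// Worked example with hypothetical values: a version packed as
//     (535u << 22) | (183u << 14) | (1u << 6) | 4u
// decodes via the macros above as major 535, minor 183, sub-minor 1, and patch 4.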
1681 
1682 // Similarly, Intel on Windows packs the driver version as:
1683 // Major: 18 bits
1684 // Minor: 14 bits
1685 #define ANGLE_VK_VERSION_MAJOR_WIN_INTEL(version) (((uint32_t)(version) >> 14) & 0x3ffff)
1686 #define ANGLE_VK_VERSION_MINOR_WIN_INTEL(version) ((uint32_t)(version) & 0x3fff)
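// Worked example with hypothetical values: a version packed as (100u << 14) | 9999u decodes via
// the macros above as major 100 and minor 9999.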
1687 
1688 #endif  // LIBANGLE_RENDERER_VULKAN_VK_UTILS_H_
1689