1 //
2 // Copyright 2016 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // vk_utils:
7 //    Helper functions for the Vulkan Renderer.
8 //
9 
10 #ifndef LIBANGLE_RENDERER_VULKAN_VK_UTILS_H_
11 #define LIBANGLE_RENDERER_VULKAN_VK_UTILS_H_
12 
13 #include <atomic>
14 #include <limits>
15 #include <queue>
16 
17 #include "GLSLANG/ShaderLang.h"
18 #include "common/FixedVector.h"
19 #include "common/Optional.h"
20 #include "common/PackedEnums.h"
21 #include "common/SimpleMutex.h"
22 #include "common/WorkerThread.h"
23 #include "common/backtrace_utils.h"
24 #include "common/debug.h"
25 #include "libANGLE/Error.h"
26 #include "libANGLE/Observer.h"
27 #include "libANGLE/angletypes.h"
28 #include "libANGLE/renderer/serial_utils.h"
29 #include "libANGLE/renderer/vulkan/SecondaryCommandBuffer.h"
30 #include "libANGLE/renderer/vulkan/SecondaryCommandPool.h"
31 #include "libANGLE/renderer/vulkan/VulkanSecondaryCommandBuffer.h"
32 #include "libANGLE/renderer/vulkan/vk_wrapper.h"
33 #include "platform/autogen/FeaturesVk_autogen.h"
34 #include "vulkan/vulkan_fuchsia_ext.h"
35 
36 #define ANGLE_GL_OBJECTS_X(PROC) \
37     PROC(Buffer)                 \
38     PROC(Context)                \
39     PROC(Framebuffer)            \
40     PROC(MemoryObject)           \
41     PROC(Overlay)                \
42     PROC(Program)                \
43     PROC(ProgramExecutable)      \
44     PROC(ProgramPipeline)        \
45     PROC(Query)                  \
46     PROC(Renderbuffer)           \
47     PROC(Sampler)                \
48     PROC(Semaphore)              \
49     PROC(Texture)                \
50     PROC(TransformFeedback)      \
51     PROC(VertexArray)
52 
53 #define ANGLE_PRE_DECLARE_OBJECT(OBJ) class OBJ;
54 
55 namespace egl
56 {
57 class Display;
58 class Image;
59 class ShareGroup;
60 }  // namespace egl
61 
62 namespace gl
63 {
64 class MockOverlay;
65 class ProgramExecutable;
66 struct RasterizerState;
67 struct SwizzleState;
68 struct VertexAttribute;
69 class VertexBinding;
70 
71 ANGLE_GL_OBJECTS_X(ANGLE_PRE_DECLARE_OBJECT)
72 }  // namespace gl
73 
74 #define ANGLE_PRE_DECLARE_VK_OBJECT(OBJ) class OBJ##Vk;
75 
76 namespace rx
77 {
78 class DisplayVk;
79 class ImageVk;
80 class ProgramExecutableVk;
81 class RenderbufferVk;
82 class RenderTargetVk;
83 class RenderPassCache;
84 class ShareGroupVk;
85 }  // namespace rx
86 
87 namespace angle
88 {
89 egl::Error ToEGL(Result result, EGLint errorCode);
90 }  // namespace angle
91 
92 namespace rx
93 {
94 ANGLE_GL_OBJECTS_X(ANGLE_PRE_DECLARE_VK_OBJECT)
95 
96 const char *VulkanResultString(VkResult result);
97 
98 constexpr size_t kMaxVulkanLayers = 20;
99 using VulkanLayerVector           = angle::FixedVector<const char *, kMaxVulkanLayers>;
100 
101 // Verify that validation layers are available.
102 bool GetAvailableValidationLayers(const std::vector<VkLayerProperties> &layerProps,
103                                   bool mustHaveLayers,
104                                   VulkanLayerVector *enabledLayerNames);
105 
106 enum class TextureDimension
107 {
108     TEX_2D,
109     TEX_CUBE,
110     TEX_3D,
111     TEX_2D_ARRAY,
112 };
113 
114 enum class BufferUsageType
115 {
116     Static      = 0,
117     Dynamic     = 1,
118     InvalidEnum = 2,
119     EnumCount   = InvalidEnum,
120 };
121 
122 // A maximum offset of 4096 covers almost every Vulkan driver on desktop (80%) and mobile (99%). The
123 // next highest values needed to match native drivers are 16 bits or 32 bits.
124 constexpr uint32_t kAttributeOffsetMaxBits = 15;
125 constexpr uint32_t kInvalidMemoryTypeIndex = UINT32_MAX;
126 constexpr uint32_t kInvalidMemoryHeapIndex = UINT32_MAX;
127 
128 namespace vk
129 {
130 class Renderer;
131 
132 // Used for memory allocation tracking.
133 enum class MemoryAllocationType;
134 
135 // Encapsulate the graphics family index and VkQueue index (as seen in vkGetDeviceQueue API
136 // arguments) into one integer so that we can easily pass it around without introducing extra overhead.
137 class DeviceQueueIndex final
138 {
139   public:
140     constexpr DeviceQueueIndex()
141         : mFamilyIndex(kInvalidQueueFamilyIndex), mQueueIndex(kInvalidQueueIndex)
142     {}
143     constexpr DeviceQueueIndex(uint32_t familyIndex)
144         : mFamilyIndex((int8_t)familyIndex), mQueueIndex(kInvalidQueueIndex)
145     {
146         ASSERT(static_cast<uint32_t>(mFamilyIndex) == familyIndex);
147     }
148     DeviceQueueIndex(uint32_t familyIndex, uint32_t queueIndex)
149         : mFamilyIndex((int8_t)familyIndex), mQueueIndex((int8_t)queueIndex)
150     {
151         // Ensure that we don't truncate the useful bits.
152         ASSERT(static_cast<uint32_t>(mFamilyIndex) == familyIndex);
153         ASSERT(static_cast<uint32_t>(mQueueIndex) == queueIndex);
154     }
155     DeviceQueueIndex(const DeviceQueueIndex &other) { *this = other; }
156 
157     DeviceQueueIndex &operator=(const DeviceQueueIndex &other)
158     {
159         mValue = other.mValue;
160         return *this;
161     }
162 
163     constexpr uint32_t familyIndex() const { return mFamilyIndex; }
164     constexpr uint32_t queueIndex() const { return mQueueIndex; }
165 
166     bool operator==(const DeviceQueueIndex &other) const { return mValue == other.mValue; }
167     bool operator!=(const DeviceQueueIndex &other) const { return mValue != other.mValue; }
168 
169   private:
170     static constexpr int8_t kInvalidQueueFamilyIndex = -1;
171     static constexpr int8_t kInvalidQueueIndex       = -1;
172     // The expectation is that these indices are small numbers that easily fit into int8_t.
173     // int8_t is used instead of uint8_t because we need to properly handle
174     // VK_QUEUE_FAMILY_FOREIGN_EXT and VK_QUEUE_FAMILY_EXTERNAL, which are essentially negative values.
175     union
176     {
177         struct
178         {
179             int8_t mFamilyIndex;
180             int8_t mQueueIndex;
181         };
182         uint16_t mValue;
183     };
184 };
185 static constexpr DeviceQueueIndex kInvalidDeviceQueueIndex = DeviceQueueIndex();
186 static constexpr DeviceQueueIndex kForeignDeviceQueueIndex =
187     DeviceQueueIndex(VK_QUEUE_FAMILY_FOREIGN_EXT);
188 static constexpr DeviceQueueIndex kExternalDeviceQueueIndex =
189     DeviceQueueIndex(VK_QUEUE_FAMILY_EXTERNAL);
190 static_assert(kForeignDeviceQueueIndex.familyIndex() == VK_QUEUE_FAMILY_FOREIGN_EXT);
191 static_assert(kExternalDeviceQueueIndex.familyIndex() == VK_QUEUE_FAMILY_EXTERNAL);
192 static_assert(kInvalidDeviceQueueIndex.familyIndex() == VK_QUEUE_FAMILY_IGNORED);
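// Illustrative sketch (not part of the original header) of how a DeviceQueueIndex is typically
// constructed and consumed; |device|, |graphicsFamilyIndex|, and |queue| are hypothetical.
//
//   DeviceQueueIndex graphicsQueue(graphicsFamilyIndex, 0);
//   ASSERT(graphicsQueue != kInvalidDeviceQueueIndex);
//   VkQueue queue;
//   vkGetDeviceQueue(device, graphicsQueue.familyIndex(), graphicsQueue.queueIndex(), &queue);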
193 
194 // A packed attachment index, for interfacing with the Vulkan API
195 class PackedAttachmentIndex final
196 {
197   public:
198     explicit constexpr PackedAttachmentIndex(uint32_t index) : mAttachmentIndex(index) {}
199     constexpr PackedAttachmentIndex(const PackedAttachmentIndex &other)            = default;
200     constexpr PackedAttachmentIndex &operator=(const PackedAttachmentIndex &other) = default;
201 
202     constexpr uint32_t get() const { return mAttachmentIndex; }
203     PackedAttachmentIndex &operator++()
204     {
205         ++mAttachmentIndex;
206         return *this;
207     }
208     constexpr bool operator==(const PackedAttachmentIndex &other) const
209     {
210         return mAttachmentIndex == other.mAttachmentIndex;
211     }
212     constexpr bool operator!=(const PackedAttachmentIndex &other) const
213     {
214         return mAttachmentIndex != other.mAttachmentIndex;
215     }
216     constexpr bool operator<(const PackedAttachmentIndex &other) const
217     {
218         return mAttachmentIndex < other.mAttachmentIndex;
219     }
220 
221   private:
222     uint32_t mAttachmentIndex;
223 };
224 using PackedAttachmentCount                                    = PackedAttachmentIndex;
225 static constexpr PackedAttachmentIndex kAttachmentIndexInvalid = PackedAttachmentIndex(-1);
226 static constexpr PackedAttachmentIndex kAttachmentIndexZero    = PackedAttachmentIndex(0);
227 
228 // Prepend ptr to the pNext chain at chainStart
229 template <typename VulkanStruct1, typename VulkanStruct2>
230 void AddToPNextChain(VulkanStruct1 *chainStart, VulkanStruct2 *ptr)
231 {
232     // Catch bugs where this function is called with `&pointer` instead of `pointer`.
233     static_assert(!std::is_pointer<VulkanStruct1>::value);
234     static_assert(!std::is_pointer<VulkanStruct2>::value);
235 
236     ASSERT(ptr->pNext == nullptr);
237 
238     VkBaseOutStructure *localPtr = reinterpret_cast<VkBaseOutStructure *>(chainStart);
239     ptr->pNext                   = localPtr->pNext;
240     localPtr->pNext              = reinterpret_cast<VkBaseOutStructure *>(ptr);
241 }
242 
243 // Append ptr to the end of the chain
244 template <typename VulkanStruct1, typename VulkanStruct2>
AppendToPNextChain(VulkanStruct1 * chainStart,VulkanStruct2 * ptr)245 void AppendToPNextChain(VulkanStruct1 *chainStart, VulkanStruct2 *ptr)
246 {
247     static_assert(!std::is_pointer<VulkanStruct1>::value);
248     static_assert(!std::is_pointer<VulkanStruct2>::value);
249 
250     if (!ptr)
251     {
252         return;
253     }
254 
255     VkBaseOutStructure *endPtr = reinterpret_cast<VkBaseOutStructure *>(chainStart);
256     while (endPtr->pNext)
257     {
258         endPtr = endPtr->pNext;
259     }
260     endPtr->pNext = reinterpret_cast<VkBaseOutStructure *>(ptr);
261 }
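// Illustrative sketch (not from the original header) of how these helpers are typically used to
// chain Vulkan extension structs; the feature structs below are just an example.
//
//   VkPhysicalDeviceFeatures2 features2 = {};
//   features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
//   VkPhysicalDeviceProtectedMemoryFeatures protectedMemory = {};
//   protectedMemory.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
//   // After this call, features2.pNext points at protectedMemory.
//   AddToPNextChain(&features2, &protectedMemory);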
262 
263 class QueueSerialIndexAllocator final
264 {
265   public:
266     QueueSerialIndexAllocator() : mLargestIndexEverAllocated(kInvalidQueueSerialIndex)
267     {
268         // Start with every index free
269         mFreeIndexBitSetArray.set();
270         ASSERT(mFreeIndexBitSetArray.all());
271     }
272     SerialIndex allocate()
273     {
274         std::lock_guard<angle::SimpleMutex> lock(mMutex);
275         if (mFreeIndexBitSetArray.none())
276         {
277             ERR() << "Ran out of queue serial indices. All " << kMaxQueueSerialIndexCount
278                   << " indices are used.";
279             return kInvalidQueueSerialIndex;
280         }
281         SerialIndex index = static_cast<SerialIndex>(mFreeIndexBitSetArray.first());
282         ASSERT(index < kMaxQueueSerialIndexCount);
283         mFreeIndexBitSetArray.reset(index);
284         mLargestIndexEverAllocated = (~mFreeIndexBitSetArray).last();
285         return index;
286     }
287 
288     void release(SerialIndex index)
289     {
290         std::lock_guard<angle::SimpleMutex> lock(mMutex);
291         ASSERT(index <= mLargestIndexEverAllocated);
292         ASSERT(!mFreeIndexBitSetArray.test(index));
293         mFreeIndexBitSetArray.set(index);
294         // mLargestIndexEverAllocated is an optimization. Even though we released the index, some
295         // resources may still hold serials that use it. Thus do not decrement
296         // mLargestIndexEverAllocated here. The only downside is that we may take a slightly less
297         // optimal code path in GetBatchCountUpToSerials.
298     }
299 
300     size_t getLargestIndexEverAllocated() const
301     {
302         return mLargestIndexEverAllocated.load(std::memory_order_consume);
303     }
304 
305   private:
306     angle::BitSetArray<kMaxQueueSerialIndexCount> mFreeIndexBitSetArray;
307     std::atomic<size_t> mLargestIndexEverAllocated;
308     angle::SimpleMutex mMutex;
309 };
310 
311 class [[nodiscard]] ScopedQueueSerialIndex final : angle::NonCopyable
312 {
313   public:
314     ScopedQueueSerialIndex() : mIndex(kInvalidQueueSerialIndex), mIndexAllocator(nullptr) {}
315     ~ScopedQueueSerialIndex()
316     {
317         if (mIndex != kInvalidQueueSerialIndex)
318         {
319             ASSERT(mIndexAllocator != nullptr);
320             mIndexAllocator->release(mIndex);
321         }
322     }
323 
324     void init(SerialIndex index, QueueSerialIndexAllocator *indexAllocator)
325     {
326         ASSERT(mIndex == kInvalidQueueSerialIndex);
327         ASSERT(index != kInvalidQueueSerialIndex);
328         ASSERT(indexAllocator != nullptr);
329         mIndex          = index;
330         mIndexAllocator = indexAllocator;
331     }
332 
333     SerialIndex get() const { return mIndex; }
334 
335   private:
336     SerialIndex mIndex;
337     QueueSerialIndexAllocator *mIndexAllocator;
338 };
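// Illustrative sketch (not from the original header) of the allocate/init/release flow;
// |indexAllocator| is a hypothetical QueueSerialIndexAllocator instance.
//
//   ScopedQueueSerialIndex scopedIndex;
//   SerialIndex index = indexAllocator.allocate();
//   if (index != kInvalidQueueSerialIndex)
//   {
//       scopedIndex.init(index, &indexAllocator);
//   }
//   // When scopedIndex goes out of scope, the index is released back to the allocator.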
339 
340 class RefCountedEventsGarbageRecycler;
341 // Abstracts error handling. Implemented by ContextVk for GL, DisplayVk for EGL, CLContextVk for
342 // CL, worker threads, etc.
343 class Context : angle::NonCopyable
344 {
345   public:
346     Context(Renderer *renderer);
347     virtual ~Context();
348 
349     virtual void handleError(VkResult result,
350                              const char *file,
351                              const char *function,
352                              unsigned int line) = 0;
353     VkDevice getDevice() const;
354     Renderer *getRenderer() const { return mRenderer; }
355     const angle::FeaturesVk &getFeatures() const;
356 
357     const angle::VulkanPerfCounters &getPerfCounters() const { return mPerfCounters; }
358     angle::VulkanPerfCounters &getPerfCounters() { return mPerfCounters; }
359     RefCountedEventsGarbageRecycler *getRefCountedEventsGarbageRecycler()
360     {
361         return mShareGroupRefCountedEventsGarbageRecycler;
362     }
363     const DeviceQueueIndex &getDeviceQueueIndex() const { return mDeviceQueueIndex; }
364 
365   protected:
366     Renderer *const mRenderer;
367     // Stash the ShareGroupVk's RefCountedEventsGarbageRecycler here for ImageHelper to conveniently access.
368     RefCountedEventsGarbageRecycler *mShareGroupRefCountedEventsGarbageRecycler;
369     DeviceQueueIndex mDeviceQueueIndex;
370     angle::VulkanPerfCounters mPerfCounters;
371 };
372 
373 // Abstract global operations that are handled differently between EGL and OpenCL.
374 class GlobalOps : angle::NonCopyable
375 {
376   public:
377     virtual ~GlobalOps() = default;
378 
379     virtual void putBlob(const angle::BlobCacheKey &key, const angle::MemoryBuffer &value) = 0;
380     virtual bool getBlob(const angle::BlobCacheKey &key, angle::BlobCacheValue *valueOut)  = 0;
381 
382     virtual std::shared_ptr<angle::WaitableEvent> postMultiThreadWorkerTask(
383         const std::shared_ptr<angle::Closure> &task) = 0;
384 
385     virtual void notifyDeviceLost() = 0;
386 };
387 
388 class RenderPassDesc;
389 
390 #if ANGLE_USE_CUSTOM_VULKAN_OUTSIDE_RENDER_PASS_CMD_BUFFERS
391 using OutsideRenderPassCommandBuffer = priv::SecondaryCommandBuffer;
392 #else
393 using OutsideRenderPassCommandBuffer = VulkanSecondaryCommandBuffer;
394 #endif
395 #if ANGLE_USE_CUSTOM_VULKAN_RENDER_PASS_CMD_BUFFERS
396 using RenderPassCommandBuffer = priv::SecondaryCommandBuffer;
397 #else
398 using RenderPassCommandBuffer = VulkanSecondaryCommandBuffer;
399 #endif
400 
401 struct SecondaryCommandPools
402 {
403     SecondaryCommandPool outsideRenderPassPool;
404     SecondaryCommandPool renderPassPool;
405 };
406 
407 VkImageAspectFlags GetDepthStencilAspectFlags(const angle::Format &format);
408 VkImageAspectFlags GetFormatAspectFlags(const angle::Format &format);
409 
410 template <typename T>
411 struct ImplTypeHelper;
412 
413 // clang-format off
414 #define ANGLE_IMPL_TYPE_HELPER_GL(OBJ) \
415 template<>                             \
416 struct ImplTypeHelper<gl::OBJ>         \
417 {                                      \
418     using ImplType = OBJ##Vk;          \
419 };
420 // clang-format on
421 
422 ANGLE_GL_OBJECTS_X(ANGLE_IMPL_TYPE_HELPER_GL)
423 
424 template <>
425 struct ImplTypeHelper<gl::MockOverlay>
426 {
427     using ImplType = OverlayVk;
428 };
429 
430 template <>
431 struct ImplTypeHelper<egl::Display>
432 {
433     using ImplType = DisplayVk;
434 };
435 
436 template <>
437 struct ImplTypeHelper<egl::Image>
438 {
439     using ImplType = ImageVk;
440 };
441 
442 template <>
443 struct ImplTypeHelper<egl::ShareGroup>
444 {
445     using ImplType = ShareGroupVk;
446 };
447 
448 template <typename T>
449 using GetImplType = typename ImplTypeHelper<T>::ImplType;
450 
451 template <typename T>
452 GetImplType<T> *GetImpl(const T *glObject)
453 {
454     return GetImplAs<GetImplType<T>>(glObject);
455 }
456 
457 template <typename T>
458 GetImplType<T> *SafeGetImpl(const T *glObject)
459 {
460     return SafeGetImplAs<GetImplType<T>>(glObject);
461 }
462 
463 template <>
464 inline OverlayVk *GetImpl(const gl::MockOverlay *glObject)
465 {
466     return nullptr;
467 }
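// Illustrative sketch (not from the original header): GetImpl maps a front-end object to its
// Vulkan back-end implementation; |texture| and |buffer| are hypothetical front-end pointers.
//
//   TextureVk *textureVk = vk::GetImpl(texture);  // gl::Texture -> TextureVk
//   BufferVk *bufferVk   = vk::GetImpl(buffer);   // gl::Buffer  -> BufferVk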
468 
469 // Reference to a deleted object. The object is due to be destroyed at some point in the future.
470 // |mHandleType| determines the type of the object and which destroy function should be called.
471 class GarbageObject
472 {
473   public:
474     GarbageObject();
475     GarbageObject(GarbageObject &&other);
476     GarbageObject &operator=(GarbageObject &&rhs);
477 
478     bool valid() const { return mHandle != VK_NULL_HANDLE; }
479     void destroy(Renderer *renderer);
480 
481     template <typename DerivedT, typename HandleT>
482     static GarbageObject Get(WrappedObject<DerivedT, HandleT> *object)
483     {
484         // Using c-style cast here to avoid conditional compile for MSVC 32-bit
485         //  which fails to compile with reinterpret_cast, requiring static_cast.
486         return GarbageObject(HandleTypeHelper<DerivedT>::kHandleType,
487                              (GarbageHandle)(object->release()));
488     }
489 
490   private:
491     VK_DEFINE_NON_DISPATCHABLE_HANDLE(GarbageHandle)
492     GarbageObject(HandleType handleType, GarbageHandle handle);
493 
494     HandleType mHandleType;
495     GarbageHandle mHandle;
496 };
497 
498 template <typename T>
499 GarbageObject GetGarbage(T *obj)
500 {
501     return GarbageObject::Get(obj);
502 }
503 
504 // A list of garbage objects. Has no object lifetime information.
505 using GarbageObjects = std::vector<GarbageObject>;
506 
507 class MemoryProperties final : angle::NonCopyable
508 {
509   public:
510     MemoryProperties();
511 
512     void init(VkPhysicalDevice physicalDevice);
513     bool hasLazilyAllocatedMemory() const;
514     VkResult findCompatibleMemoryIndex(Context *context,
515                                        const VkMemoryRequirements &memoryRequirements,
516                                        VkMemoryPropertyFlags requestedMemoryPropertyFlags,
517                                        bool isExternalMemory,
518                                        VkMemoryPropertyFlags *memoryPropertyFlagsOut,
519                                        uint32_t *indexOut) const;
520     void destroy();
521 
522     uint32_t getHeapIndexForMemoryType(uint32_t memoryType) const
523     {
524         if (memoryType == kInvalidMemoryTypeIndex)
525         {
526             return kInvalidMemoryHeapIndex;
527         }
528 
529         ASSERT(memoryType < getMemoryTypeCount());
530         return mMemoryProperties.memoryTypes[memoryType].heapIndex;
531     }
532 
533     VkDeviceSize getHeapSizeForMemoryType(uint32_t memoryType) const
534     {
535         uint32_t heapIndex = mMemoryProperties.memoryTypes[memoryType].heapIndex;
536         return mMemoryProperties.memoryHeaps[heapIndex].size;
537     }
538 
539     const VkMemoryType &getMemoryType(uint32_t i) const { return mMemoryProperties.memoryTypes[i]; }
540 
541     uint32_t getMemoryHeapCount() const { return mMemoryProperties.memoryHeapCount; }
542     uint32_t getMemoryTypeCount() const { return mMemoryProperties.memoryTypeCount; }
543 
544   private:
545     VkPhysicalDeviceMemoryProperties mMemoryProperties;
546 };
547 
548 // Similar to StagingImage, for Buffers.
549 class StagingBuffer final : angle::NonCopyable
550 {
551   public:
552     StagingBuffer();
553     void release(ContextVk *contextVk);
554     void collectGarbage(Renderer *renderer, const QueueSerial &queueSerial);
555     void destroy(Renderer *renderer);
556 
557     angle::Result init(Context *context, VkDeviceSize size, StagingUsage usage);
558 
559     Buffer &getBuffer() { return mBuffer; }
560     const Buffer &getBuffer() const { return mBuffer; }
561     size_t getSize() const { return mSize; }
562 
563   private:
564     Buffer mBuffer;
565     Allocation mAllocation;
566     size_t mSize;
567 };
568 
569 angle::Result InitMappableAllocation(Context *context,
570                                      const Allocator &allocator,
571                                      Allocation *allocation,
572                                      VkDeviceSize size,
573                                      int value,
574                                      VkMemoryPropertyFlags memoryPropertyFlags);
575 
576 VkResult AllocateBufferMemory(Context *context,
577                               vk::MemoryAllocationType memoryAllocationType,
578                               VkMemoryPropertyFlags requestedMemoryPropertyFlags,
579                               VkMemoryPropertyFlags *memoryPropertyFlagsOut,
580                               const void *extraAllocationInfo,
581                               Buffer *buffer,
582                               uint32_t *memoryTypeIndexOut,
583                               DeviceMemory *deviceMemoryOut,
584                               VkDeviceSize *sizeOut);
585 
586 VkResult AllocateImageMemory(Context *context,
587                              vk::MemoryAllocationType memoryAllocationType,
588                              VkMemoryPropertyFlags memoryPropertyFlags,
589                              VkMemoryPropertyFlags *memoryPropertyFlagsOut,
590                              const void *extraAllocationInfo,
591                              Image *image,
592                              uint32_t *memoryTypeIndexOut,
593                              DeviceMemory *deviceMemoryOut,
594                              VkDeviceSize *sizeOut);
595 
596 VkResult AllocateImageMemoryWithRequirements(Context *context,
597                                              vk::MemoryAllocationType memoryAllocationType,
598                                              VkMemoryPropertyFlags memoryPropertyFlags,
599                                              const VkMemoryRequirements &memoryRequirements,
600                                              const void *extraAllocationInfo,
601                                              const VkBindImagePlaneMemoryInfoKHR *extraBindInfo,
602                                              Image *image,
603                                              uint32_t *memoryTypeIndexOut,
604                                              DeviceMemory *deviceMemoryOut);
605 
606 VkResult AllocateBufferMemoryWithRequirements(Context *context,
607                                               MemoryAllocationType memoryAllocationType,
608                                               VkMemoryPropertyFlags memoryPropertyFlags,
609                                               const VkMemoryRequirements &memoryRequirements,
610                                               const void *extraAllocationInfo,
611                                               Buffer *buffer,
612                                               VkMemoryPropertyFlags *memoryPropertyFlagsOut,
613                                               uint32_t *memoryTypeIndexOut,
614                                               DeviceMemory *deviceMemoryOut);
615 
616 angle::Result InitShaderModule(Context *context,
617                                ShaderModule *shaderModule,
618                                const uint32_t *shaderCode,
619                                size_t shaderCodeSize);
620 
621 gl::TextureType Get2DTextureType(uint32_t layerCount, GLint samples);
622 
623 enum class RecordingMode
624 {
625     Start,
626     Append,
627 };
628 
629 // Helper class to handle RAII patterns for initialization. Requires that T have a destroy method
630 // that takes a VkDevice and returns void.
631 template <typename T>
632 class [[nodiscard]] DeviceScoped final : angle::NonCopyable
633 {
634   public:
635     DeviceScoped(VkDevice device) : mDevice(device) {}
636     ~DeviceScoped() { mVar.destroy(mDevice); }
637 
638     const T &get() const { return mVar; }
639     T &get() { return mVar; }
640 
641     T &&release() { return std::move(mVar); }
642 
643   private:
644     VkDevice mDevice;
645     T mVar;
646 };
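// Illustrative sketch (not from the original header); |device| is a hypothetical VkDevice.  The
// wrapped object is destroyed automatically unless release() transfers ownership out of the scope.
//
//   DeviceScoped<Semaphore> scopedSemaphore(device);
//   // ... initialize scopedSemaphore.get(); on an early error return, the semaphore is destroyed ...
//   Semaphore semaphore = scopedSemaphore.release();  // success: keep the object alive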
647 
648 template <typename T>
649 class [[nodiscard]] AllocatorScoped final : angle::NonCopyable
650 {
651   public:
652     AllocatorScoped(const Allocator &allocator) : mAllocator(allocator) {}
653     ~AllocatorScoped() { mVar.destroy(mAllocator); }
654 
655     const T &get() const { return mVar; }
656     T &get() { return mVar; }
657 
658     T &&release() { return std::move(mVar); }
659 
660   private:
661     const Allocator &mAllocator;
662     T mVar;
663 };
664 
665 // Similar to DeviceScoped, but releases objects instead of destroying them. Requires that T have a
666 // release method that takes a ContextVk * and returns void.
667 template <typename T>
668 class [[nodiscard]] ContextScoped final : angle::NonCopyable
669 {
670   public:
671     ContextScoped(ContextVk *contextVk) : mContextVk(contextVk) {}
672     ~ContextScoped() { mVar.release(mContextVk); }
673 
674     const T &get() const { return mVar; }
675     T &get() { return mVar; }
676 
677     T &&release() { return std::move(mVar); }
678 
679   private:
680     ContextVk *mContextVk;
681     T mVar;
682 };
683 
684 template <typename T>
685 class [[nodiscard]] RendererScoped final : angle::NonCopyable
686 {
687   public:
688     RendererScoped(Renderer *renderer) : mRenderer(renderer) {}
689     ~RendererScoped() { mVar.release(mRenderer); }
690 
691     const T &get() const { return mVar; }
692     T &get() { return mVar; }
693 
694     T &&release() { return std::move(mVar); }
695 
696   private:
697     Renderer *mRenderer;
698     T mVar;
699 };
700 
701 // This is a very simple RefCount class that has no autoreleasing.
702 template <typename T>
703 class RefCounted : angle::NonCopyable
704 {
705   public:
706     RefCounted() : mRefCount(0) {}
707     explicit RefCounted(T &&newObject) : mRefCount(0), mObject(std::move(newObject)) {}
708     ~RefCounted() { ASSERT(mRefCount == 0 && !mObject.valid()); }
709 
710     RefCounted(RefCounted &&copy) : mRefCount(copy.mRefCount), mObject(std::move(copy.mObject))
711     {
712         ASSERT(this != &copy);
713         copy.mRefCount = 0;
714     }
715 
716     RefCounted &operator=(RefCounted &&rhs)
717     {
718         std::swap(mRefCount, rhs.mRefCount);
719         mObject = std::move(rhs.mObject);
720         return *this;
721     }
722 
723     void addRef()
724     {
725         ASSERT(mRefCount != std::numeric_limits<uint32_t>::max());
726         mRefCount++;
727     }
728 
729     void releaseRef()
730     {
731         ASSERT(isReferenced());
732         mRefCount--;
733     }
734 
735     uint32_t getAndReleaseRef()
736     {
737         ASSERT(isReferenced());
738         return mRefCount--;
739     }
740 
741     bool isReferenced() const { return mRefCount != 0; }
742 
743     T &get() { return mObject; }
744     const T &get() const { return mObject; }
745 
746     // A debug function, used in assertions, to validate that the reference count is as expected.
747     bool isRefCountAsExpected(uint32_t expectedRefCount) { return mRefCount == expectedRefCount; }
748 
749   private:
750     uint32_t mRefCount;
751     T mObject;
752 };
753 
754 // Atomic version of RefCounted.  Used in the descriptor set and pipeline layout caches, which are
755 // accessed by link jobs.  No std::move is allowed due to the atomic ref count.
756 template <typename T>
757 class AtomicRefCounted : angle::NonCopyable
758 {
759   public:
760     AtomicRefCounted() : mRefCount(0) {}
761     explicit AtomicRefCounted(T &&newObject) : mRefCount(0), mObject(std::move(newObject)) {}
762     ~AtomicRefCounted() { ASSERT(mRefCount == 0 && !mObject.valid()); }
763 
764     void addRef()
765     {
766         ASSERT(mRefCount != std::numeric_limits<uint32_t>::max());
767         mRefCount.fetch_add(1, std::memory_order_relaxed);
768     }
769 
770     // Warning: this method does not perform any synchronization, therefore it cannot be used with a
771     // following `!isReferenced()` call to check whether the object is no longer accessed by other
772     // threads.  Use `getAndReleaseRef()` instead when synchronization is required.
773     void releaseRef()
774     {
775         ASSERT(isReferenced());
776         mRefCount.fetch_sub(1, std::memory_order_relaxed);
777     }
778 
779     // Performs acquire-release memory synchronization.  When the returned value is 1, the object is
780     // guaranteed to be no longer in use by other threads, and may be safely destroyed or updated.
781     // Warning: do not mix this method with the unsynchronized `releaseRef()` call.
782     unsigned int getAndReleaseRef()
783     {
784         ASSERT(isReferenced());
785         return mRefCount.fetch_sub(1, std::memory_order_acq_rel);
786     }
787 
788     // Warning: this method does not perform any synchronization; see `releaseRef()` for details.
789     // It may only be used under external synchronization.
790     bool isReferenced() const { return mRefCount.load(std::memory_order_relaxed) != 0; }
791 
792     T &get() { return mObject; }
793     const T &get() const { return mObject; }
794 
795   private:
796     std::atomic_uint mRefCount;
797     T mObject;
798 };
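// Illustrative sketch (not from the original header) of the synchronized release pattern the
// comments above describe; |refCounted| and |device| are hypothetical.
//
//   if (refCounted->getAndReleaseRef() == 1)
//   {
//       // The acquire-release ordering guarantees no other thread still uses the object.
//       refCounted->get().destroy(device);
//       delete refCounted;
//   }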
799 
800 template <typename T, typename RC = RefCounted<T>>
801 class BindingPointer final : angle::NonCopyable
802 {
803   public:
804     BindingPointer() = default;
805     ~BindingPointer() { reset(); }
806 
807     BindingPointer(BindingPointer &&other) : mRefCounted(other.mRefCounted)
808     {
809         other.mRefCounted = nullptr;
810     }
811 
812     void set(RC *refCounted)
813     {
814         if (mRefCounted)
815         {
816             mRefCounted->releaseRef();
817         }
818 
819         mRefCounted = refCounted;
820 
821         if (mRefCounted)
822         {
823             mRefCounted->addRef();
824         }
825     }
826 
827     void reset() { set(nullptr); }
828 
829     T &get() { return mRefCounted->get(); }
830     const T &get() const { return mRefCounted->get(); }
831 
832     bool valid() const { return mRefCounted != nullptr; }
833 
834     RC *getRefCounted() { return mRefCounted; }
835 
836   private:
837     RC *mRefCounted = nullptr;
838 };
839 
840 template <typename T>
841 using AtomicBindingPointer = BindingPointer<T, AtomicRefCounted<T>>;
842 
843 // Helper class to share ref-counted Vulkan objects.  Requires that T have a destroy method
844 // that takes a VkDevice and returns void.
845 template <typename T>
846 class Shared final : angle::NonCopyable
847 {
848   public:
849     Shared() : mRefCounted(nullptr) {}
850     ~Shared() { ASSERT(mRefCounted == nullptr); }
851 
852     Shared(Shared &&other) { *this = std::move(other); }
853     Shared &operator=(Shared &&other)
854     {
855         ASSERT(this != &other);
856         mRefCounted       = other.mRefCounted;
857         other.mRefCounted = nullptr;
858         return *this;
859     }
860 
861     void set(VkDevice device, RefCounted<T> *refCounted)
862     {
863         if (mRefCounted)
864         {
865             mRefCounted->releaseRef();
866             if (!mRefCounted->isReferenced())
867             {
868                 mRefCounted->get().destroy(device);
869                 SafeDelete(mRefCounted);
870             }
871         }
872 
873         mRefCounted = refCounted;
874 
875         if (mRefCounted)
876         {
877             mRefCounted->addRef();
878         }
879     }
880 
881     void setUnreferenced(RefCounted<T> *refCounted)
882     {
883         ASSERT(!mRefCounted);
884         ASSERT(refCounted);
885 
886         mRefCounted = refCounted;
887         mRefCounted->addRef();
888     }
889 
890     void assign(VkDevice device, T &&newObject)
891     {
892         set(device, new RefCounted<T>(std::move(newObject)));
893     }
894 
895     void copy(VkDevice device, const Shared<T> &other) { set(device, other.mRefCounted); }
896 
897     void copyUnreferenced(const Shared<T> &other) { setUnreferenced(other.mRefCounted); }
898 
899     void reset(VkDevice device) { set(device, nullptr); }
900 
901     template <typename RecyclerT>
902     void resetAndRecycle(RecyclerT *recycler)
903     {
904         if (mRefCounted)
905         {
906             mRefCounted->releaseRef();
907             if (!mRefCounted->isReferenced())
908             {
909                 ASSERT(mRefCounted->get().valid());
910                 recycler->recycle(std::move(mRefCounted->get()));
911                 SafeDelete(mRefCounted);
912             }
913 
914             mRefCounted = nullptr;
915         }
916     }
917 
918     template <typename OnRelease>
919     void resetAndRelease(OnRelease *onRelease)
920     {
921         if (mRefCounted)
922         {
923             mRefCounted->releaseRef();
924             if (!mRefCounted->isReferenced())
925             {
926                 ASSERT(mRefCounted->get().valid());
927                 (*onRelease)(std::move(mRefCounted->get()));
928                 SafeDelete(mRefCounted);
929             }
930 
931             mRefCounted = nullptr;
932         }
933     }
934 
935     bool isReferenced() const
936     {
937         // If reference is zero, the object should have been deleted.  I.e. if the object is not
938         // nullptr, it should have a reference.
939         ASSERT(!mRefCounted || mRefCounted->isReferenced());
940         return mRefCounted != nullptr;
941     }
942 
943     T &get()
944     {
945         ASSERT(mRefCounted && mRefCounted->isReferenced());
946         return mRefCounted->get();
947     }
948     const T &get() const
949     {
950         ASSERT(mRefCounted && mRefCounted->isReferenced());
951         return mRefCounted->get();
952     }
953 
954   private:
955     RefCounted<T> *mRefCounted;
956 };
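// Illustrative sketch (not from the original header); |device|, |fence|, and |fenceRecycler| are
// hypothetical.  Shared<T> keeps the object alive until the last holder lets go of it.
//
//   Shared<Fence> sharedFence;
//   sharedFence.assign(device, std::move(fence));  // ref count 1
//   Shared<Fence> otherRef;
//   otherRef.copy(device, sharedFence);            // ref count 2
//   sharedFence.reset(device);                     // ref count 1
//   otherRef.resetAndRecycle(&fenceRecycler);      // last holder: object is recycled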
957 
958 template <typename T, typename StorageT = std::deque<T>>
959 class Recycler final : angle::NonCopyable
960 {
961   public:
962     Recycler() = default;
963     Recycler(StorageT &&storage) { mObjectFreeList = std::move(storage); }
964 
965     void recycle(T &&garbageObject)
966     {
967         // Recycling invalid objects is pointless and potentially a bug.
968         ASSERT(garbageObject.valid());
969         mObjectFreeList.emplace_back(std::move(garbageObject));
970     }
971 
972     void recycle(StorageT &&garbageObjects)
973     {
974         // Recycling invalid objects is pointless and potentially a bug.
975         ASSERT(!garbageObjects.empty());
976         mObjectFreeList.insert(mObjectFreeList.end(), garbageObjects.begin(), garbageObjects.end());
977         garbageObjects.clear();
978     }
979 
980     void refill(StorageT &&garbageObjects)
981     {
982         ASSERT(!garbageObjects.empty());
983         ASSERT(mObjectFreeList.empty());
984         mObjectFreeList.swap(garbageObjects);
985     }
986 
987     void fetch(T *outObject)
988     {
989         ASSERT(!empty());
990         *outObject = std::move(mObjectFreeList.back());
991         mObjectFreeList.pop_back();
992     }
993 
994     void destroy(VkDevice device)
995     {
996         while (!mObjectFreeList.empty())
997         {
998             T &object = mObjectFreeList.back();
999             object.destroy(device);
1000             mObjectFreeList.pop_back();
1001         }
1002     }
1003 
1004     bool empty() const { return mObjectFreeList.empty(); }
1005 
1006   private:
1007     StorageT mObjectFreeList;
1008 };
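// Illustrative sketch (not from the original header); |semaphoreRecycler| and |device| are
// hypothetical.  Objects are fetched from the free list when available and recycled when done.
//
//   Semaphore semaphore;
//   if (!semaphoreRecycler.empty())
//   {
//       semaphoreRecycler.fetch(&semaphore);
//   }
//   // ... use the (initialized) semaphore ...
//   semaphoreRecycler.recycle(std::move(semaphore));
//   semaphoreRecycler.destroy(device);  // at shutdown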
1009 
1010 ANGLE_ENABLE_STRUCT_PADDING_WARNINGS
1011 struct SpecializationConstants final
1012 {
1013     VkBool32 surfaceRotation;
1014     uint32_t dither;
1015 };
1016 ANGLE_DISABLE_STRUCT_PADDING_WARNINGS
1017 
1018 template <typename T>
1019 using SpecializationConstantMap = angle::PackedEnumMap<sh::vk::SpecializationConstantId, T>;
1020 
1021 using ShaderModulePointer = BindingPointer<ShaderModule>;
1022 using ShaderModuleMap     = gl::ShaderMap<ShaderModulePointer>;
1023 
1024 void MakeDebugUtilsLabel(GLenum source, const char *marker, VkDebugUtilsLabelEXT *label);
1025 
1026 constexpr size_t kUnpackedDepthIndex   = gl::IMPLEMENTATION_MAX_DRAW_BUFFERS;
1027 constexpr size_t kUnpackedStencilIndex = gl::IMPLEMENTATION_MAX_DRAW_BUFFERS + 1;
1028 constexpr uint32_t kUnpackedColorBuffersMask =
1029     angle::BitMask<uint32_t>(gl::IMPLEMENTATION_MAX_DRAW_BUFFERS);
1030 
1031 class ClearValuesArray final
1032 {
1033   public:
1034     ClearValuesArray();
1035     ~ClearValuesArray();
1036 
1037     ClearValuesArray(const ClearValuesArray &other);
1038     ClearValuesArray &operator=(const ClearValuesArray &rhs);
1039 
1040     void store(uint32_t index, VkImageAspectFlags aspectFlags, const VkClearValue &clearValue);
1041     void storeNoDepthStencil(uint32_t index, const VkClearValue &clearValue);
1042 
1043     void reset(size_t index)
1044     {
1045         mValues[index] = {};
1046         mEnabled.reset(index);
1047     }
1048 
1049     bool test(size_t index) const { return mEnabled.test(index); }
1050     bool testDepth() const { return mEnabled.test(kUnpackedDepthIndex); }
1051     bool testStencil() const { return mEnabled.test(kUnpackedStencilIndex); }
1052     gl::DrawBufferMask getColorMask() const;
1053 
1054     const VkClearValue &operator[](size_t index) const { return mValues[index]; }
1055 
1056     float getDepthValue() const { return mValues[kUnpackedDepthIndex].depthStencil.depth; }
1057     uint32_t getStencilValue() const { return mValues[kUnpackedStencilIndex].depthStencil.stencil; }
1058 
1059     const VkClearValue *data() const { return mValues.data(); }
1060     bool empty() const { return mEnabled.none(); }
1061     bool any() const { return mEnabled.any(); }
1062 
1063   private:
1064     gl::AttachmentArray<VkClearValue> mValues;
1065     gl::AttachmentsMask mEnabled;
1066 };
1067 
1068 // Defines Serials for Vulkan objects.
1069 #define ANGLE_VK_SERIAL_OP(X) \
1070     X(Buffer)                 \
1071     X(Image)                  \
1072     X(ImageOrBufferView)      \
1073     X(Sampler)
1074 
1075 #define ANGLE_DEFINE_VK_SERIAL_TYPE(Type)                                     \
1076     class Type##Serial                                                        \
1077     {                                                                         \
1078       public:                                                                 \
1079         constexpr Type##Serial() : mSerial(kInvalid) {}                       \
1080         constexpr explicit Type##Serial(uint32_t serial) : mSerial(serial) {} \
1081                                                                               \
1082         constexpr bool operator==(const Type##Serial &other) const            \
1083         {                                                                     \
1084             ASSERT(mSerial != kInvalid || other.mSerial != kInvalid);         \
1085             return mSerial == other.mSerial;                                  \
1086         }                                                                     \
1087         constexpr bool operator!=(const Type##Serial &other) const            \
1088         {                                                                     \
1089             ASSERT(mSerial != kInvalid || other.mSerial != kInvalid);         \
1090             return mSerial != other.mSerial;                                  \
1091         }                                                                     \
1092         constexpr uint32_t getValue() const                                   \
1093         {                                                                     \
1094             return mSerial;                                                   \
1095         }                                                                     \
1096         constexpr bool valid() const                                          \
1097         {                                                                     \
1098             return mSerial != kInvalid;                                       \
1099         }                                                                     \
1100                                                                               \
1101       private:                                                                \
1102         uint32_t mSerial;                                                     \
1103         static constexpr uint32_t kInvalid = 0;                               \
1104     };                                                                        \
1105     static constexpr Type##Serial kInvalid##Type##Serial = Type##Serial();
1106 
1107 ANGLE_VK_SERIAL_OP(ANGLE_DEFINE_VK_SERIAL_TYPE)
1108 
1109 #define ANGLE_DECLARE_GEN_VK_SERIAL(Type) Type##Serial generate##Type##Serial();
1110 
1111 class ResourceSerialFactory final : angle::NonCopyable
1112 {
1113   public:
1114     ResourceSerialFactory();
1115     ~ResourceSerialFactory();
1116 
1117     ANGLE_VK_SERIAL_OP(ANGLE_DECLARE_GEN_VK_SERIAL)
1118 
1119   private:
1120     uint32_t issueSerial();
1121 
1122     // Kept atomic so it can be accessed from multiple Context threads at once.
1123     std::atomic<uint32_t> mCurrentUniqueSerial;
1124 };
1125 
1126 #if defined(ANGLE_ENABLE_PERF_COUNTER_OUTPUT)
1127 constexpr bool kOutputCumulativePerfCounters = ANGLE_ENABLE_PERF_COUNTER_OUTPUT;
1128 #else
1129 constexpr bool kOutputCumulativePerfCounters = false;
1130 #endif
1131 
1132 // Performance and resource counters.
1133 struct RenderPassPerfCounters
1134 {
1135     // load/storeOps. Includes ops for resolve attachment. Maximum value = 2.
1136     uint8_t colorLoadOpClears;
1137     uint8_t colorLoadOpLoads;
1138     uint8_t colorLoadOpNones;
1139     uint8_t colorStoreOpStores;
1140     uint8_t colorStoreOpNones;
1141     uint8_t depthLoadOpClears;
1142     uint8_t depthLoadOpLoads;
1143     uint8_t depthLoadOpNones;
1144     uint8_t depthStoreOpStores;
1145     uint8_t depthStoreOpNones;
1146     uint8_t stencilLoadOpClears;
1147     uint8_t stencilLoadOpLoads;
1148     uint8_t stencilLoadOpNones;
1149     uint8_t stencilStoreOpStores;
1150     uint8_t stencilStoreOpNones;
1151     // Number of unresolve and resolve operations.  Maximum value for color =
1152     // gl::IMPLEMENTATION_MAX_DRAW_BUFFERS and for depth/stencil = 1 each.
1153     uint8_t colorAttachmentUnresolves;
1154     uint8_t colorAttachmentResolves;
1155     uint8_t depthAttachmentUnresolves;
1156     uint8_t depthAttachmentResolves;
1157     uint8_t stencilAttachmentUnresolves;
1158     uint8_t stencilAttachmentResolves;
1159     // Whether the depth/stencil attachment is using a read-only layout.
1160     uint8_t readOnlyDepthStencil;
1161 };
1162 
1163 // A Vulkan image level index.
1164 using LevelIndex = gl::LevelIndexWrapper<uint32_t>;
1165 
1166 // Ensure viewport is within Vulkan requirements
1167 void ClampViewport(VkViewport *viewport);
1168 
1169 constexpr bool IsDynamicDescriptor(VkDescriptorType descriptorType)
1170 {
1171     switch (descriptorType)
1172     {
1173         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1174         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1175             return true;
1176         default:
1177             return false;
1178     }
1179 }
1180 
1181 void ApplyPipelineCreationFeedback(Context *context, const VkPipelineCreationFeedback &feedback);
1182 
1183 angle::Result SetDebugUtilsObjectName(ContextVk *contextVk,
1184                                       VkObjectType objectType,
1185                                       uint64_t handle,
1186                                       const std::string &label);
1187 
1188 }  // namespace vk
1189 
1190 #if !defined(ANGLE_SHARED_LIBVULKAN)
1191 // Lazily load entry points for each extension as necessary.
1192 void InitDebugUtilsEXTFunctions(VkInstance instance);
1193 void InitTransformFeedbackEXTFunctions(VkDevice device);
1194 void InitRenderPass2KHRFunctions(VkDevice device);
1195 
1196 #    if defined(ANGLE_PLATFORM_FUCHSIA)
1197 // VK_FUCHSIA_imagepipe_surface
1198 void InitImagePipeSurfaceFUCHSIAFunctions(VkInstance instance);
1199 #    endif
1200 
1201 #    if defined(ANGLE_PLATFORM_ANDROID)
1202 // VK_ANDROID_external_memory_android_hardware_buffer
1203 void InitExternalMemoryHardwareBufferANDROIDFunctions(VkInstance instance);
1204 #    endif
1205 
1206 #    if defined(ANGLE_PLATFORM_GGP)
1207 // VK_GGP_stream_descriptor_surface
1208 void InitGGPStreamDescriptorSurfaceFunctions(VkInstance instance);
1209 #    endif  // defined(ANGLE_PLATFORM_GGP)
1210 
1211 // VK_KHR_external_semaphore_fd
1212 void InitExternalSemaphoreFdFunctions(VkInstance instance);
1213 
1214 // VK_EXT_host_query_reset
1215 void InitHostQueryResetFunctions(VkDevice device);
1216 
1217 // VK_KHR_external_fence_fd
1218 void InitExternalFenceFdFunctions(VkInstance instance);
1219 
1220 // VK_KHR_shared_presentable_image
1221 void InitGetSwapchainStatusKHRFunctions(VkDevice device);
1222 
1223 // VK_EXT_extended_dynamic_state
1224 void InitExtendedDynamicStateEXTFunctions(VkDevice device);
1225 
1226 // VK_EXT_extended_dynamic_state2
1227 void InitExtendedDynamicState2EXTFunctions(VkDevice device);
1228 
1229 // VK_EXT_vertex_input_dynamic_state
1230 void InitVertexInputDynamicStateEXTFunctions(VkDevice device);
1231 
1232 // VK_KHR_dynamic_rendering
1233 void InitDynamicRenderingFunctions(VkDevice device);
1234 
1235 // VK_KHR_dynamic_rendering_local_read
1236 void InitDynamicRenderingLocalReadFunctions(VkDevice device);
1237 
1238 // VK_KHR_fragment_shading_rate
1239 void InitFragmentShadingRateKHRInstanceFunction(VkInstance instance);
1240 void InitFragmentShadingRateKHRDeviceFunction(VkDevice device);
1241 
1242 // VK_GOOGLE_display_timing
1243 void InitGetPastPresentationTimingGoogleFunction(VkDevice device);
1244 
1245 // VK_EXT_host_image_copy
1246 void InitHostImageCopyFunctions(VkDevice device);
1247 
1248 #endif  // !defined(ANGLE_SHARED_LIBVULKAN)
1249 
1250 // Promoted to Vulkan 1.1
1251 void InitGetPhysicalDeviceProperties2KHRFunctionsFromCore();
1252 void InitExternalFenceCapabilitiesFunctionsFromCore();
1253 void InitExternalSemaphoreCapabilitiesFunctionsFromCore();
1254 void InitSamplerYcbcrKHRFunctionsFromCore();
1255 void InitGetMemoryRequirements2KHRFunctionsFromCore();
1256 void InitBindMemory2KHRFunctionsFromCore();
1257 
1258 GLenum CalculateGenerateMipmapFilter(ContextVk *contextVk, angle::FormatID formatID);
1259 size_t PackSampleCount(GLint sampleCount);
1260 
1261 namespace gl_vk
1262 {
1263 VkRect2D GetRect(const gl::Rectangle &source);
1264 VkFilter GetFilter(const GLenum filter);
1265 VkSamplerMipmapMode GetSamplerMipmapMode(const GLenum filter);
1266 VkSamplerAddressMode GetSamplerAddressMode(const GLenum wrap);
1267 VkPrimitiveTopology GetPrimitiveTopology(gl::PrimitiveMode mode);
1268 VkPolygonMode GetPolygonMode(const gl::PolygonMode polygonMode);
1269 VkCullModeFlagBits GetCullMode(const gl::RasterizerState &rasterState);
1270 VkFrontFace GetFrontFace(GLenum frontFace, bool invertCullFace);
1271 VkSampleCountFlagBits GetSamples(GLint sampleCount, bool limitSampleCountTo2);
1272 VkComponentSwizzle GetSwizzle(const GLenum swizzle);
1273 VkCompareOp GetCompareOp(const GLenum compareFunc);
1274 VkStencilOp GetStencilOp(const GLenum compareOp);
1275 VkLogicOp GetLogicOp(const GLenum logicOp);
1276 
1277 constexpr gl::ShaderMap<VkShaderStageFlagBits> kShaderStageMap = {
1278     {gl::ShaderType::Vertex, VK_SHADER_STAGE_VERTEX_BIT},
1279     {gl::ShaderType::TessControl, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT},
1280     {gl::ShaderType::TessEvaluation, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT},
1281     {gl::ShaderType::Fragment, VK_SHADER_STAGE_FRAGMENT_BIT},
1282     {gl::ShaderType::Geometry, VK_SHADER_STAGE_GEOMETRY_BIT},
1283     {gl::ShaderType::Compute, VK_SHADER_STAGE_COMPUTE_BIT},
1284 };
1285 
1286 void GetOffset(const gl::Offset &glOffset, VkOffset3D *vkOffset);
1287 void GetExtent(const gl::Extents &glExtent, VkExtent3D *vkExtent);
1288 VkImageType GetImageType(gl::TextureType textureType);
1289 VkImageViewType GetImageViewType(gl::TextureType textureType);
1290 VkColorComponentFlags GetColorComponentFlags(bool red, bool green, bool blue, bool alpha);
1291 VkShaderStageFlags GetShaderStageFlags(gl::ShaderBitSet activeShaders);
1292 
1293 void GetViewport(const gl::Rectangle &viewport,
1294                  float nearPlane,
1295                  float farPlane,
1296                  bool invertViewport,
1297                  bool upperLeftOrigin,
1298                  GLint renderAreaHeight,
1299                  VkViewport *viewportOut);
1300 
1301 void GetExtentsAndLayerCount(gl::TextureType textureType,
1302                              const gl::Extents &extents,
1303                              VkExtent3D *extentsOut,
1304                              uint32_t *layerCountOut);
1305 
1306 vk::LevelIndex GetLevelIndex(gl::LevelIndex levelGL, gl::LevelIndex baseLevel);
1307 
1308 VkImageTiling GetTilingMode(gl::TilingMode tilingMode);
1309 
1310 }  // namespace gl_vk
1311 
1312 namespace vk_gl
1313 {
1314 // The Vulkan back-end will not support a sample count of 1, because of a Vulkan specification
1315 // restriction:
1316 //
1317 //   If the image was created with VkImageCreateInfo::samples equal to VK_SAMPLE_COUNT_1_BIT, the
1318 //   instruction must: have MS = 0.
1319 //
1320 // This restriction was tracked in http://anglebug.com/4196 and Khronos-private Vulkan
1321 // specification issue https://gitlab.khronos.org/vulkan/vulkan/issues/1925.
1322 //
1323 // In addition, the Vulkan back-end will not support sample counts of 32 or 64, since there are no
1324 // standard sample locations for those sample counts.
1325 constexpr unsigned int kSupportedSampleCounts = (VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT |
1326                                                  VK_SAMPLE_COUNT_8_BIT | VK_SAMPLE_COUNT_16_BIT);
1327 
1328 // Find set bits in sampleCounts and add the corresponding sample count to the set.
1329 void AddSampleCounts(VkSampleCountFlags sampleCounts, gl::SupportedSampleSet *outSet);
1330 // Return the maximum sample count with a bit set in |sampleCounts|.
1331 GLuint GetMaxSampleCount(VkSampleCountFlags sampleCounts);
1332 // Return a supported sample count that's at least as large as the requested one.
1333 GLuint GetSampleCount(VkSampleCountFlags supportedCounts, GLuint requestedCount);
1334 
1335 gl::LevelIndex GetLevelIndex(vk::LevelIndex levelVk, gl::LevelIndex baseLevel);
1336 }  // namespace vk_gl
1337 
1338 enum class RenderPassClosureReason
1339 {
1340     // Don't specify the reason (it should already be specified elsewhere)
1341     AlreadySpecifiedElsewhere,
1342 
1343     // Implicit closures due to flush/wait/etc.
1344     ContextDestruction,
1345     ContextChange,
1346     GLFlush,
1347     GLFinish,
1348     EGLSwapBuffers,
1349     EGLWaitClient,
1350     SurfaceUnMakeCurrent,
1351 
1352     // Closure due to switching rendering to another framebuffer.
1353     FramebufferBindingChange,
1354     FramebufferChange,
1355     NewRenderPass,
1356 
1357     // Incompatible use of resource in the same render pass
1358     BufferUseThenXfbWrite,
1359     XfbWriteThenVertexIndexBuffer,
1360     XfbWriteThenIndirectDrawBuffer,
1361     XfbResumeAfterDrawBasedClear,
1362     DepthStencilUseInFeedbackLoop,
1363     DepthStencilWriteAfterFeedbackLoop,
1364     PipelineBindWhileXfbActive,
1365 
1366     // Use of resource after render pass
1367     BufferWriteThenMap,
1368     BufferWriteThenOutOfRPRead,
1369     BufferUseThenOutOfRPWrite,
1370     ImageUseThenOutOfRPRead,
1371     ImageUseThenOutOfRPWrite,
1372     XfbWriteThenComputeRead,
1373     XfbWriteThenIndirectDispatchBuffer,
1374     ImageAttachmentThenComputeRead,
1375     GraphicsTextureImageAccessThenComputeAccess,
1376     GetQueryResult,
1377     BeginNonRenderPassQuery,
1378     EndNonRenderPassQuery,
1379     TimestampQuery,
1380     EndRenderPassQuery,
1381     GLReadPixels,
1382 
1383     // Synchronization
1384     BufferUseThenReleaseToExternal,
1385     ImageUseThenReleaseToExternal,
1386     BufferInUseWhenSynchronizedMap,
1387     GLMemoryBarrierThenStorageResource,
1388     StorageResourceUseThenGLMemoryBarrier,
1389     ExternalSemaphoreSignal,
1390     SyncObjectInit,
1391     SyncObjectWithFdInit,
1392     SyncObjectClientWait,
1393     SyncObjectServerWait,
1394     SyncObjectGetStatus,
1395 
1396     // Closures that ANGLE could have avoided, but doesn't for simplicity or optimization of more
1397     // common cases.
1398     XfbPause,
1399     FramebufferFetchEmulation,
1400     ColorBufferInvalidate,
1401     GenerateMipmapOnCPU,
1402     CopyTextureOnCPU,
1403     TextureReformatToRenderable,
1404     DeviceLocalBufferMap,
1405     OutOfReservedQueueSerialForOutsideCommands,
1406 
1407     // UtilsVk
1408     PrepareForBlit,
1409     PrepareForImageCopy,
1410     TemporaryForImageClear,
1411     TemporaryForImageCopy,
1412     TemporaryForOverlayDraw,
1413 
1414     // LegacyDithering requires updating the render pass
1415     LegacyDithering,
1416 
1417     // In case of memory budget issues, pending garbage needs to be freed.
1418     ExcessivePendingGarbage,
1419     OutOfMemory,
1420 
1421     InvalidEnum,
1422     EnumCount = InvalidEnum,
1423 };
1424 
1425 // The scope of synchronization for a sync object.  Synchronization is done between the signal
1426 // entity (src) and the entities waiting on the signal (dst)
1427 //
1428 // - For GL fence sync objects, src is the current context and dst is host / the rest of share
1429 // group.
1430 // - For EGL fence sync objects, src is the current context and dst is host / all other contexts.
1431 // - For EGL global fence sync objects (an ANGLE extension), src is all contexts that have
1432 //   previously made a submission to the queue used by the current context, and dst is host / all
1433 //   other contexts.
1434 enum class SyncFenceScope
1435 {
1436     CurrentContextToShareGroup,
1437     CurrentContextToAllContexts,
1438     AllContextsToAllContexts,
1439 };
1440 
1441 }  // namespace rx
1442 
1443 #define ANGLE_VK_TRY(context, command)                                                   \
1444     do                                                                                   \
1445     {                                                                                    \
1446         auto ANGLE_LOCAL_VAR = command;                                                  \
1447         if (ANGLE_UNLIKELY(ANGLE_LOCAL_VAR != VK_SUCCESS))                               \
1448         {                                                                                \
1449             (context)->handleError(ANGLE_LOCAL_VAR, __FILE__, ANGLE_FUNCTION, __LINE__); \
1450             return angle::Result::Stop;                                                  \
1451         }                                                                                \
1452     } while (0)
1453 
1454 #define ANGLE_VK_CHECK(context, test, error) ANGLE_VK_TRY(context, test ? VK_SUCCESS : error)
1455 
1456 #define ANGLE_VK_CHECK_MATH(context, result) \
1457     ANGLE_VK_CHECK(context, result, VK_ERROR_VALIDATION_FAILED_EXT)
1458 
1459 #define ANGLE_VK_CHECK_ALLOC(context, result) \
1460     ANGLE_VK_CHECK(context, result, VK_ERROR_OUT_OF_HOST_MEMORY)
1461 
1462 #define ANGLE_VK_UNREACHABLE(context) \
1463     UNREACHABLE();                    \
1464     ANGLE_VK_CHECK(context, false, VK_ERROR_FEATURE_NOT_PRESENT)
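// Illustrative sketch (not from the original header): typical use inside a function returning
// angle::Result; |contextVk|, |device|, |createInfo|, |sampler|, and |allocation| are hypothetical.
//
//   ANGLE_VK_TRY(contextVk, vkCreateSampler(device, &createInfo, nullptr, &sampler));
//   ANGLE_VK_CHECK_ALLOC(contextVk, allocation != nullptr);
//   return angle::Result::Continue;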
1465 
1466 // Returns VkResult in the case of an error.
1467 #define VK_RESULT_TRY(command)                             \
1468     do                                                     \
1469     {                                                      \
1470         auto ANGLE_LOCAL_VAR = command;                    \
1471         if (ANGLE_UNLIKELY(ANGLE_LOCAL_VAR != VK_SUCCESS)) \
1472         {                                                  \
1473             return ANGLE_LOCAL_VAR;                        \
1474         }                                                  \
1475     } while (0)
1476 
1477 #define VK_RESULT_CHECK(test, error) VK_RESULT_TRY((test) ? VK_SUCCESS : (error))
1478 
1479 // NVIDIA uses special formatting for the driver version:
1480 // Major: 10 bits
1481 // Minor: 8 bits
1482 // Sub-minor: 8 bits
1483 // Patch: 6 bits
1484 #define ANGLE_VK_VERSION_MAJOR_NVIDIA(version) (((uint32_t)(version) >> 22) & 0x3ff)
1485 #define ANGLE_VK_VERSION_MINOR_NVIDIA(version) (((uint32_t)(version) >> 14) & 0xff)
1486 #define ANGLE_VK_VERSION_SUB_MINOR_NVIDIA(version) (((uint32_t)(version) >> 6) & 0xff)
1487 #define ANGLE_VK_VERSION_PATCH_NVIDIA(version) ((uint32_t)(version) & 0x3f)
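// For example (illustrative arithmetic only): a driverVersion encoded as (528 << 22) | (49 << 14)
// decodes to ANGLE_VK_VERSION_MAJOR_NVIDIA == 528 and ANGLE_VK_VERSION_MINOR_NVIDIA == 49, with
// sub-minor and patch both 0.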
1488 
1489 // Similarly for Intel on Windows:
1490 // Major: 18 bits
1491 // Minor: 14 bits
1492 #define ANGLE_VK_VERSION_MAJOR_WIN_INTEL(version) (((uint32_t)(version) >> 14) & 0x3ffff)
1493 #define ANGLE_VK_VERSION_MINOR_WIN_INTEL(version) ((uint32_t)(version) & 0x3fff)
1494 
1495 #endif  // LIBANGLE_RENDERER_VULKAN_VK_UTILS_H_
1496