1 //
2 // Copyright 2018 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // vk_helpers:
7 // Helper utility classes that manage Vulkan resources.
8
9 #ifndef LIBANGLE_RENDERER_VULKAN_VK_HELPERS_H_
10 #define LIBANGLE_RENDERER_VULKAN_VK_HELPERS_H_
11
12 #include "common/MemoryBuffer.h"
13 #include "common/SimpleMutex.h"
14 #include "libANGLE/renderer/vulkan/MemoryTracking.h"
15 #include "libANGLE/renderer/vulkan/Suballocation.h"
16 #include "libANGLE/renderer/vulkan/vk_cache_utils.h"
17 #include "libANGLE/renderer/vulkan/vk_format_utils.h"
18 #include "libANGLE/renderer/vulkan/vk_ref_counted_event.h"
19
20 #include <functional>
21
22 namespace gl
23 {
24 class ImageIndex;
25 } // namespace gl
26
27 namespace rx
28 {
29 namespace vk
30 {
// Usage flags and alignments for the internal buffers backing vertex, index and indirect data.
// Each also carries STORAGE_BUFFER usage so the same buffer can additionally be bound as a
// storage buffer (the consumers of that usage live outside this header).
constexpr VkBufferUsageFlags kVertexBufferUsageFlags =
    VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr VkBufferUsageFlags kIndexBufferUsageFlags =
    VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr VkBufferUsageFlags kIndirectBufferUsageFlags =
    VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr size_t kVertexBufferAlignment = 4;
constexpr size_t kIndexBufferAlignment = 4;
constexpr size_t kIndirectBufferAlignment = 4;

// Staging buffers serve both uploads (TRANSFER_SRC) and readbacks (TRANSFER_DST).
constexpr VkBufferUsageFlags kStagingBufferFlags =
    VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
// Default staging buffer size: 16 KB.
constexpr size_t kStagingBufferSize = 1024 * 16;

// Convenience constant for creating images with no create flags.
constexpr VkImageCreateFlags kVkImageCreateFlagsNone = 0;

// Most likely initial chroma filter mode given GL_TEXTURE_EXTERNAL_OES default
// min & mag filters are linear.
constexpr VkFilter kDefaultYCbCrChromaFilter = VK_FILTER_LINEAR;

// Union of the pipeline stages in which a newly acquired swapchain image may first be used;
// presumably used as the wait-stage mask for the acquire semaphore (defined elsewhere).
constexpr VkPipelineStageFlags kSwapchainAcquireImageWaitStageFlags =
    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |          // First use is a blit command.
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |  // First use is a draw command.
    VK_PIPELINE_STAGE_TRANSFER_BIT;                  // First use is a clear without scissor.

// For each level, write layers that don't conflict in parallel. The layer is hashed to
// `layer % kMaxParallelLayerWrites` and used to track whether that subresource is currently
// being written. If so, a barrier is inserted; otherwise, the barrier is avoided. If the updated
// layer count is greater than kMaxParallelLayerWrites, there will be a few unnecessary
// barriers.
constexpr uint32_t kMaxParallelLayerWrites = 64;
using ImageLayerWriteMask = std::bitset<kMaxParallelLayerWrites>;

// Two offsets into a staging buffer; presumably one per plane/aspect of a multi-plane
// upload (e.g. depth and stencil) -- confirm against the users of this type.
using StagingBufferOffsetArray = std::array<VkDeviceSize, 2>;
65
66 // Imagine an image going through a few layout transitions:
67 //
68 // srcStage 1 dstStage 2 srcStage 2 dstStage 3
69 // Layout 1 ------Transition 1-----> Layout 2 ------Transition 2------> Layout 3
70 // srcAccess 1 dstAccess 2 srcAccess 2 dstAccess 3
71 // \_________________ ___________________/
72 // \/
73 // A transition
74 //
75 // Every transition requires 6 pieces of information: from/to layouts, src/dst stage masks and
76 // src/dst access masks. At the moment we decide to transition the image to Layout 2 (i.e.
77 // Transition 1), we need to have Layout 1, srcStage 1 and srcAccess 1 stored as history of the
78 // image. To perform the transition, we need to know Layout 2, dstStage 2 and dstAccess 2.
79 // Additionally, we need to know srcStage 2 and srcAccess 2 to retain them for the next transition.
80 //
81 // That is, with the history kept, on every new transition we need 5 pieces of new information:
82 // layout/dstStage/dstAccess to transition into the layout, and srcStage/srcAccess for the future
83 // transition out from it. Given the small number of possible combinations of these values, an
// enum is used where each value encapsulates these 5 pieces of information:
85 //
86 // +--------------------------------+
87 // srcStage 1 | dstStage 2 srcStage 2 | dstStage 3
88 // Layout 1 ------Transition 1-----> Layout 2 ------Transition 2------> Layout 3
89 // srcAccess 1 |dstAccess 2 srcAccess 2| dstAccess 3
90 // +--------------- ---------------+
91 // \/
92 // One enum value
93 //
94 // Note that, while generally dstStage for the to-transition and srcStage for the from-transition
95 // are the same, they may occasionally be BOTTOM_OF_PIPE and TOP_OF_PIPE respectively.
enum class ImageLayout
{
    Undefined = 0,
    // Framebuffer attachment layouts are placed first, so they can fit in fewer bits in
    // PackedAttachmentOpsDesc.  NOTE: the relative order of the enumerators is therefore
    // significant; do not reorder.

    // Color (Write):
    ColorWrite,
    // Used only with dynamic rendering, because it needs a different VkImageLayout
    ColorWriteAndInput,
    MSRTTEmulationColorUnresolveAndResolve,

    // Depth (Write), Stencil (Write)
    DepthWriteStencilWrite,
    // Used only with dynamic rendering, because it needs a different VkImageLayout. For
    // simplicity, depth/stencil attachments when used as input attachments don't attempt to
    // distinguish read-only aspects. That's only useful for supporting feedback loops, but if an
    // application is reading depth or stencil through an input attachment, it's safe to assume
    // they wouldn't be accessing the other aspect through a sampler!
    DepthStencilWriteAndInput,

    // Depth (Write), Stencil (Read)
    DepthWriteStencilRead,
    DepthWriteStencilReadFragmentShaderStencilRead,
    DepthWriteStencilReadAllShadersStencilRead,

    // Depth (Read), Stencil (Write)
    DepthReadStencilWrite,
    DepthReadStencilWriteFragmentShaderDepthRead,
    DepthReadStencilWriteAllShadersDepthRead,

    // Depth (Read), Stencil (Read)
    DepthReadStencilRead,
    DepthReadStencilReadFragmentShaderRead,
    DepthReadStencilReadAllShadersRead,

    // The GENERAL layout is used when there's a feedback loop. For depth/stencil it doesn't
    // matter which aspect is participating in feedback and whether the other aspect is
    // read-only.
    ColorWriteFragmentShaderFeedback,
    ColorWriteAllShadersFeedback,
    DepthStencilFragmentShaderFeedback,
    DepthStencilAllShadersFeedback,

    // Depth/stencil resolve is special because it uses the _color_ output stage and mask
    DepthStencilResolve,
    MSRTTEmulationDepthStencilUnresolveAndResolve,

    Present,
    SharedPresent,
    // The rest of the layouts.
    ExternalPreInitialized,
    ExternalShadersReadOnly,
    ExternalShadersWrite,
    // Image is owned by the FOREIGN queue family (see Context::onForeignImageUse below).
    ForeignAccess,
    TransferSrc,
    TransferDst,
    // Usable as both transfer source and transfer destination.
    TransferSrcDst,
    // Used when the image is transitioned on the host for use by host image copy
    HostCopy,
    VertexShaderReadOnly,
    VertexShaderWrite,
    // PreFragment == Vertex, Tessellation and Geometry stages
    PreFragmentShadersReadOnly,
    PreFragmentShadersWrite,
    FragmentShadingRateAttachmentReadOnly,
    FragmentShaderReadOnly,
    FragmentShaderWrite,
    ComputeShaderReadOnly,
    ComputeShaderWrite,
    AllGraphicsShadersReadOnly,
    AllGraphicsShadersWrite,
    TransferDstAndComputeWrite,

    InvalidEnum,
    EnumCount = InvalidEnum,
};
172
// Returns the VkImageCreateFlags required for images of the given GL texture type.
VkImageCreateFlags GetImageCreateFlags(gl::TextureType textureType);

// Converts a GLenum image layout (presumably from the EXT image-layout extensions -- see the
// definition for the accepted values) to ANGLE's internal ImageLayout, and back.
ImageLayout GetImageLayoutFromGLImageLayout(ErrorContext *context, GLenum layout);

GLenum ConvertImageLayoutToGLImageLayout(ImageLayout imageLayout);

// Maps ANGLE's ImageLayout (which also encodes stage/access information, see above) to the
// plain Vulkan VkImageLayout it corresponds to.
VkImageLayout ConvertImageLayoutToVkImageLayout(ImageLayout imageLayout);

class ImageHelper;
182
183 // Abstracts contexts where command recording is done in response to API calls, and includes
184 // data structures that are Vulkan-related, need to be accessed by the internals of |namespace vk|
185 // object, but are otherwise managed by these API objects.
186 class Context : public ErrorContext
187 {
188 public:
189 Context(Renderer *renderer);
190 virtual ~Context() override;
191
getRefCountedEventsGarbageRecycler()192 RefCountedEventsGarbageRecycler *getRefCountedEventsGarbageRecycler()
193 {
194 return mShareGroupRefCountedEventsGarbageRecycler;
195 }
196
197 void onForeignImageUse(ImageHelper *image);
198 void finalizeForeignImage(ImageHelper *image);
199 void finalizeAllForeignImages();
200
201 protected:
hasForeignImagesToTransition()202 bool hasForeignImagesToTransition() const
203 {
204 return !mForeignImagesInUse.empty() || !mImagesToTransitionToForeign.empty();
205 }
206
207 // Stash the ShareGroupVk's RefCountedEventRecycler here ImageHelper to conveniently access
208 RefCountedEventsGarbageRecycler *mShareGroupRefCountedEventsGarbageRecycler;
209 // List of foreign images that are currently used in recorded commands but haven't been
210 // submitted. The use of these images has not yet finalized.
211 angle::HashSet<ImageHelper *> mForeignImagesInUse;
212 // List of image barriers for foreign images to transition them back to the FOREIGN queue on
213 // submission. Once the use of an ImageHelper is finalized, e.g. because it is being deleted,
214 // or the commands are about to be submitted, a queue family ownership transfer is generated for
215 // it (thus far residing in |mForeignImagesInUse|) and added to |mImagesToTransitionToForeign|,
216 // it's marked as belonging to the foreign queue, and removed from |mForeignImagesInUse|.
217 std::vector<VkImageMemoryBarrier> mImagesToTransitionToForeign;
218 };
219
// A dynamic buffer is conceptually an infinitely long buffer. Each time you write to the buffer,
// you will always write to a previously unused portion. After a series of writes, you must flush
// the buffer data to the device. Buffer lifetime currently assumes that each new allocation will
// last as long or longer than each prior allocation.
//
// Dynamic buffers are used to implement a variety of data streaming operations in Vulkan, such
// as for immediate vertex array and element array data, uniform updates, and other dynamic data.
//
// Internally dynamic buffers keep a collection of VkBuffers. When we write past the end of a
// currently active VkBuffer we keep it until it is no longer in use. We then mark it available
// for future allocations in a free list.
class BufferHelper;
// FIFO of heap-allocated BufferHelpers; used below for both the in-flight list and free list.
using BufferHelperQueue = std::deque<std::unique_ptr<BufferHelper>>;
233
class DynamicBuffer : angle::NonCopyable
{
  public:
    DynamicBuffer();
    DynamicBuffer(DynamicBuffer &&other);
    ~DynamicBuffer();

    // Sets the usage, alignment, initial size and host visibility for the buffers this object
    // will create. No Vulkan object is created here.
    void init(Renderer *renderer,
              VkBufferUsageFlags usage,
              size_t alignment,
              size_t initialSize,
              bool hostVisible);

    // This call will allocate a new region at the end of the current buffer. If it can't find
    // enough space in the current buffer, it returns false. This gives caller a chance to deal
    // with buffer switch that may occur with allocate call.
    bool allocateFromCurrentBuffer(size_t sizeInBytes, BufferHelper **bufferHelperOut);

    // This call will allocate a new region at the end of the buffer with default alignment. It
    // internally may trigger a new buffer to be created (which is returned in the optional
    // parameter `newBufferAllocatedOut`). The new region will be in the returned buffer at given
    // offset.
    angle::Result allocate(Context *context,
                           size_t sizeInBytes,
                           BufferHelper **bufferHelperOut,
                           bool *newBufferAllocatedOut);

    // This releases resources when they might currently be in use.
    void release(Context *context);

    // This adds in-flight buffers to the mResourceUseList in the share group and then releases
    // them.
    void updateQueueSerialAndReleaseInFlightBuffers(ContextVk *contextVk,
                                                    const QueueSerial &queueSerial);

    // This frees resources immediately.
    void destroy(Renderer *renderer);

    // The buffer currently being allocated from; may be null before the first allocation.
    BufferHelper *getCurrentBuffer() const { return mBuffer.get(); }

    // **Accumulate** an alignment requirement. A dynamic buffer is used as the staging buffer
    // for image uploads, which can contain updates to unrelated mips, possibly with different
    // formats. The staging buffer should have an alignment that can satisfy all those formats,
    // i.e. it's the lcm of all alignments set in its lifetime.
    void requireAlignment(Renderer *renderer, size_t alignment);
    size_t getAlignment() const { return mAlignment; }

    // For testing only!
    void setMinimumSizeForTesting(size_t minSize);

    // Whether the backing memory is host-coherent (no explicit flush/invalidate needed).
    bool isCoherent() const
    {
        return (mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
    }

    // Valid once a non-zero size has been established.
    bool valid() const { return mSize != 0; }

  private:
    void reset();
    angle::Result allocateNewBuffer(ErrorContext *context);

    // Parameters captured at init() time:
    VkBufferUsageFlags mUsage;
    bool mHostVisible;
    size_t mInitialSize;
    // The buffer currently being allocated from, and the offset of the next allocation in it.
    std::unique_ptr<BufferHelper> mBuffer;
    uint32_t mNextAllocationOffset;
    // Size of the current buffer; zero means not yet initialized (see valid()).
    size_t mSize;
    // Recently observed size; presumably used to right-size newly created buffers -- see the
    // .cpp for the exact policy.
    size_t mSizeInRecentHistory;
    // Accumulated alignment requirement (see requireAlignment()).
    size_t mAlignment;
    VkMemoryPropertyFlags mMemoryPropertyFlags;

    // Buffers that may still be in use by pending commands, and fully released buffers that
    // can be reused for future allocations.
    BufferHelperQueue mInFlightBuffers;
    BufferHelperQueue mBufferFreeList;
};
308
// Class DescriptorSetHelper. This is a wrapper of VkDescriptorSet with GPU resource use tracking.
using DescriptorPoolPointer = SharedPtr<DescriptorPoolHelper>;
using DescriptorPoolWeakPointer = WeakPtr<DescriptorPoolHelper>;
class DescriptorSetHelper final : public Resource
{
  public:
    DescriptorSetHelper() : mDescriptorSet(VK_NULL_HANDLE), mLastUsedFrame(0) {}
    DescriptorSetHelper(const VkDescriptorSet &descriptorSet, const DescriptorPoolPointer &pool)
        : mDescriptorSet(descriptorSet), mPool(pool), mLastUsedFrame(0)
    {}
    // Same as above, but seeds the GPU resource-use tracking from |use|.
    DescriptorSetHelper(const ResourceUse &use,
                        const VkDescriptorSet &descriptorSet,
                        const DescriptorPoolPointer &pool)
        : mDescriptorSet(descriptorSet), mPool(pool), mLastUsedFrame(0)
    {
        mUse = use;
    }
    // Move leaves |other| empty (null handle, reset pool).
    // NOTE(review): consider marking this noexcept if Resource's move permits it, so
    // containers can move rather than copy on reallocation -- confirm before changing.
    DescriptorSetHelper(DescriptorSetHelper &&other)
        : Resource(std::move(other)),
          mDescriptorSet(other.mDescriptorSet),
          mPool(other.mPool),
          mLastUsedFrame(other.mLastUsedFrame)
    {
        other.mDescriptorSet = VK_NULL_HANDLE;
        other.mPool.reset();
        other.mLastUsedFrame = 0;
    }

    // The set must have been destroyed (or moved from) before this destructor runs.
    ~DescriptorSetHelper() override
    {
        ASSERT(mDescriptorSet == VK_NULL_HANDLE);
        ASSERT(!mPool);
    }

    void destroy(VkDevice device);

    VkDescriptorSet getDescriptorSet() const { return mDescriptorSet; }
    DescriptorPoolWeakPointer &getPool() { return mPool; }

    bool valid() const { return mDescriptorSet != VK_NULL_HANDLE; }

    // Frame tracking, used for LRU-style eviction decisions (see DynamicDescriptorPool).
    void updateLastUsedFrame(uint32_t frame) { mLastUsedFrame = frame; }
    uint32_t getLastUsedFrame() const { return mLastUsedFrame; }

  private:
    VkDescriptorSet mDescriptorSet;
    // So that DescriptorPoolHelper::resetGarbage can clear mPool weak pointer here
    friend class DescriptorPoolHelper;
    // We hold weak pointer here due to DynamicDescriptorPool::allocateNewPool() and
    // DynamicDescriptorPool::checkAndReleaseUnusedPool() rely on pool's refcount to tell if it
    // is eligible for eviction or not.
    DescriptorPoolWeakPointer mPool;
    // The frame that it was last used.
    uint32_t mLastUsedFrame;
};
using DescriptorSetPointer = SharedPtr<DescriptorSetHelper>;
using DescriptorSetList = std::deque<DescriptorSetPointer>;
366
367 // Uses DescriptorPool to allocate descriptor sets as needed. If a descriptor pool becomes full, we
368 // allocate new pools internally as needed. Renderer takes care of the lifetime of the discarded
// pools. Note that we use a fixed layout for descriptor pools in ANGLE.
370
371 // Shared handle to a descriptor pool. Each helper is allocated from the dynamic descriptor pool.
372 // Can be used to share descriptor pools between multiple ProgramVks and the ContextVk.
class DescriptorPoolHelper final : angle::NonCopyable
{
  public:
    DescriptorPoolHelper();
    ~DescriptorPoolHelper();

    bool valid() { return mDescriptorPool.valid(); }

    // Creates the underlying VkDescriptorPool with the given sizes and maximum set count.
    angle::Result init(ErrorContext *context,
                       const std::vector<VkDescriptorPoolSize> &poolSizesIn,
                       uint32_t maxSets);
    void destroy(VkDevice device);

    // Allocates one set from this pool; returns false on failure (e.g. pool exhausted).
    bool allocateDescriptorSet(ErrorContext *context,
                               const DescriptorSetLayout &descriptorSetLayout,
                               const DescriptorPoolPointer &pool,
                               DescriptorSetPointer *descriptorSetOut);

    // Pending garbage: released but possibly still in use by the GPU.
    void addPendingGarbage(DescriptorSetPointer &&garbage)
    {
        ASSERT(garbage.unique());
        mValidDescriptorSets--;
        mPendingGarbageList.emplace_back(std::move(garbage));
    }
    // Finished garbage: released and known to be GPU-complete, eligible for recycling.
    void addFinishedGarbage(DescriptorSetPointer &&garbage)
    {
        ASSERT(garbage.unique());
        mValidDescriptorSets--;
        mFinishedGarbageList.emplace_back(std::move(garbage));
    }
    bool recycleFromGarbage(Renderer *renderer, DescriptorSetPointer *descriptorSetOut);
    void destroyGarbage();
    void cleanupPendingGarbage();

    bool hasValidDescriptorSet() const { return mValidDescriptorSets != 0; }
    bool canDestroy() const { return mValidDescriptorSets == 0 && mPendingGarbageList.empty(); }

  private:
    bool allocateVkDescriptorSet(ErrorContext *context,
                                 const DescriptorSetLayout &descriptorSetLayout,
                                 VkDescriptorSet *descriptorSetOut);

    Renderer *mRenderer;

    // The initial number of descriptorSets when the pool is created. This should equal
    // mValidDescriptorSets + mFinishedGarbageList.size() + mPendingGarbageList.size() +
    // mFreeDescriptorSets.
    uint32_t mMaxDescriptorSets;
    // Track the number of descriptorSets allocated out of this pool that are valid.
    // DescriptorSets that have been allocated but moved to a garbage list are considered
    // invalid.
    uint32_t mValidDescriptorSets;
    // The number of remaining descriptorSets in the pool that remain to be allocated.
    uint32_t mFreeDescriptorSets;

    DescriptorPool mDescriptorPool;

    // Keeps track descriptorSets that has been released. Because freeing descriptorSet require
    // DescriptorPool, we store individually released descriptor sets here instead of usual
    // garbage list in the renderer to avoid complicated threading issues and other weirdness
    // associated with pooled object destruction. This list is mutually exclusive with the
    // DynamicDescriptorPool's mDescriptorSetCache.
    DescriptorSetList mFinishedGarbageList;
    DescriptorSetList mPendingGarbageList;
};
435
class DynamicDescriptorPool final : angle::NonCopyable
{
  public:
    DynamicDescriptorPool();
    ~DynamicDescriptorPool();

    DynamicDescriptorPool(DynamicDescriptorPool &&other);
    DynamicDescriptorPool &operator=(DynamicDescriptorPool &&other);

    // The DynamicDescriptorPool only handles one pool size at this time.
    // Note that setSizes[i].descriptorCount is expected to be the number of descriptors in
    // an individual set. The pool size will be calculated accordingly.
    angle::Result init(ErrorContext *context,
                       const VkDescriptorPoolSize *setSizes,
                       size_t setSizeCount,
                       const DescriptorSetLayout &descriptorSetLayout);

    void destroy(VkDevice device);

    bool valid() const { return !mDescriptorPools.empty(); }

    // We use the descriptor type to help count the number of free sets.
    // By convention, sets are indexed according to the constants in vk_cache_utils.h.
    angle::Result allocateDescriptorSet(ErrorContext *context,
                                        const DescriptorSetLayout &descriptorSetLayout,
                                        DescriptorSetPointer *descriptorSetOut);

    // Cache-aware allocation: returns an existing set matching |desc| if cached, otherwise
    // allocates a new one and registers it under |desc|.
    angle::Result getOrAllocateDescriptorSet(Context *context,
                                             uint32_t currentFrame,
                                             const DescriptorSetDesc &desc,
                                             const DescriptorSetLayout &descriptorSetLayout,
                                             DescriptorSetPointer *descriptorSetOut,
                                             SharedDescriptorSetCacheKey *sharedCacheKeyOut);

    void releaseCachedDescriptorSet(Renderer *renderer, const DescriptorSetDesc &desc);
    void destroyCachedDescriptorSet(Renderer *renderer, const DescriptorSetDesc &desc);

    template <typename Accumulator>
    void accumulateDescriptorCacheStats(VulkanCacheType cacheType, Accumulator *accum) const
    {
        accum->accumulateCacheStats(cacheType, mCacheStats);
    }
    void resetDescriptorCacheStats() { mCacheStats.resetHitAndMissCount(); }
    size_t getTotalCacheKeySizeBytes() const
    {
        return mDescriptorSetCache.getTotalCacheKeySizeBytes();
    }

    // Release the pool if it is no longer been used and contains no valid descriptorSet.
    void destroyUnusedPool(Renderer *renderer, const DescriptorPoolWeakPointer &pool);
    void checkAndDestroyUnusedPool(Renderer *renderer);

    // For ASSERT use only. Return true if mDescriptorSetCache contains DescriptorSet for desc.
    bool hasCachedDescriptorSet(const DescriptorSetDesc &desc) const;
    // For testing only!
    static uint32_t GetMaxSetsPerPoolForTesting();
    static void SetMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool);
    static uint32_t GetMaxSetsPerPoolMultiplierForTesting();
    static void SetMaxSetsPerPoolMultiplierForTesting(uint32_t maxSetsPerPool);

  private:
    angle::Result allocateNewPool(ErrorContext *context);
    bool allocateFromExistingPool(ErrorContext *context,
                                  const DescriptorSetLayout &descriptorSetLayout,
                                  DescriptorSetPointer *descriptorSetOut);
    bool recycleFromGarbage(Renderer *renderer, DescriptorSetPointer *descriptorSetOut);
    bool evictStaleDescriptorSets(Renderer *renderer,
                                  uint32_t oldestFrameToKeep,
                                  uint32_t currentFrame);

    // Upper bound and growth knobs for per-pool set counts (mutable for testing only).
    static constexpr uint32_t kMaxSetsPerPoolMax = 512;
    static uint32_t mMaxSetsPerPool;
    static uint32_t mMaxSetsPerPoolMultiplier;
    std::vector<DescriptorPoolPointer> mDescriptorPools;
    std::vector<VkDescriptorPoolSize> mPoolSizes;
    // This cached handle is used for verifying the layout being used to allocate descriptor sets
    // from the pool matches the layout that the pool was created for, to ensure that the free
    // descriptor count is accurate and new pools are created appropriately.
    VkDescriptorSetLayout mCachedDescriptorSetLayout;

    // LRU list for cache eviction: most recent used at front, least used at back.
    struct DescriptorSetLRUEntry
    {
        SharedDescriptorSetCacheKey sharedCacheKey;
        DescriptorSetPointer descriptorSet;
    };
    using DescriptorSetLRUList = std::list<DescriptorSetLRUEntry>;
    using DescriptorSetLRUListIterator = DescriptorSetLRUList::iterator;
    DescriptorSetLRUList mLRUList;
    // Tracks cache for descriptorSet. Note that cached DescriptorSet can be reused even if it
    // is GPU busy.
    DescriptorSetCache<DescriptorSetLRUListIterator> mDescriptorSetCache;
    // Statistics for the cache.
    CacheStats mCacheStats;
};
using DynamicDescriptorPoolPointer = SharedPtr<DynamicDescriptorPool>;
532
533 // Maps from a descriptor set layout (represented by DescriptorSetLayoutDesc) to a set of
534 // DynamicDescriptorPools. The purpose of the class is so multiple GL Programs can share descriptor
535 // set caches. We need to stratify the sets by the descriptor set layout to ensure compatibility.
class MetaDescriptorPool final : angle::NonCopyable
{
  public:
    MetaDescriptorPool();
    ~MetaDescriptorPool();

    void destroy(Renderer *renderer);

    // Returns (creating on first use) the DynamicDescriptorPool associated with the given
    // descriptor set layout description.
    angle::Result bindCachedDescriptorPool(ErrorContext *context,
                                           const DescriptorSetLayoutDesc &descriptorSetLayoutDesc,
                                           uint32_t descriptorCountMultiplier,
                                           DescriptorSetLayoutCache *descriptorSetLayoutCache,
                                           DynamicDescriptorPoolPointer *dynamicDescriptorPoolOut);

    // Aggregates cache statistics across all owned pools.
    template <typename Accumulator>
    void accumulateDescriptorCacheStats(VulkanCacheType cacheType, Accumulator *accum) const
    {
        for (const auto &iter : mPayload)
        {
            const vk::DynamicDescriptorPoolPointer &pool = iter.second;
            pool->accumulateDescriptorCacheStats(cacheType, accum);
        }
    }

    void resetDescriptorCacheStats()
    {
        for (auto &iter : mPayload)
        {
            vk::DynamicDescriptorPoolPointer &pool = iter.second;
            pool->resetDescriptorCacheStats();
        }
    }

    // Sums the cache key footprint of every owned pool (for memory reporting).
    size_t getTotalCacheKeySizeBytes() const
    {
        size_t totalSize = 0;

        for (const auto &iter : mPayload)
        {
            const DynamicDescriptorPoolPointer &pool = iter.second;
            totalSize += pool->getTotalCacheKeySizeBytes();
        }

        return totalSize;
    }

  private:
    // One DynamicDescriptorPool per descriptor set layout.
    std::unordered_map<DescriptorSetLayoutDesc, DynamicDescriptorPoolPointer> mPayload;
};
585
586 template <typename Pool>
587 class DynamicallyGrowingPool : angle::NonCopyable
588 {
589 public:
590 DynamicallyGrowingPool();
591 virtual ~DynamicallyGrowingPool();
592
isValid()593 bool isValid() { return mPoolSize > 0; }
594
595 protected:
596 angle::Result initEntryPool(ErrorContext *contextVk, uint32_t poolSize);
597
598 virtual void destroyPoolImpl(VkDevice device, Pool &poolToDestroy) = 0;
599 void destroyEntryPool(VkDevice device);
600
601 // Checks to see if any pool is already free, in which case it sets it as current pool and
602 // returns true.
603 bool findFreeEntryPool(ContextVk *contextVk);
604
605 // Allocates a new entry and initializes it with the given pool.
606 angle::Result allocateNewEntryPool(ContextVk *contextVk, Pool &&pool);
607
608 // Called by the implementation whenever an entry is freed.
609 void onEntryFreed(ContextVk *contextVk, size_t poolIndex, const ResourceUse &use);
610
getPool(size_t index)611 const Pool &getPool(size_t index) const
612 {
613 return const_cast<DynamicallyGrowingPool *>(this)->getPool(index);
614 }
615
getPool(size_t index)616 Pool &getPool(size_t index)
617 {
618 ASSERT(index < mPools.size());
619 return mPools[index].pool;
620 }
621
getPoolSize()622 uint32_t getPoolSize() const { return mPoolSize; }
623
624 virtual angle::Result allocatePoolImpl(ContextVk *contextVk,
625 Pool &poolToAllocate,
626 uint32_t entriesToAllocate) = 0;
627 angle::Result allocatePoolEntries(ContextVk *contextVk,
628 uint32_t entryCount,
629 uint32_t *poolIndexOut,
630 uint32_t *currentEntryOut);
631
632 private:
633 // The pool size, to know when a pool is completely freed.
634 uint32_t mPoolSize;
635
636 struct PoolResource : public Resource
637 {
638 PoolResource(Pool &&poolIn, uint32_t freedCountIn);
639 PoolResource(PoolResource &&other);
640
641 Pool pool;
642
643 // A count corresponding to each pool indicating how many of its allocated entries
644 // have been freed. Once that value reaches mPoolSize for each pool, that pool is considered
645 // free and reusable. While keeping a bitset would allow allocation of each index, the
646 // slight runtime overhead of finding free indices is not worth the slight memory overhead
647 // of creating new pools when unnecessary.
648 uint32_t freedCount;
649 };
650 std::vector<PoolResource> mPools;
651
652 // Index into mPools indicating pool we are currently allocating from.
653 size_t mCurrentPool;
654 // Index inside mPools[mCurrentPool] indicating which index can be allocated next.
655 uint32_t mCurrentFreeEntry;
656 };
657
// DynamicQueryPool allocates indices out of QueryPool as needed. Once a QueryPool is exhausted,
// another is created. The query pools live permanently, but are recycled as indices get freed.

// These are arbitrary default sizes for query pools.
constexpr uint32_t kDefaultOcclusionQueryPoolSize = 64;
constexpr uint32_t kDefaultTimestampQueryPoolSize = 64;
constexpr uint32_t kDefaultTransformFeedbackQueryPoolSize = 128;
constexpr uint32_t kDefaultPrimitivesGeneratedQueryPoolSize = 128;

// Forward declared; DynamicQueryPool's interface below refers to it.
class QueryHelper;
668
class DynamicQueryPool final : public DynamicallyGrowingPool<QueryPool>
{
  public:
    DynamicQueryPool();
    ~DynamicQueryPool() override;

    // |type| determines the kind of queries (occlusion, timestamp, etc.) every pool created by
    // this object will hold; |poolSize| is the number of queries per pool.
    angle::Result init(ContextVk *contextVk, VkQueryType type, uint32_t poolSize);
    void destroy(VkDevice device);

    // Allocates |queryCount| consecutive query indices; frees them back to the pool.
    angle::Result allocateQuery(ContextVk *contextVk, QueryHelper *queryOut, uint32_t queryCount);
    void freeQuery(ContextVk *contextVk, QueryHelper *query);

    const QueryPool &getQueryPool(size_t index) const { return getPool(index); }

  private:
    angle::Result allocatePoolImpl(ContextVk *contextVk,
                                   QueryPool &poolToAllocate,
                                   uint32_t entriesToAllocate) override;
    void destroyPoolImpl(VkDevice device, QueryPool &poolToDestroy) override;

    // Information required to create new query pools
    VkQueryType mQueryType;
};
692
693 // Stores the result of a Vulkan query call. XFB queries in particular store two result values.
694 class QueryResult final
695 {
696 public:
QueryResult(uint32_t intsPerResult)697 QueryResult(uint32_t intsPerResult) : mIntsPerResult(intsPerResult), mResults{} {}
698
699 void operator+=(const QueryResult &rhs)
700 {
701 mResults[0] += rhs.mResults[0];
702 mResults[1] += rhs.mResults[1];
703 }
704
getDataSize()705 size_t getDataSize() const { return mIntsPerResult * sizeof(uint64_t); }
706 void setResults(uint64_t *results, uint32_t queryCount);
getResult(size_t index)707 uint64_t getResult(size_t index) const
708 {
709 ASSERT(index < mIntsPerResult);
710 return mResults[index];
711 }
712
713 static constexpr size_t kDefaultResultIndex = 0;
714 static constexpr size_t kTransformFeedbackPrimitivesWrittenIndex = 0;
715 static constexpr size_t kPrimitivesGeneratedIndex = 1;
716
717 private:
718 uint32_t mIntsPerResult;
719 std::array<uint64_t, 2> mResults;
720 };
721
722 // Queries in Vulkan are identified by the query pool and an index for a query within that pool.
723 // Unlike other pools, such as descriptor pools where an allocation returns an independent object
724 // from the pool, the query allocations are not done through a Vulkan function and are only an
725 // integer index.
726 //
727 // Furthermore, to support arbitrarily large number of queries, DynamicQueryPool creates query pools
728 // of a fixed size as needed and allocates indices within those pools.
729 //
730 // The QueryHelper class below keeps the pool and index pair together. For multiview, multiple
731 // consecutive query indices are implicitly written to by the driver, so the query count is
732 // additionally kept.
class QueryHelper final : public Resource
{
  public:
    QueryHelper();
    ~QueryHelper() override;
    QueryHelper(QueryHelper &&rhs);
    QueryHelper &operator=(QueryHelper &&rhs);
    // Binds this helper to |queryCount| queries starting at index |query| of pool
    // |queryPoolIndex| within |dynamicQueryPool|.
    void init(const DynamicQueryPool *dynamicQueryPool,
              const size_t queryPoolIndex,
              uint32_t query,
              uint32_t queryCount);
    void deinit();

    bool valid() const { return mDynamicQueryPool != nullptr; }

    // Begin/end queries. These functions break the render pass.
    angle::Result beginQuery(ContextVk *contextVk);
    angle::Result endQuery(ContextVk *contextVk);
    // Begin/end queries within a started render pass.
    angle::Result beginRenderPassQuery(ContextVk *contextVk);
    void endRenderPassQuery(ContextVk *contextVk);

    angle::Result flushAndWriteTimestamp(ContextVk *contextVk);
    // When syncing gpu/cpu time, main thread accesses primary directly
    void writeTimestampToPrimary(ContextVk *contextVk, PrimaryCommandBuffer *primary);
    // All other timestamp accesses should be made on outsideRenderPassCommandBuffer
    void writeTimestamp(ContextVk *contextVk,
                        OutsideRenderPassCommandBuffer *outsideRenderPassCommandBuffer);

    // Whether this query helper has generated and submitted any commands.
    bool hasSubmittedCommands() const;

    // Non-blocking: |availableOut| is false if results are not yet ready.
    angle::Result getUint64ResultNonBlocking(ContextVk *contextVk,
                                             QueryResult *resultOut,
                                             bool *availableOut);
    // Blocking variant: waits until the result is available.
    angle::Result getUint64Result(ContextVk *contextVk, QueryResult *resultOut);

  private:
    friend class DynamicQueryPool;
    const QueryPool &getQueryPool() const
    {
        ASSERT(valid());
        return mDynamicQueryPool->getQueryPool(mQueryPoolIndex);
    }

    // Reset needs to always be done outside a render pass, which may be different from the
    // passed-in command buffer (which could be the render pass').
    template <typename CommandBufferT>
    void beginQueryImpl(ContextVk *contextVk,
                        OutsideRenderPassCommandBuffer *resetCommandBuffer,
                        CommandBufferT *commandBuffer);
    template <typename CommandBufferT>
    void endQueryImpl(ContextVk *contextVk, CommandBufferT *commandBuffer);
    template <typename CommandBufferT>
    void resetQueryPoolImpl(ContextVk *contextVk,
                            const QueryPool &queryPool,
                            CommandBufferT *commandBuffer);
    VkResult getResultImpl(ContextVk *contextVk,
                           const VkQueryResultFlags flags,
                           QueryResult *resultOut);

    // Identifies the allocation: owning pool object, pool index within it, first query index,
    // and how many consecutive queries belong to this helper (multiview writes several).
    const DynamicQueryPool *mDynamicQueryPool;
    size_t mQueryPoolIndex;
    uint32_t mQuery;
    uint32_t mQueryCount;

    // Tracks whether the query is between begin/end or already ended.
    enum class QueryStatus
    {
        Inactive,
        Active,
        Ended
    };
    QueryStatus mStatus;
};
807
// Semaphores that are allocated from the semaphore pool are encapsulated in a helper object,
// keeping track of where in the pool they are allocated from.
class SemaphoreHelper final : angle::NonCopyable
{
  public:
    SemaphoreHelper();
    ~SemaphoreHelper();

    // Movable so helpers can be handed out by the pool; the moved-from helper becomes empty.
    SemaphoreHelper(SemaphoreHelper &&other);
    SemaphoreHelper &operator=(SemaphoreHelper &&other);

    // Associates this helper with |semaphore|, allocated at pool index |semaphorePoolIndex|.
    void init(const size_t semaphorePoolIndex, const Semaphore *semaphore);
    void deinit();

    // Non-owning pointer to the pooled semaphore; nullptr until init() is called.
    const Semaphore *getSemaphore() const { return mSemaphore; }

    // Used only by DynamicSemaphorePool.
    size_t getSemaphorePoolIndex() const { return mSemaphorePoolIndex; }

  private:
    size_t mSemaphorePoolIndex;
    const Semaphore *mSemaphore;
};
831
// This defines enum for VkPipelineStageFlagBits so that we can use it to compare and index into
// array.
enum class PipelineStage : uint32_t
{
    // Below are ordered based on Graphics Pipeline Stages
    TopOfPipe              = 0,
    DrawIndirect           = 1,
    VertexInput            = 2,
    VertexShader           = 3,
    TessellationControl    = 4,
    TessellationEvaluation = 5,
    GeometryShader         = 6,
    TransformFeedback      = 7,
    FragmentShadingRate    = 8,
    EarlyFragmentTest      = 9,
    FragmentShader         = 10,
    LateFragmentTest       = 11,
    ColorAttachmentOutput  = 12,

    // Compute specific pipeline Stage
    ComputeShader = 13,

    // Transfer specific pipeline Stage
    Transfer     = 14,
    BottomOfPipe = 15,

    // Host specific pipeline stage
    Host = 16,

    InvalidEnum = 17,
    EnumCount   = InvalidEnum,
};
using PipelineStagesMask = angle::PackedEnumBitSet<PipelineStage, uint32_t>;

// Maps a shader type to the PipelineStage that executes it.
PipelineStage GetPipelineStage(gl::ShaderType stage);
867
// Per-ImageLayout barrier parameters: the Vulkan layout, the stage/access masks used when
// transitioning into and out of the layout, and bookkeeping for barrier/event tracking.
struct ImageMemoryBarrierData
{
    // Human-readable layout name, used for debugging/diagnostics output.
    const char *name;

    // The Vk layout corresponding to the ImageLayout key.
    VkImageLayout layout;

    // The stage in which the image is used (or Bottom/Top if not using any specific stage). Unless
    // Bottom/Top (Bottom used for transition to and Top used for transition from), the two values
    // should match.
    VkPipelineStageFlags dstStageMask;
    VkPipelineStageFlags srcStageMask;
    // Access mask when transitioning into this layout.
    VkAccessFlags dstAccessMask;
    // Access mask when transitioning out from this layout. Note that source access mask never
    // needs a READ bit, as WAR hazards don't need memory barriers (just execution barriers).
    VkAccessFlags srcAccessMask;
    // Read or write.
    ResourceAccess type;
    // *CommandBufferHelper track an array of PipelineBarriers. This indicates which array element
    // this should be merged into. Right now we track individual barrier for every PipelineStage. If
    // layout has a single stage mask bit, we use that stage as index. If layout has multiple stage
    // mask bits, we pick the lowest stage as the index since it is the first stage that needs
    // barrier.
    PipelineStage barrierIndex;
    EventStage eventStage;
    // The pipeline stage flags group that is used for the heuristic.
    PipelineStageGroup pipelineStageGroup;
};
using ImageLayoutToMemoryBarrierDataMap = angle::PackedEnumMap<ImageLayout, ImageMemoryBarrierData>;

// Initialize ImageLayout to ImageMemoryBarrierData mapping table.
void InitializeImageLayoutAndMemoryBarrierDataMap(
    ImageLayoutToMemoryBarrierDataMap *mapping,
    VkPipelineStageFlags supportedVulkanPipelineStageMask);
903
904 // This wraps data and API for vkCmdPipelineBarrier call
905 class PipelineBarrier : angle::NonCopyable
906 {
907 public:
PipelineBarrier()908 PipelineBarrier()
909 : mSrcStageMask(0),
910 mDstStageMask(0),
911 mMemoryBarrierSrcAccess(0),
912 mMemoryBarrierDstAccess(0),
913 mImageMemoryBarriers()
914 {}
~PipelineBarrier()915 ~PipelineBarrier() { ASSERT(mImageMemoryBarriers.empty()); }
916
isEmpty()917 bool isEmpty() const { return mImageMemoryBarriers.empty() && mMemoryBarrierDstAccess == 0; }
918
execute(PrimaryCommandBuffer * primary)919 void execute(PrimaryCommandBuffer *primary)
920 {
921 if (isEmpty())
922 {
923 return;
924 }
925
926 // Issue vkCmdPipelineBarrier call
927 VkMemoryBarrier memoryBarrier = {};
928 uint32_t memoryBarrierCount = 0;
929 if (mMemoryBarrierDstAccess != 0)
930 {
931 memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
932 memoryBarrier.srcAccessMask = mMemoryBarrierSrcAccess;
933 memoryBarrier.dstAccessMask = mMemoryBarrierDstAccess;
934 memoryBarrierCount++;
935 }
936 primary->pipelineBarrier(
937 mSrcStageMask, mDstStageMask, 0, memoryBarrierCount, &memoryBarrier, 0, nullptr,
938 static_cast<uint32_t>(mImageMemoryBarriers.size()), mImageMemoryBarriers.data());
939
940 reset();
941 }
942
943 // merge two barriers into one
merge(PipelineBarrier * other)944 void merge(PipelineBarrier *other)
945 {
946 mSrcStageMask |= other->mSrcStageMask;
947 mDstStageMask |= other->mDstStageMask;
948 mMemoryBarrierSrcAccess |= other->mMemoryBarrierSrcAccess;
949 mMemoryBarrierDstAccess |= other->mMemoryBarrierDstAccess;
950 mImageMemoryBarriers.insert(mImageMemoryBarriers.end(), other->mImageMemoryBarriers.begin(),
951 other->mImageMemoryBarriers.end());
952 other->reset();
953 }
954
mergeMemoryBarrier(VkPipelineStageFlags srcStageMask,VkPipelineStageFlags dstStageMask,VkAccessFlags srcAccess,VkAccessFlags dstAccess)955 void mergeMemoryBarrier(VkPipelineStageFlags srcStageMask,
956 VkPipelineStageFlags dstStageMask,
957 VkAccessFlags srcAccess,
958 VkAccessFlags dstAccess)
959 {
960 mSrcStageMask |= srcStageMask;
961 mDstStageMask |= dstStageMask;
962 mMemoryBarrierSrcAccess |= srcAccess;
963 mMemoryBarrierDstAccess |= dstAccess;
964 }
965
mergeImageBarrier(VkPipelineStageFlags srcStageMask,VkPipelineStageFlags dstStageMask,const VkImageMemoryBarrier & imageMemoryBarrier)966 void mergeImageBarrier(VkPipelineStageFlags srcStageMask,
967 VkPipelineStageFlags dstStageMask,
968 const VkImageMemoryBarrier &imageMemoryBarrier)
969 {
970 ASSERT(imageMemoryBarrier.pNext == nullptr);
971 mSrcStageMask |= srcStageMask;
972 mDstStageMask |= dstStageMask;
973 mImageMemoryBarriers.push_back(imageMemoryBarrier);
974 }
975
reset()976 void reset()
977 {
978 mSrcStageMask = 0;
979 mDstStageMask = 0;
980 mMemoryBarrierSrcAccess = 0;
981 mMemoryBarrierDstAccess = 0;
982 mImageMemoryBarriers.clear();
983 }
984
985 void addDiagnosticsString(std::ostringstream &out) const;
986
987 private:
988 VkPipelineStageFlags mSrcStageMask;
989 VkPipelineStageFlags mDstStageMask;
990 VkAccessFlags mMemoryBarrierSrcAccess;
991 VkAccessFlags mMemoryBarrierDstAccess;
992 std::vector<VkImageMemoryBarrier> mImageMemoryBarriers;
993 };
994
995 class PipelineBarrierArray final
996 {
997 public:
mergeMemoryBarrier(PipelineStage stageIndex,VkPipelineStageFlags srcStageMask,VkPipelineStageFlags dstStageMask,VkAccessFlags srcAccess,VkAccessFlags dstAccess)998 void mergeMemoryBarrier(PipelineStage stageIndex,
999 VkPipelineStageFlags srcStageMask,
1000 VkPipelineStageFlags dstStageMask,
1001 VkAccessFlags srcAccess,
1002 VkAccessFlags dstAccess)
1003 {
1004 mBarriers[stageIndex].mergeMemoryBarrier(srcStageMask, dstStageMask, srcAccess, dstAccess);
1005 mBarrierMask.set(stageIndex);
1006 }
1007
mergeImageBarrier(PipelineStage stageIndex,VkPipelineStageFlags srcStageMask,VkPipelineStageFlags dstStageMask,const VkImageMemoryBarrier & imageMemoryBarrier)1008 void mergeImageBarrier(PipelineStage stageIndex,
1009 VkPipelineStageFlags srcStageMask,
1010 VkPipelineStageFlags dstStageMask,
1011 const VkImageMemoryBarrier &imageMemoryBarrier)
1012 {
1013 mBarriers[stageIndex].mergeImageBarrier(srcStageMask, dstStageMask, imageMemoryBarrier);
1014 mBarrierMask.set(stageIndex);
1015 }
1016
1017 void execute(Renderer *renderer, PrimaryCommandBuffer *primary);
1018
1019 void addDiagnosticsString(std::ostringstream &out) const;
1020
1021 private:
1022 angle::PackedEnumMap<PipelineStage, PipelineBarrier> mBarriers;
1023 PipelineStagesMask mBarrierMask;
1024 };
1025
// Host cacheability/coherency mode requested for a buffer allocation.
enum class MemoryCoherency : uint8_t
{
    // Host-cached, non-coherent: requires explicit flush/invalidate.
    CachedNonCoherent,
    // Host-cached; coherent preferred if the implementation offers it.
    CachedPreferCoherent,
    // Uncached but coherent: no explicit flush/invalidate needed.
    UnCachedCoherent,

    InvalidEnum = 3,
    EnumCount   = 3,
};
IsCached(MemoryCoherency coherency)1035 ANGLE_INLINE bool IsCached(MemoryCoherency coherency)
1036 {
1037 return coherency == MemoryCoherency::CachedNonCoherent ||
1038 coherency == MemoryCoherency::CachedPreferCoherent;
1039 }
1040
class BufferPool;

// Wraps a VkBuffer suballocation together with mapping state, barrier/event tracking, descriptor
// set cache bookkeeping, and external-ownership tracking.
class BufferHelper : public ReadWriteResource
{
  public:
    BufferHelper();
    ~BufferHelper() override;

    BufferHelper(BufferHelper &&other);
    BufferHelper &operator=(BufferHelper &&other);

    angle::Result init(ErrorContext *context,
                       const VkBufferCreateInfo &createInfo,
                       VkMemoryPropertyFlags memoryPropertyFlags);
    // Initializes from a client-provided external buffer handle.
    angle::Result initExternal(ErrorContext *context,
                               VkMemoryPropertyFlags memoryProperties,
                               const VkBufferCreateInfo &requestedCreateInfo,
                               GLeglClientBufferEXT clientBuffer);
    // Carves a suballocation out of |pool| instead of making a dedicated allocation.
    VkResult initSuballocation(Context *context,
                               uint32_t memoryTypeIndex,
                               size_t size,
                               size_t alignment,
                               BufferUsageType usageType,
                               BufferPool *pool);

    void destroy(Renderer *renderer);
    void release(Renderer *renderer);
    void release(Context *context);
    void releaseBufferAndDescriptorSetCache(ContextVk *contextVk);

    BufferSerial getBufferSerial() const { return mSerial; }
    BufferSerial getBlockSerial() const
    {
        ASSERT(mSuballocation.valid());
        return mSuballocation.getBlockSerial();
    }
    BufferBlock *getBufferBlock() const { return mSuballocation.getBufferBlock(); }
    bool valid() const { return mSuballocation.valid(); }
    const Buffer &getBuffer() const { return mSuballocation.getBuffer(); }
    VkDeviceSize getOffset() const { return mSuballocation.getOffset(); }
    VkDeviceSize getSize() const { return mSuballocation.getSize(); }
    // NOTE(review): the return type is VkMemoryMapFlags but the value returned is the
    // suballocation's memory *property* flags; VkMemoryPropertyFlags looks like the intended
    // type (both alias VkFlags, so this is benign) — confirm and consider fixing.
    VkMemoryMapFlags getMemoryPropertyFlags() const
    {
        return mSuballocation.getMemoryPropertyFlags();
    }
    uint8_t *getMappedMemory() const
    {
        ASSERT(isMapped());
        return mSuballocation.getMappedMemory();
    }
    // Returns the main buffer block's pointer.
    uint8_t *getBlockMemory() const { return mSuballocation.getBlockMemory(); }
    VkDeviceSize getBlockMemorySize() const { return mSuballocation.getBlockMemorySize(); }
    bool isHostVisible() const { return mSuballocation.isHostVisible(); }
    bool isCoherent() const { return mSuballocation.isCoherent(); }
    bool isCached() const { return mSuballocation.isCached(); }
    bool isMapped() const { return mSuballocation.isMapped(); }

    angle::Result map(ErrorContext *context, uint8_t **ptrOut);
    angle::Result mapWithOffset(ErrorContext *context, uint8_t **ptrOut, size_t offset);
    // Intentionally a no-op — presumably the suballocation stays mapped for its lifetime;
    // TODO(review): confirm against BufferSuballocation's mapping semantics.
    void unmap(Renderer *renderer) {}
    // After a sequence of writes, call flush to ensure the data is visible to the device.
    angle::Result flush(Renderer *renderer);
    angle::Result flush(Renderer *renderer, VkDeviceSize offset, VkDeviceSize size);
    // After a sequence of writes, call invalidate to ensure the data is visible to the host.
    angle::Result invalidate(Renderer *renderer);
    angle::Result invalidate(Renderer *renderer, VkDeviceSize offset, VkDeviceSize size);

    void changeQueueFamily(uint32_t srcQueueFamilyIndex,
                           uint32_t dstQueueFamilyIndex,
                           OutsideRenderPassCommandBuffer *commandBuffer);

    // Performs an ownership transfer from an external instance or API.
    void acquireFromExternal(DeviceQueueIndex externalQueueIndex,
                             DeviceQueueIndex newDeviceQueueIndex,
                             OutsideRenderPassCommandBuffer *commandBuffer);

    // Performs an ownership transfer to an external instance or API.
    void releaseToExternal(DeviceQueueIndex externalQueueIndex,
                           OutsideRenderPassCommandBuffer *commandBuffer);

    // Returns true if the image is owned by an external API or instance.
    bool isReleasedToExternal() const { return mIsReleasedToExternal; }

    // Records the barriers (pipeline barriers and/or event barriers) needed before a read.
    void recordReadBarrier(Context *context,
                           VkAccessFlags readAccessType,
                           VkPipelineStageFlags readPipelineStageFlags,
                           PipelineStage stageIndex,
                           PipelineBarrierArray *pipelineBarriers,
                           EventBarrierArray *eventBarriers,
                           RefCountedEventCollector *eventCollector);

    // Records the barriers (pipeline barriers and/or event barriers) needed before a write.
    void recordWriteBarrier(Context *context,
                            VkAccessFlags writeAccessType,
                            VkPipelineStageFlags writeStage,
                            PipelineStage stageIndex,
                            const QueueSerial &queueSerial,
                            PipelineBarrierArray *pipelineBarriers,
                            EventBarrierArray *eventBarriers,
                            RefCountedEventCollector *eventCollector);

    void recordReadEvent(Context *context,
                         VkAccessFlags readAccessType,
                         VkPipelineStageFlags readPipelineStageFlags,
                         PipelineStage stageIndex,
                         const QueueSerial &queueSerial,
                         EventStage eventStage,
                         RefCountedEventArray *refCountedEventArray);

    void recordWriteEvent(Context *context,
                          VkAccessFlags writeAccessType,
                          VkPipelineStageFlags writePipelineStageFlags,
                          const QueueSerial &writeQueueSerial,
                          PipelineStage writeStage,
                          RefCountedEventArray *refCountedEventArray);

    void fillWithColor(const angle::Color<uint8_t> &color,
                       const gl::InternalFormat &internalFormat);

    void fillWithPattern(const void *pattern, size_t patternSize, size_t offset, size_t size);

    // Special handling for VertexArray code so that we can create a dedicated VkBuffer for the
    // sub-range of memory of the actual buffer data size that user requested (i.e, excluding extra
    // paddings that we added for alignment, which will not get zero filled).
    const Buffer &getBufferForVertexArray(ContextVk *contextVk,
                                          VkDeviceSize actualDataSize,
                                          VkDeviceSize *offsetOut);

    // Registers a descriptor-set cache key that references this buffer.
    void onNewDescriptorSet(const SharedDescriptorSetCacheKey &sharedCacheKey)
    {
        mDescriptorSetCacheManager.addKey(sharedCacheKey);
    }

    angle::Result initializeNonZeroMemory(ErrorContext *context,
                                          VkBufferUsageFlags usage,
                                          VkDeviceSize size);

    // Buffer's user size and allocation size may be different due to alignment requirement. In
    // normal usage we just use the actual allocation size and it is good enough. But when
    // robustResourceInit is enabled, mBufferWithUserSize is created to match the exact user
    // size. Thus when user size changes, we must clear and recreate this mBufferWithUserSize.
    // Returns true if mBufferWithUserSize is released.
    bool onBufferUserSizeChange(Renderer *renderer);

    void initializeBarrierTracker(ErrorContext *context);

    // Returns the current VkAccessFlags bits
    VkAccessFlags getCurrentWriteAccess() const { return mCurrentWriteAccess; }

  private:
    // Only called by DynamicBuffer.
    friend class DynamicBuffer;
    void setSuballocationOffsetAndSize(VkDeviceSize offset, VkDeviceSize size)
    {
        mSuballocation.setOffsetAndSize(offset, size);
    }

    void releaseImpl(Renderer *renderer);

    // Shifts the write-history window by one and records whether the newest write came from the
    // transform feedback stage.
    void updatePipelineStageWriteHistory(PipelineStage writeStage)
    {
        mTransformFeedbackWriteHeuristicBits <<= 1;
        if (writeStage == PipelineStage::TransformFeedback)
        {
            mTransformFeedbackWriteHeuristicBits |= 1;
        }
    }

    // Suballocation object.
    BufferSuballocation mSuballocation;
    // This normally is invalid. We always use the BufferBlock's buffer and offset combination. But
    // when robust resource init is enabled, we may want to create a dedicated VkBuffer for the
    // suballocation so that vulkan driver will ensure no access beyond this sub-range. In that
    // case, this VkBuffer will be created lazily as needed.
    Buffer mBufferWithUserSize;

    // For memory barriers.
    DeviceQueueIndex mCurrentDeviceQueueIndex;

    // Access that not tracked by VkEvents
    VkFlags mCurrentWriteAccess;
    VkFlags mCurrentReadAccess;
    VkPipelineStageFlags mCurrentWriteStages;
    VkPipelineStageFlags mCurrentReadStages;

    // The current refCounted event. When barrier is needed, we should wait for this event.
    RefCountedEventWithAccessFlags mCurrentWriteEvent;
    RefCountedEventArrayWithAccessFlags mCurrentReadEvents;

    // Track history of pipeline stages being used. This information provides
    // heuristic for making decisions if a VkEvent should be used to track the operation.
    static constexpr uint32_t kTransformFeedbackWriteHeuristicWindowSize = 16;
    angle::BitSet16<kTransformFeedbackWriteHeuristicWindowSize>
        mTransformFeedbackWriteHeuristicBits;

    BufferSerial mSerial;
    // Manages the descriptorSet cache that created with this BufferHelper object.
    DescriptorSetCacheManager mDescriptorSetCacheManager;
    // For external buffer
    GLeglClientBufferEXT mClientBuffer;

    // Whether ANGLE currently has ownership of this resource or it's released to external.
    bool mIsReleasedToExternal;
};
1245
// A pool of BufferBlocks from which BufferHelper suballocations are carved.
class BufferPool : angle::NonCopyable
{
  public:
    BufferPool();
    BufferPool(BufferPool &&other);
    ~BufferPool();

    // Init that gives the ability to pass in specified memory property flags for the buffer.
    void initWithFlags(Renderer *renderer,
                       vma::VirtualBlockCreateFlags flags,
                       VkBufferUsageFlags usage,
                       VkDeviceSize initialSize,
                       uint32_t memoryTypeIndex,
                       VkMemoryPropertyFlags memoryProperty);

    // Allocates a |sizeInBytes| suballocation with |alignment| from one of the pool's blocks.
    VkResult allocateBuffer(ErrorContext *context,
                            VkDeviceSize sizeInBytes,
                            VkDeviceSize alignment,
                            BufferSuballocation *suballocation);

    // Frees resources immediately, or orphan the non-empty BufferBlocks if allowed. If orphan is
    // not allowed, it will assert if BufferBlock is still not empty.
    void destroy(Renderer *renderer, bool orphanAllowed);
    // Remove and destroy empty BufferBlocks
    void pruneEmptyBuffers(Renderer *renderer);

    // True once initWithFlags() has set a non-zero size.
    bool valid() const { return mSize != 0; }

    void addStats(std::ostringstream *out) const;
    size_t getBufferCount() const { return mBufferBlocks.size() + mEmptyBufferBlocks.size(); }
    VkDeviceSize getMemorySize() const { return mTotalMemorySize; }

  private:
    VkResult allocateNewBuffer(ErrorContext *context, VkDeviceSize sizeInBytes);
    VkDeviceSize getTotalEmptyMemorySize() const;

    vma::VirtualBlockCreateFlags mVirtualBlockCreateFlags;
    VkBufferUsageFlags mUsage;
    bool mHostVisible;
    // Non-zero once the pool is initialized; see valid().
    VkDeviceSize mSize;
    uint32_t mMemoryTypeIndex;
    VkDeviceSize mTotalMemorySize;
    BufferBlockPointerVector mBufferBlocks;
    std::deque<BufferBlockPointer> mEmptyBufferBlocks;
    // Tracks the number of new buffers needed for suballocation since last pruneEmptyBuffers call.
    // We will use this heuristic information to decide how many empty buffers to keep around.
    size_t mNumberOfNewBuffersNeededSinceLastPrune;
    // max size to go down the suballocation code path. Any allocation greater or equal this size
    // will call into vulkan directly to allocate a dedicated VkDeviceMemory.
    static constexpr size_t kMaxBufferSizeForSuballocation = 4 * 1024 * 1024;
};
// One pool slot per Vulkan memory type index.
using BufferPoolPointerArray = std::array<std::unique_ptr<BufferPool>, VK_MAX_MEMORY_TYPES>;
1298
// Stores clear values in packed attachment index
class PackedClearValuesArray final
{
  public:
    PackedClearValuesArray();
    ~PackedClearValuesArray();

    PackedClearValuesArray(const PackedClearValuesArray &other);
    PackedClearValuesArray &operator=(const PackedClearValuesArray &rhs);
    // Stores the clear value for the color attachment at |index|.
    void storeColor(PackedAttachmentIndex index, const VkClearValue &clearValue);
    // Caller must take care to pack depth and stencil value together.
    void storeDepthStencil(PackedAttachmentIndex index, const VkClearValue &clearValue);
    const VkClearValue &operator[](PackedAttachmentIndex index) const
    {
        return mValues[index.get()];
    }
    // Contiguous array of the packed clear values.
    const VkClearValue *data() const { return mValues.data(); }

  private:
    gl::AttachmentArray<VkClearValue> mValues;
};
1320
// Reference to a render pass attachment (color or depth/stencil) alongside render-pass-related
// tracking such as when the attachment is last written to or invalidated. This is used to
// determine loadOp and storeOp of the attachment, and enables optimizations that need to know
// how the attachment has been used.
class RenderPassAttachment final
{
  public:
    RenderPassAttachment();
    ~RenderPassAttachment() = default;

    // Binds this tracker to one subresource of |image| for the duration of a render pass.
    void init(ImageHelper *image,
              UniqueSerial imageSiblingSerial,
              gl::LevelIndex levelIndex,
              uint32_t layerIndex,
              uint32_t layerCount,
              VkImageAspectFlagBits aspect);
    void reset();

    // Records a read or write access occurring at the given draw-command count.
    void onAccess(ResourceAccess access, uint32_t currentCmdCount);
    void invalidate(const gl::Rectangle &invalidateArea,
                    bool isAttachmentEnabled,
                    uint32_t currentCmdCount);
    void onRenderAreaGrowth(ContextVk *contextVk, const gl::Rectangle &newRenderArea);
    // Determines the final loadOp/storeOp (and whether content ends up invalidated) at render
    // pass close.
    void finalizeLoadStore(ErrorContext *context,
                           uint32_t currentCmdCount,
                           bool hasUnresolveAttachment,
                           bool hasResolveAttachment,
                           RenderPassLoadOp *loadOp,
                           RenderPassStoreOp *storeOp,
                           bool *isInvalidatedOut);
    void restoreContent();
    bool hasAnyAccess() const { return mAccess != ResourceAccess::Unused; }
    bool hasWriteAccess() const { return HasResourceWriteAccess(mAccess); }

    ImageHelper *getImage() { return mImage; }

    bool hasImage(const ImageHelper *image, UniqueSerial imageSiblingSerial) const
    {
        // Compare values because we do want that invalid serials compare equal.
        return mImage == image && mImageSiblingSerial.getValue() == imageSiblingSerial.getValue();
    }

  private:
    bool hasWriteAfterInvalidate(uint32_t currentCmdCount) const;
    bool isInvalidated(uint32_t currentCmdCount) const;
    bool onAccessImpl(ResourceAccess access, uint32_t currentCmdCount);

    // The attachment image itself
    ImageHelper *mImage;
    // Invalid or serial of EGLImage/Surface sibling target.
    UniqueSerial mImageSiblingSerial;
    // The subresource used in the render pass
    gl::LevelIndex mLevelIndex;
    uint32_t mLayerIndex;
    uint32_t mLayerCount;
    VkImageAspectFlagBits mAspect;
    // Tracks the highest access during the entire render pass (Write being the highest), excluding
    // clear through loadOp. This allows loadOp=Clear to be optimized out when we find out that the
    // attachment is not used in the render pass at all and storeOp=DontCare, or that a
    // mid-render-pass clear could be hoisted to loadOp=Clear.
    ResourceAccess mAccess;
    // The index of the last draw command after which the attachment is invalidated
    uint32_t mInvalidatedCmdCount;
    // The index of the last draw command after which the attachment output is disabled
    uint32_t mDisabledCmdCount;
    // The area that has been invalidated
    gl::Rectangle mInvalidateArea;
};
1389
// Stores RenderPassAttachment in packed attachment index
class PackedRenderPassAttachmentArray final
{
  public:
    PackedRenderPassAttachmentArray() : mAttachments{} {}
    ~PackedRenderPassAttachmentArray() = default;
    RenderPassAttachment &operator[](PackedAttachmentIndex index)
    {
        return mAttachments[index.get()];
    }
    // Resets every attachment slot back to its unused state.
    void reset()
    {
        for (RenderPassAttachment &attachment : mAttachments)
        {
            attachment.reset();
        }
    }

  private:
    gl::AttachmentArray<RenderPassAttachment> mAttachments;
};
1411
// Gathers finished secondary command buffers so they can be released together later via
// releaseCommandBuffers(). Move-only.
class SecondaryCommandBufferCollector final
{
  public:
    SecondaryCommandBufferCollector() = default;
    SecondaryCommandBufferCollector(const SecondaryCommandBufferCollector &) = delete;
    SecondaryCommandBufferCollector(SecondaryCommandBufferCollector &&) = default;
    void operator=(const SecondaryCommandBufferCollector &) = delete;
    SecondaryCommandBufferCollector &operator=(SecondaryCommandBufferCollector &&) = default;
    // All collected command buffers must be released before destruction.
    ~SecondaryCommandBufferCollector() { ASSERT(empty()); }

    void collectCommandBuffer(priv::SecondaryCommandBuffer &&commandBuffer);
    void collectCommandBuffer(VulkanSecondaryCommandBuffer &&commandBuffer);
    void releaseCommandBuffers();

    bool empty() const { return mCollectedCommandBuffers.empty(); }

  private:
    std::vector<VulkanSecondaryCommandBuffer> mCollectedCommandBuffers;
};
1431
// State accumulated while assembling commands for submission: the semaphores to wait on, the
// primary command buffer being recorded, and the secondary command buffers to recycle afterwards.
struct CommandsState
{
    std::vector<VkSemaphore> waitSemaphores;
    // Pipeline stages at which each wait occurs — presumably parallel to waitSemaphores;
    // confirm at the submission site.
    std::vector<VkPipelineStageFlags> waitSemaphoreStageMasks;
    PrimaryCommandBuffer primaryCommands;
    SecondaryCommandBufferCollector secondaryCommands;
};
1439
// How the ImageHelper object is being used by the renderpass
enum class RenderPassUsage
{
    // Attached to the render target of the current renderpass commands. It could be read/write or
    // read only access.
    RenderTargetAttachment,
    // This is special case of RenderTargetAttachment where the render target access is read only.
    // Right now it is only tracked for depth stencil attachment
    DepthReadOnlyAttachment,
    StencilReadOnlyAttachment,
    // This is special case of RenderTargetAttachment where the render target access is formed
    // feedback loop. Right now it is only tracked for depth stencil attachment
    DepthFeedbackLoop,
    StencilFeedbackLoop,
    // Attached to the texture sampler of the current renderpass commands
    ColorTextureSampler,
    DepthTextureSampler,
    StencilTextureSampler,
    // Fragment shading rate attachment
    FragmentShadingRateReadOnlyAttachment,

    InvalidEnum,
    EnumCount = InvalidEnum,
};
using RenderPassUsageFlags = angle::PackedEnumBitSet<RenderPassUsage, uint16_t>;
// Convenience masks pairing the depth and stencil variants of read-only and feedback-loop usage.
constexpr RenderPassUsageFlags kDepthStencilReadOnlyBits = RenderPassUsageFlags(
    {RenderPassUsage::DepthReadOnlyAttachment, RenderPassUsage::StencilReadOnlyAttachment});
constexpr RenderPassUsageFlags kDepthStencilFeedbackModeBits = RenderPassUsageFlags(
    {RenderPassUsage::DepthFeedbackLoop, RenderPassUsage::StencilFeedbackLoop});

// The following are used to help track the state of an invalidated attachment.
// This value indicates an "infinite" CmdCount that is not valid for comparing
constexpr uint32_t kInfiniteCmdCount = 0xFFFFFFFF;
1473
1474 // CommandBufferHelperCommon and derivatives OutsideRenderPassCommandBufferHelper and
1475 // RenderPassCommandBufferHelper wrap the outside/inside render pass secondary command buffers,
1476 // together with other information such as barriers to issue before the command buffer, tracking of
1477 // resource usages, etc.
1478 class CommandBufferHelperCommon : angle::NonCopyable
1479 {
1480 public:
1481 void bufferWrite(Context *context,
1482 VkAccessFlags writeAccessType,
1483 PipelineStage writeStage,
1484 BufferHelper *buffer);
1485
1486 void bufferWrite(Context *context,
1487 VkAccessFlags writeAccessType,
1488 const gl::ShaderBitSet &writeShaderStages,
1489 BufferHelper *buffer);
1490
1491 void bufferRead(Context *context,
1492 VkAccessFlags readAccessType,
1493 PipelineStage readStage,
1494 BufferHelper *buffer);
1495
1496 void bufferRead(Context *context,
1497 VkAccessFlags readAccessType,
1498 const gl::ShaderBitSet &readShaderStages,
1499 BufferHelper *buffer);
1500
usesBuffer(const BufferHelper & buffer)1501 bool usesBuffer(const BufferHelper &buffer) const
1502 {
1503 return buffer.usedByCommandBuffer(mQueueSerial);
1504 }
1505
usesBufferForWrite(const BufferHelper & buffer)1506 bool usesBufferForWrite(const BufferHelper &buffer) const
1507 {
1508 return buffer.writtenByCommandBuffer(mQueueSerial);
1509 }
1510
getAndResetHasHostVisibleBufferWrite()1511 bool getAndResetHasHostVisibleBufferWrite()
1512 {
1513 bool hostBufferWrite = mIsAnyHostVisibleBufferWritten;
1514 mIsAnyHostVisibleBufferWritten = false;
1515 return hostBufferWrite;
1516 }
1517
1518 void executeBarriers(Renderer *renderer, CommandsState *commandsState);
1519
1520 // The markOpen and markClosed functions are to aid in proper use of the *CommandBufferHelper.
    // We saw invalid use due to threading issues that can be easily caught by marking when it's safe
1522 // (open) to write to the command buffer.
1523 #if !defined(ANGLE_ENABLE_ASSERTS)
markOpen()1524 void markOpen() {}
markClosed()1525 void markClosed() {}
1526 #endif
1527
setHasShaderStorageOutput()1528 void setHasShaderStorageOutput() { mHasShaderStorageOutput = true; }
hasShaderStorageOutput()1529 bool hasShaderStorageOutput() const { return mHasShaderStorageOutput; }
1530
hasGLMemoryBarrierIssued()1531 bool hasGLMemoryBarrierIssued() const { return mHasGLMemoryBarrierIssued; }
1532
retainResource(Resource * resource)1533 void retainResource(Resource *resource) { resource->setQueueSerial(mQueueSerial); }
1534
retainResourceForWrite(ReadWriteResource * writeResource)1535 void retainResourceForWrite(ReadWriteResource *writeResource)
1536 {
1537 writeResource->setWriteQueueSerial(mQueueSerial);
1538 }
1539
1540 // Update image with this command buffer's queueSerial. If VkEvent is enabled, image's current
1541 // event is also updated with this command's event.
1542 void retainImageWithEvent(Context *context, ImageHelper *image);
1543
1544 // Returns true if event already existed in this command buffer.
hasSetEventPendingFlush(const RefCountedEvent & event)1545 bool hasSetEventPendingFlush(const RefCountedEvent &event) const
1546 {
1547 ASSERT(event.valid());
1548 return mRefCountedEvents.getEvent(event.getEventStage()) == event;
1549 }
1550
1551 // Issue VkCmdSetEvent call for events in this command buffer.
1552 template <typename CommandBufferT>
1553 void flushSetEventsImpl(Context *context, CommandBufferT *commandBuffer);
1554
getQueueSerial()1555 const QueueSerial &getQueueSerial() const { return mQueueSerial; }
1556
setAcquireNextImageSemaphore(VkSemaphore semaphore)1557 void setAcquireNextImageSemaphore(VkSemaphore semaphore)
1558 {
1559 ASSERT(semaphore != VK_NULL_HANDLE);
1560 ASSERT(!mAcquireNextImageSemaphore.valid());
1561 mAcquireNextImageSemaphore.setHandle(semaphore);
1562 }
1563
1564 protected:
1565 CommandBufferHelperCommon();
1566 ~CommandBufferHelperCommon();
1567
1568 void initializeImpl();
1569
1570 void resetImpl(ErrorContext *context);
1571
1572 template <class DerivedT>
1573 angle::Result attachCommandPoolImpl(ErrorContext *context, SecondaryCommandPool *commandPool);
1574 template <class DerivedT, bool kIsRenderPassBuffer>
1575 angle::Result detachCommandPoolImpl(ErrorContext *context,
1576 SecondaryCommandPool **commandPoolOut);
1577 template <class DerivedT>
1578 void releaseCommandPoolImpl();
1579
1580 template <class DerivedT>
1581 void attachAllocatorImpl(SecondaryCommandMemoryAllocator *allocator);
1582 template <class DerivedT>
1583 SecondaryCommandMemoryAllocator *detachAllocatorImpl();
1584
1585 template <class DerivedT>
1586 void assertCanBeRecycledImpl();
1587
1588 void bufferWriteImpl(Context *context,
1589 VkAccessFlags writeAccessType,
1590 VkPipelineStageFlags writePipelineStageFlags,
1591 PipelineStage writeStage,
1592 BufferHelper *buffer);
1593
1594 void bufferReadImpl(Context *context,
1595 VkAccessFlags readAccessType,
1596 VkPipelineStageFlags readPipelineStageFlags,
1597 PipelineStage readStage,
1598 BufferHelper *buffer);
1599
1600 void imageReadImpl(Context *context,
1601 VkImageAspectFlags aspectFlags,
1602 ImageLayout imageLayout,
1603 BarrierType barrierType,
1604 ImageHelper *image);
1605
1606 void imageWriteImpl(Context *context,
1607 gl::LevelIndex level,
1608 uint32_t layerStart,
1609 uint32_t layerCount,
1610 VkImageAspectFlags aspectFlags,
1611 ImageLayout imageLayout,
1612 BarrierType barrierType,
1613 ImageHelper *image);
1614
1615 void updateImageLayoutAndBarrier(Context *context,
1616 ImageHelper *image,
1617 VkImageAspectFlags aspectFlags,
1618 ImageLayout imageLayout,
1619 BarrierType barrierType);
1620
1621 void addCommandDiagnosticsCommon(std::ostringstream *out);
1622
1623 // Allocator used by this class.
1624 SecondaryCommandBlockAllocator mCommandAllocator;
1625
1626 // Barriers to be executed before the command buffer.
1627 PipelineBarrierArray mPipelineBarriers;
1628 EventBarrierArray mEventBarriers;
1629
1630 // The command pool *CommandBufferHelper::mCommandBuffer is allocated from. Only used with
1631 // Vulkan secondary command buffers (as opposed to ANGLE's SecondaryCommandBuffer).
1632 SecondaryCommandPool *mCommandPool;
1633
1634 // Whether the command buffers contains any draw/dispatch calls that possibly output data
1635 // through storage buffers and images. This is used to determine whether glMemoryBarrier*
1636 // should flush the command buffer.
1637 bool mHasShaderStorageOutput;
1638 // Whether glMemoryBarrier has been called while commands are recorded in this command buffer.
1639 // This is used to know when to check and potentially flush the command buffer if storage
1640 // buffers and images are used in it.
1641 bool mHasGLMemoryBarrierIssued;
1642
1643 // Tracks resources used in the command buffer.
1644 QueueSerial mQueueSerial;
1645
1646 // Only used for swapChain images
1647 Semaphore mAcquireNextImageSemaphore;
1648
1649 // The list of RefCountedEvents that have be tracked
1650 RefCountedEventArray mRefCountedEvents;
1651 // The list of RefCountedEvents that should be garbage collected when it gets reset.
1652 RefCountedEventCollector mRefCountedEventCollector;
1653
1654 // Check for any buffer write commands recorded for host-visible buffers
1655 bool mIsAnyHostVisibleBufferWritten = false;
1656 };
1657
1658 class SecondaryCommandBufferCollector;
1659
// Helper for commands recorded outside a render pass (copies, dispatches, layout transitions).
class OutsideRenderPassCommandBufferHelper final : public CommandBufferHelperCommon
{
  public:
    OutsideRenderPassCommandBufferHelper();
    ~OutsideRenderPassCommandBufferHelper();

    angle::Result initialize(ErrorContext *context);

    angle::Result reset(ErrorContext *context,
                        SecondaryCommandBufferCollector *commandBufferCollector);

    // Whether the secondary commands execute inline in the primary command buffer; delegates to
    // the command buffer type in use.
    static constexpr bool ExecutesInline()
    {
        return OutsideRenderPassCommandBuffer::ExecutesInline();
    }

    OutsideRenderPassCommandBuffer &getCommandBuffer() { return mCommandBuffer; }

    bool empty() const { return mCommandBuffer.empty(); }

    angle::Result attachCommandPool(ErrorContext *context, SecondaryCommandPool *commandPool);
    angle::Result detachCommandPool(ErrorContext *context, SecondaryCommandPool **commandPoolOut);
    void releaseCommandPool();

    void attachAllocator(SecondaryCommandMemoryAllocator *allocator);
    SecondaryCommandMemoryAllocator *detachAllocator();

    void assertCanBeRecycled();

    // Assert-enabled counterparts to the no-op markOpen/markClosed in the base class.
#if defined(ANGLE_ENABLE_ASSERTS)
    void markOpen() { mCommandBuffer.open(); }
    void markClosed() { mCommandBuffer.close(); }
#endif

    // Record an image read/write in this command buffer (layout/barrier bookkeeping included).
    void imageRead(Context *context,
                   VkImageAspectFlags aspectFlags,
                   ImageLayout imageLayout,
                   ImageHelper *image);

    void imageWrite(Context *context,
                    gl::LevelIndex level,
                    uint32_t layerStart,
                    uint32_t layerCount,
                    VkImageAspectFlags aspectFlags,
                    ImageLayout imageLayout,
                    ImageHelper *image);

    // Update image with this command buffer's queueSerial.
    void retainImage(ImageHelper *image);

    // Call SetEvent and have image's current event pointing to it.
    void trackImageWithEvent(Context *context, ImageHelper *image);

    // Issues SetEvent calls to the command buffer.
    void flushSetEvents(Context *context) { flushSetEventsImpl(context, &mCommandBuffer); }
    // Clean up event garbage. Note that an ImageHelper object may still be holding a reference
    // count on an event, so the event itself will not get destroyed until the last refCount goes
    // away.
    void collectRefCountedEventsGarbage(RefCountedEventsGarbageRecycler *garbageRecycler);

    RefCountedEventCollector *getRefCountedEventCollector() { return &mRefCountedEventCollector; }

    angle::Result flushToPrimary(Context *context, CommandsState *commandsState);

    // Records that glMemoryBarrier was issued, but only if there are commands that could be
    // affected by it.
    void setGLMemoryBarrierIssued()
    {
        if (!mCommandBuffer.empty())
        {
            mHasGLMemoryBarrierIssued = true;
        }
    }

    std::string getCommandDiagnostics();

    void setQueueSerial(SerialIndex index, Serial serial)
    {
        mQueueSerial = QueueSerial(index, serial);
    }

  private:
    angle::Result initializeCommandBuffer(ErrorContext *context);
    angle::Result endCommandBuffer(ErrorContext *context);

    OutsideRenderPassCommandBuffer mCommandBuffer;
    // Presumably set once endCommandBuffer() has run -- confirm in vk_helpers.cpp.
    bool mIsCommandBufferEnded = false;

    friend class CommandBufferHelperCommon;
};
1747
// Whether a framebuffer is imageless (VK_KHR_imageless_framebuffer): attachments are then
// provided at render pass begin time (VkRenderPassAttachmentBeginInfo) instead of framebuffer
// creation time.
enum class ImagelessFramebuffer
{
    No,
    Yes,
};

// Whether a texture clear covers the whole subresource or only part of it.
enum class ClearTextureMode
{
    FullClear,
    PartialClear,
};

// Where a render pass originates from.
enum class RenderPassSource
{
    // The window surface's default framebuffer.
    DefaultFramebuffer,
    // An application framebuffer object.
    FramebufferObject,
    // ANGLE-internal utility passes (UtilsVk).
    InternalUtils,
};
1766
// Holds the framebuffer a render pass is started with, together with the attachment views and
// dimensions.  With imageless framebuffers or dynamic rendering, the views are what matter; the
// VkFramebuffer handle may be recreated if resolve attachments get added mid-pass.
class RenderPassFramebuffer : angle::NonCopyable
{
  public:
    RenderPassFramebuffer() = default;
    // The handle is released, not destroyed: the VkFramebuffer is presumably owned and destroyed
    // elsewhere (e.g. a framebuffer cache) -- confirm before changing.
    ~RenderPassFramebuffer() { mInitialFramebuffer.release(); }

    RenderPassFramebuffer &operator=(RenderPassFramebuffer &&other)
    {
        // NOTE(review): |other|'s scalar members are copied but not reset; |other| is presumably
        // reset or destroyed after the move -- confirm with callers.
        mInitialFramebuffer.setHandle(other.mInitialFramebuffer.release());
        std::swap(mImageViews, other.mImageViews);
        mWidth       = other.mWidth;
        mHeight      = other.mHeight;
        mLayers      = other.mLayers;
        mIsImageless = other.mIsImageless;
        mIsDefault   = other.mIsDefault;
        return *this;
    }

    void reset();

    // Adopt the framebuffer (and/or attachment views) the render pass will be started with.
    void setFramebuffer(ErrorContext *context,
                        Framebuffer &&initialFramebuffer,
                        FramebufferAttachmentsVector<VkImageView> &&imageViews,
                        uint32_t width,
                        uint32_t height,
                        uint32_t layers,
                        ImagelessFramebuffer imagelessFramebuffer,
                        RenderPassSource source)
    {
        // Framebuffers are mutually exclusive with dynamic rendering.
        ASSERT(initialFramebuffer.valid() != context->getFeatures().preferDynamicRendering.enabled);
        mInitialFramebuffer = std::move(initialFramebuffer);
        mImageViews         = std::move(imageViews);
        mWidth              = width;
        mHeight             = height;
        mLayers             = layers;
        mIsImageless        = imagelessFramebuffer == ImagelessFramebuffer::Yes;
        mIsDefault          = source == RenderPassSource::DefaultFramebuffer;
    }

    bool isImageless() const { return mIsImageless; }
    bool isDefault() const { return mIsDefault; }
    const Framebuffer &getFramebuffer() const { return mInitialFramebuffer; }
    // If the initial framebuffer is no longer valid, a new one (with the added resolve
    // attachments) must be created before the render pass can be recorded.
    bool needsNewFramebufferWithResolveAttachments() const { return !mInitialFramebuffer.valid(); }
    uint32_t getLayers() const { return mLayers; }

    // Helpers to determine if a resolve attachment already exists
    bool hasColorResolveAttachment(size_t colorIndexGL)
    {
        const size_t viewIndex = kColorResolveAttachmentBegin + colorIndexGL;
        return viewIndex < mImageViews.size() && mImageViews[viewIndex] != VK_NULL_HANDLE;
    }
    bool hasDepthStencilResolveAttachment()
    {
        // NOTE(review): unlike hasColorResolveAttachment, there is no bounds check here; assumes
        // mImageViews always extends past kDepthStencilResolveAttachment -- confirm.
        return mImageViews[kDepthStencilResolveAttachment] != VK_NULL_HANDLE;
    }

    // Add a resolve attachment. This is only called through glBlitFramebuffer, as other cases
    // where resolve attachments are implicitly added already include the resolve attachment when
    // initially populating mImageViews.
    void addColorResolveAttachment(size_t colorIndexGL, VkImageView view)
    {
        addResolveAttachment(kColorResolveAttachmentBegin + colorIndexGL, view);
    }
    void addDepthStencilResolveAttachment(VkImageView view)
    {
        addResolveAttachment(kDepthStencilResolveAttachment, view);
    }

    // Prepare for rendering by creating a new framebuffer because the initial framebuffer is not
    // valid (due to added resolve attachments). This is called when the render pass is finalized.
    angle::Result packResolveViewsAndCreateFramebuffer(ErrorContext *context,
                                                       const RenderPass &renderPass,
                                                       Framebuffer *framebufferOut);

    // Prepare for rendering using the initial imageless framebuffer.
    void packResolveViewsForRenderPassBegin(VkRenderPassAttachmentBeginInfo *beginInfoOut);

    // For use with dynamic rendering.
    const FramebufferAttachmentsVector<VkImageView> &getUnpackedImageViews() const
    {
        return mImageViews;
    }

    // Packs views in a contiguous list.
    //
    // It can be used before creating a framebuffer, or when starting a render pass with an
    // imageless framebuffer.
    static void PackViews(FramebufferAttachmentsVector<VkImageView> *views);

    // Index of the first color resolve view and of the depth/stencil resolve view in
    // mImageViews; see the layout description on mImageViews below.
    static constexpr size_t kColorResolveAttachmentBegin = gl::IMPLEMENTATION_MAX_DRAW_BUFFERS + 2;
    static constexpr size_t kDepthStencilResolveAttachment =
        gl::IMPLEMENTATION_MAX_DRAW_BUFFERS * 2 + 2;

  private:
    void addResolveAttachment(size_t viewIndex, VkImageView view);
    void packResolveViews();

    // The following is the framebuffer object that was used to start the render pass. If the
    // resolve attachments have not been modified, the same framebuffer object can be used.
    // Otherwise a temporary framebuffer object is created when the render pass is closed. This
    // inefficiency is removed with VK_KHR_dynamic_rendering when supported.
    Framebuffer mInitialFramebuffer;

    // The first gl::IMPLEMENTATION_MAX_DRAW_BUFFERS + 2 attachments are laid out as follows:
    //
    // - Color attachments, if any
    // - Depth/stencil attachment, if any
    // - Fragment shading rate attachment, if any
    // - Padding if needed
    //
    // Starting from index gl::IMPLEMENTATION_MAX_DRAW_BUFFERS + 2, there are potentially another
    // gl::IMPLEMENTATION_MAX_DRAW_BUFFERS + 1 resolve attachments. However, these are not packed
    // (with gaps per missing attachment, and depth/stencil resolve is last). This allows more
    // resolve attachments to be added by optimizing calls to glBlitFramebuffer. When the render
    // pass is closed, the resolve attachments are packed.
    FramebufferAttachmentsVector<VkImageView> mImageViews = {};

    uint32_t mWidth  = 0;
    uint32_t mHeight = 0;
    uint32_t mLayers = 0;

    // Whether this is an imageless framebuffer. Currently, window surface and UtilsVk framebuffers
    // aren't imageless, unless imageless framebuffers aren't supported altogether.
    bool mIsImageless = false;
    // Whether this is the default framebuffer (i.e. corresponding to the window surface).
    bool mIsDefault = false;
};
1895
// Helper for commands recorded inside a render pass, along with the render-pass state
// (attachments, load/store ops, clear values, render area) needed to begin/end the pass when
// flushing to the primary command buffer.
class RenderPassCommandBufferHelper final : public CommandBufferHelperCommon
{
  public:
    RenderPassCommandBufferHelper();
    ~RenderPassCommandBufferHelper();

    angle::Result initialize(ErrorContext *context);

    angle::Result reset(ErrorContext *context,
                        SecondaryCommandBufferCollector *commandBufferCollector);

    static constexpr bool ExecutesInline() { return RenderPassCommandBuffer::ExecutesInline(); }

    // Returns the command buffer for the current subpass.
    RenderPassCommandBuffer &getCommandBuffer()
    {
        return mCommandBuffers[mCurrentSubpassCommandBufferIndex];
    }

    // Empty iff nothing was recorded in the first subpass.
    bool empty() const { return mCommandBuffers[0].empty(); }

    angle::Result attachCommandPool(ErrorContext *context, SecondaryCommandPool *commandPool);
    void detachCommandPool(SecondaryCommandPool **commandPoolOut);
    void releaseCommandPool();

    void attachAllocator(SecondaryCommandMemoryAllocator *allocator);
    SecondaryCommandMemoryAllocator *detachAllocator();

    void assertCanBeRecycled();

    // Assert-enabled counterparts to the no-op markOpen/markClosed in the base class.
#if defined(ANGLE_ENABLE_ASSERTS)
    void markOpen() { getCommandBuffer().open(); }
    void markClosed() { getCommandBuffer().close(); }
#endif

    void imageRead(ContextVk *contextVk,
                   VkImageAspectFlags aspectFlags,
                   ImageLayout imageLayout,
                   ImageHelper *image);

    void imageWrite(ContextVk *contextVk,
                    gl::LevelIndex level,
                    uint32_t layerStart,
                    uint32_t layerCount,
                    VkImageAspectFlags aspectFlags,
                    ImageLayout imageLayout,
                    ImageHelper *image);

    // Track the attachment images (and their optional resolve images) used by this render pass.
    void colorImagesDraw(gl::LevelIndex level,
                         uint32_t layerStart,
                         uint32_t layerCount,
                         ImageHelper *image,
                         ImageHelper *resolveImage,
                         UniqueSerial imageSiblingSerial,
                         PackedAttachmentIndex packedAttachmentIndex);
    void depthStencilImagesDraw(gl::LevelIndex level,
                                uint32_t layerStart,
                                uint32_t layerCount,
                                ImageHelper *image,
                                ImageHelper *resolveImage,
                                UniqueSerial imageSiblingSerial);
    void fragmentShadingRateImageRead(ImageHelper *image);

    bool usesImage(const ImageHelper &image) const;
    bool startedAndUsesImageWithBarrier(const ImageHelper &image) const;

    angle::Result flushToPrimary(Context *context,
                                 CommandsState *commandsState,
                                 const RenderPass &renderPass,
                                 VkFramebuffer framebufferOverride);

    bool started() const { return mRenderPassStarted; }

    // Finalize the layout if image has any deferred layout transition.
    void finalizeImageLayout(Context *context,
                             const ImageHelper *image,
                             UniqueSerial imageSiblingSerial);

    angle::Result beginRenderPass(ContextVk *contextVk,
                                  RenderPassFramebuffer &&framebuffer,
                                  const gl::Rectangle &renderArea,
                                  const RenderPassDesc &renderPassDesc,
                                  const AttachmentOpsArray &renderPassAttachmentOps,
                                  const PackedAttachmentCount colorAttachmentCount,
                                  const PackedAttachmentIndex depthStencilAttachmentIndex,
                                  const PackedClearValuesArray &clearValues,
                                  const QueueSerial &queueSerial,
                                  RenderPassCommandBuffer **commandBufferOut);

    angle::Result endRenderPass(ContextVk *contextVk);

    angle::Result nextSubpass(ContextVk *contextVk, RenderPassCommandBuffer **commandBufferOut);

    void beginTransformFeedback(size_t validBufferCount,
                                const VkBuffer *counterBuffers,
                                const VkDeviceSize *counterBufferOffsets,
                                bool rebindBuffers);

    void endTransformFeedback();

    // Mark attachment contents as invalidated (glInvalidateFramebuffer) within |invalidateArea|.
    void invalidateRenderPassColorAttachment(const gl::State &state,
                                             size_t colorIndexGL,
                                             PackedAttachmentIndex attachmentIndex,
                                             const gl::Rectangle &invalidateArea);
    void invalidateRenderPassDepthAttachment(const gl::DepthStencilState &dsState,
                                             const gl::Rectangle &invalidateArea);
    void invalidateRenderPassStencilAttachment(const gl::DepthStencilState &dsState,
                                               GLuint framebufferStencilSize,
                                               const gl::Rectangle &invalidateArea);

    // Update the clear value used for a loadOp=CLEAR attachment of the started render pass.
    void updateRenderPassColorClear(PackedAttachmentIndex colorIndexVk,
                                    const VkClearValue &colorClearValue);
    void updateRenderPassDepthStencilClear(VkImageAspectFlags aspectFlags,
                                           const VkClearValue &clearValue);

    const gl::Rectangle &getRenderArea() const { return mRenderArea; }

    // If render pass is started with a small render area due to a small scissor, and if a new
    // larger scissor is specified, grow the render area to accommodate it.
    void growRenderArea(ContextVk *contextVk, const gl::Rectangle &newRenderArea);

    void resumeTransformFeedback();
    void pauseTransformFeedback();
    bool isTransformFeedbackStarted() const { return mValidTransformFeedbackBufferCount > 0; }
    bool isTransformFeedbackActiveUnpaused() const { return mIsTransformFeedbackActiveUnpaused; }

    // Returns the current value of mCounter and resets it to zero.
    uint32_t getAndResetCounter()
    {
        uint32_t count = mCounter;
        mCounter       = 0;
        return count;
    }

    RenderPassFramebuffer &getFramebuffer() { return mFramebuffer; }
    const RenderPassFramebuffer &getFramebuffer() const { return mFramebuffer; }

    // Record read/write access to the attachments (used to optimize load/store ops).
    void onColorAccess(PackedAttachmentIndex packedAttachmentIndex, ResourceAccess access);
    void onDepthAccess(ResourceAccess access);
    void onStencilAccess(ResourceAccess access);

    bool hasAnyColorAccess(PackedAttachmentIndex packedAttachmentIndex)
    {
        ASSERT(packedAttachmentIndex < mColorAttachmentsCount);
        return mColorAttachments[packedAttachmentIndex].hasAnyAccess();
    }
    bool hasAnyDepthAccess() { return mDepthAttachment.hasAnyAccess(); }
    bool hasAnyStencilAccess() { return mStencilAttachment.hasAnyAccess(); }

    // Add a resolve attachment to an already-started render pass (see
    // RenderPassFramebuffer::addColorResolveAttachment; used by glBlitFramebuffer optimization).
    void addColorResolveAttachment(size_t colorIndexGL,
                                   ImageHelper *image,
                                   VkImageView view,
                                   gl::LevelIndex level,
                                   uint32_t layerStart,
                                   uint32_t layerCount,
                                   UniqueSerial imageSiblingSerial);
    void addDepthStencilResolveAttachment(ImageHelper *image,
                                          VkImageView view,
                                          VkImageAspectFlags aspects,
                                          gl::LevelIndex level,
                                          uint32_t layerStart,
                                          uint32_t layerCount,
                                          UniqueSerial imageSiblingSerial);

    // Whether depth is written by commands or cleared by the render pass loadOp.
    bool hasDepthWriteOrClear() const
    {
        return mDepthAttachment.hasWriteAccess() ||
               mAttachmentOps[mDepthStencilAttachmentIndex].loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR;
    }

    // Whether stencil is written by commands or cleared by the render pass stencilLoadOp.
    bool hasStencilWriteOrClear() const
    {
        return mStencilAttachment.hasWriteAccess() ||
               mAttachmentOps[mDepthStencilAttachmentIndex].stencilLoadOp ==
                   VK_ATTACHMENT_LOAD_OP_CLEAR;
    }

    const RenderPassDesc &getRenderPassDesc() const { return mRenderPassDesc; }
    const AttachmentOpsArray &getAttachmentOps() const { return mAttachmentOps; }

    void setFramebufferFetchMode(FramebufferFetchMode framebufferFetchMode)
    {
        mRenderPassDesc.setFramebufferFetchMode(framebufferFetchMode);
    }

    // See mImageOptimizeForPresent below.
    void setImageOptimizeForPresent(ImageHelper *image) { mImageOptimizeForPresent = image; }
    bool isImageOptimizedForPresent(const ImageHelper *image) const
    {
        return mImageOptimizeForPresent == image;
    }

    // Records that glMemoryBarrier was issued, but only if a render pass has actually started.
    void setGLMemoryBarrierIssued()
    {
        if (mRenderPassStarted)
        {
            mHasGLMemoryBarrierIssued = true;
        }
    }
    std::string getCommandDiagnostics();

    // Readonly depth stencil mode and feedback loop mode
    void updateDepthReadOnlyMode(RenderPassUsageFlags dsUsageFlags);
    void updateStencilReadOnlyMode(RenderPassUsageFlags dsUsageFlags);
    void updateDepthStencilReadOnlyMode(RenderPassUsageFlags dsUsageFlags,
                                        VkImageAspectFlags dsAspectFlags);

    void collectRefCountedEventsGarbage(Renderer *renderer,
                                        RefCountedEventsGarbageRecycler *garbageRecycler);

    void updatePerfCountersForDynamicRenderingInstance(ErrorContext *context,
                                                       angle::VulkanPerfCounters *countersOut);

    // Whether this render pass targets the default framebuffer (window surface).
    bool isDefault() const { return mFramebuffer.isDefault(); }

  private:
    uint32_t getSubpassCommandBufferCount() const { return mCurrentSubpassCommandBufferIndex + 1; }

    angle::Result initializeCommandBuffer(ErrorContext *context);
    angle::Result beginRenderPassCommandBuffer(ContextVk *contextVk);
    angle::Result endRenderPassCommandBuffer(ContextVk *contextVk);

    uint32_t getRenderPassWriteCommandCount()
    {
        // All subpasses are chained (no subpasses running in parallel), so the cmd count can be
        // considered continuous among subpasses.
        return mPreviousSubpassesCmdCount + getCommandBuffer().getRenderPassWriteCommandCount();
    }

    void updateStartedRenderPassWithDepthStencilMode(RenderPassAttachment *resolveAttachment,
                                                     bool renderPassHasWriteOrClear,
                                                     RenderPassUsageFlags dsUsageFlags,
                                                     RenderPassUsage readOnlyAttachmentUsage);

    // We can't determine the image layout at the renderpass start time since their full usage
    // aren't known until later time. We finalize the layout when either ImageHelper object is
    // released or when renderpass ends.
    void finalizeColorImageLayout(Context *context,
                                  ImageHelper *image,
                                  PackedAttachmentIndex packedAttachmentIndex,
                                  bool isResolveImage);
    void finalizeColorImageLoadStore(Context *context, PackedAttachmentIndex packedAttachmentIndex);
    void finalizeDepthStencilImageLayout(Context *context);
    void finalizeDepthStencilResolveImageLayout(Context *context);
    void finalizeDepthStencilLoadStore(Context *context);

    void finalizeColorImageLayoutAndLoadStore(Context *context,
                                              PackedAttachmentIndex packedAttachmentIndex);
    void finalizeDepthStencilImageLayoutAndLoadStore(Context *context);
    void finalizeFragmentShadingRateImageLayout(Context *context);

    // When using Vulkan secondary command buffers, each subpass must be recorded in a separate
    // command buffer. Currently ANGLE produces render passes with at most 2 subpasses.
    static constexpr size_t kMaxSubpassCount = 2;
    std::array<RenderPassCommandBuffer, kMaxSubpassCount> mCommandBuffers;
    uint32_t mCurrentSubpassCommandBufferIndex;

    // RenderPass state
    uint32_t mCounter;
    RenderPassDesc mRenderPassDesc;
    AttachmentOpsArray mAttachmentOps;
    RenderPassFramebuffer mFramebuffer;
    gl::Rectangle mRenderArea;
    PackedClearValuesArray mClearValues;
    bool mRenderPassStarted;

    // Transform feedback state
    gl::TransformFeedbackBuffersArray<VkBuffer> mTransformFeedbackCounterBuffers;
    gl::TransformFeedbackBuffersArray<VkDeviceSize> mTransformFeedbackCounterBufferOffsets;
    uint32_t mValidTransformFeedbackBufferCount;
    bool mRebindTransformFeedbackBuffers;
    bool mIsTransformFeedbackActiveUnpaused;

    // State tracking for whether to optimize the storeOp to DONT_CARE
    uint32_t mPreviousSubpassesCmdCount;

    // Keep track of the depth/stencil attachment index
    PackedAttachmentIndex mDepthStencilAttachmentIndex;

    // Array size of mColorAttachments
    PackedAttachmentCount mColorAttachmentsCount;
    // Attached render target images. Color and depth resolve images always come last.
    PackedRenderPassAttachmentArray mColorAttachments;
    PackedRenderPassAttachmentArray mColorResolveAttachments;

    RenderPassAttachment mDepthAttachment;
    RenderPassAttachment mDepthResolveAttachment;

    RenderPassAttachment mStencilAttachment;
    RenderPassAttachment mStencilResolveAttachment;

    // NOTE(review): "Atachment" spelling kept as-is; renaming requires updating vk_helpers.cpp.
    RenderPassAttachment mFragmentShadingRateAtachment;

    // This is last renderpass before present and this is the image that will be presented. We can
    // use final layout of the render pass to transition it to the presentable layout. With dynamic
    // rendering, the barrier is recorded after the pass without needing an outside render pass
    // command buffer.
    ImageHelper *mImageOptimizeForPresent;
    ImageLayout mImageOptimizeForPresentOriginalLayout;

    // The list of VkEvents copied from RefCountedEventArray
    EventArray mVkEventArray;

    friend class CommandBufferHelperCommon;
};
2198
// The following class helps support both Vulkan and ANGLE secondary command buffers by
// encapsulating their differences.
template <typename CommandBufferHelperT>
class CommandBufferRecycler
{
  public:
    CommandBufferRecycler() { mCommandBufferHelperFreeList.reserve(8); }
    ~CommandBufferRecycler() = default;

    // Destroys the recycled helpers; presumably must be called before destruction -- see
    // vk_helpers.cpp.
    void onDestroy();

    // Hands out a command buffer helper, attached to the given pool/allocator; reuse of
    // free-listed helpers is implied by the recycler design -- see vk_helpers.cpp.
    angle::Result getCommandBufferHelper(ErrorContext *context,
                                         SecondaryCommandPool *commandPool,
                                         SecondaryCommandMemoryAllocator *commandsAllocator,
                                         CommandBufferHelperT **commandBufferHelperOut);

    // Returns a helper to the free list for later reuse.
    void recycleCommandBufferHelper(CommandBufferHelperT **commandBuffer);

  private:
    // Guards mCommandBufferHelperFreeList; recycling may happen from multiple threads.
    angle::SimpleMutex mMutex;
    std::vector<CommandBufferHelperT *> mCommandBufferHelperFreeList;
};
2221
// The source of update to an ImageHelper
enum class UpdateSource
{
    // Clear an image subresource.
    Clear,
    // Clear part of an image subresource (see ClearTextureMode::PartialClear).
    ClearPartial,
    // Clear only the emulated channels of the subresource. This operation is more expensive than
    // Clear, and so is only used for emulated color formats and only for external images. Color
    // only because depth or stencil clear is already per channel, so Clear works for them.
    // External only because they may contain data that needs to be preserved. Additionally, this
    // is a one-time only clear. Once the emulated channels are cleared, ANGLE ensures that they
    // remain untouched.
    ClearEmulatedChannelsOnly,
    // When an image with emulated channels is invalidated, a clear may be restaged to keep the
    // contents of the emulated channels defined. This is given a dedicated enum value, so it can
    // be removed if the invalidate is undone at the end of the render pass.
    ClearAfterInvalidate,
    // The source of the copy is a buffer.
    Buffer,
    // The source of the copy is an image.
    Image,
};

// When/how a staged image update is applied.  Naming suggests: immediately via the unlocked tail
// call, immediately on the current thread, or deferred -- confirm usage in ImageHelper.
enum class ApplyImageUpdate
{
    ImmediatelyInUnlockedTailCall,
    Immediately,
    Defer,
};
2251
// Combined depth+stencil aspect mask, typed as VkImageAspectFlagBits for convenience.
constexpr VkImageAspectFlagBits IMAGE_ASPECT_DEPTH_STENCIL =
    static_cast<VkImageAspectFlagBits>(VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);

// Returns whether |formatID| supports |featureBits| under the given tiling mode.
bool FormatHasNecessaryFeature(Renderer *renderer,
                               angle::FormatID formatID,
                               VkImageTiling tilingMode,
                               VkFormatFeatureFlags featureBits);

// Returns whether a copy with the given source usage and destination format/tiling can be done
// with transfer (copy) commands -- see definition in vk_helpers.cpp for the exact criteria.
bool CanCopyWithTransfer(Renderer *renderer,
                         VkImageUsageFlags srcUsage,
                         angle::FormatID dstFormatID,
                         VkImageTiling dstTilingMode);
2264
2265 class ImageViewHelper;
2266 class ImageHelper final : public Resource, public angle::Subject
2267 {
2268 public:
2269 ImageHelper();
2270 ~ImageHelper() override;
2271
2272 angle::Result init(ErrorContext *context,
2273 gl::TextureType textureType,
2274 const VkExtent3D &extents,
2275 const Format &format,
2276 GLint samples,
2277 VkImageUsageFlags usage,
2278 gl::LevelIndex firstLevel,
2279 uint32_t mipLevels,
2280 uint32_t layerCount,
2281 bool isRobustResourceInitEnabled,
2282 bool hasProtectedContent);
2283 angle::Result initFromCreateInfo(ErrorContext *context,
2284 const VkImageCreateInfo &requestedCreateInfo,
2285 VkMemoryPropertyFlags memoryPropertyFlags);
2286 angle::Result copyToBufferOneOff(ErrorContext *context,
2287 BufferHelper *stagingBuffer,
2288 VkBufferImageCopy copyRegion);
2289 angle::Result initMSAASwapchain(ErrorContext *context,
2290 gl::TextureType textureType,
2291 const VkExtent3D &extents,
2292 bool rotatedAspectRatio,
2293 angle::FormatID intendedFormatID,
2294 angle::FormatID actualFormatID,
2295 GLint samples,
2296 VkImageUsageFlags usage,
2297 gl::LevelIndex firstLevel,
2298 uint32_t mipLevels,
2299 uint32_t layerCount,
2300 bool isRobustResourceInitEnabled,
2301 bool hasProtectedContent);
2302 angle::Result initExternal(ErrorContext *context,
2303 gl::TextureType textureType,
2304 const VkExtent3D &extents,
2305 angle::FormatID intendedFormatID,
2306 angle::FormatID actualFormatID,
2307 GLint samples,
2308 VkImageUsageFlags usage,
2309 VkImageCreateFlags additionalCreateFlags,
2310 ImageLayout initialLayout,
2311 const void *externalImageCreateInfo,
2312 gl::LevelIndex firstLevel,
2313 uint32_t mipLevels,
2314 uint32_t layerCount,
2315 bool isRobustResourceInitEnabled,
2316 bool hasProtectedContent,
2317 YcbcrConversionDesc conversionDesc,
2318 const void *compressionControl);
2319 VkResult initMemory(ErrorContext *context,
2320 const MemoryProperties &memoryProperties,
2321 VkMemoryPropertyFlags flags,
2322 VkMemoryPropertyFlags excludedFlags,
2323 const VkMemoryRequirements *memoryRequirements,
2324 const bool allocateDedicatedMemory,
2325 MemoryAllocationType allocationType,
2326 VkMemoryPropertyFlags *flagsOut,
2327 VkDeviceSize *sizeOut);
2328 angle::Result initMemoryAndNonZeroFillIfNeeded(ErrorContext *context,
2329 bool hasProtectedContent,
2330 const MemoryProperties &memoryProperties,
2331 VkMemoryPropertyFlags flags,
2332 MemoryAllocationType allocationType);
2333 angle::Result initExternalMemory(ErrorContext *context,
2334 const MemoryProperties &memoryProperties,
2335 const VkMemoryRequirements &memoryRequirements,
2336 uint32_t extraAllocationInfoCount,
2337 const void **extraAllocationInfo,
2338 DeviceQueueIndex currentDeviceQueueIndex,
2339 VkMemoryPropertyFlags flags);
2340
2341 static constexpr VkImageUsageFlags kDefaultImageViewUsageFlags = 0;
2342 angle::Result initLayerImageView(ErrorContext *context,
2343 gl::TextureType textureType,
2344 VkImageAspectFlags aspectMask,
2345 const gl::SwizzleState &swizzleMap,
2346 ImageView *imageViewOut,
2347 LevelIndex baseMipLevelVk,
2348 uint32_t levelCount,
2349 uint32_t baseArrayLayer,
2350 uint32_t layerCount) const;
2351 angle::Result initLayerImageViewWithUsage(ErrorContext *context,
2352 gl::TextureType textureType,
2353 VkImageAspectFlags aspectMask,
2354 const gl::SwizzleState &swizzleMap,
2355 ImageView *imageViewOut,
2356 LevelIndex baseMipLevelVk,
2357 uint32_t levelCount,
2358 uint32_t baseArrayLayer,
2359 uint32_t layerCount,
2360 VkImageUsageFlags imageUsageFlags) const;
2361 angle::Result initLayerImageViewWithYuvModeOverride(ErrorContext *context,
2362 gl::TextureType textureType,
2363 VkImageAspectFlags aspectMask,
2364 const gl::SwizzleState &swizzleMap,
2365 ImageView *imageViewOut,
2366 LevelIndex baseMipLevelVk,
2367 uint32_t levelCount,
2368 uint32_t baseArrayLayer,
2369 uint32_t layerCount,
2370 gl::YuvSamplingMode yuvSamplingMode,
2371 VkImageUsageFlags imageUsageFlags) const;
2372 angle::Result initReinterpretedLayerImageView(ErrorContext *context,
2373 gl::TextureType textureType,
2374 VkImageAspectFlags aspectMask,
2375 const gl::SwizzleState &swizzleMap,
2376 ImageView *imageViewOut,
2377 LevelIndex baseMipLevelVk,
2378 uint32_t levelCount,
2379 uint32_t baseArrayLayer,
2380 uint32_t layerCount,
2381 VkImageUsageFlags imageUsageFlags,
2382 angle::FormatID imageViewFormat) const;
    // Create a 2D[Array] image for staging purposes. Used by:
2384 //
2385 // - TextureVk::copySubImageImplWithDraw
2386 // - FramebufferVk::readPixelsImpl
2387 //
2388 angle::Result init2DStaging(ErrorContext *context,
2389 bool hasProtectedContent,
2390 const MemoryProperties &memoryProperties,
2391 const gl::Extents &glExtents,
2392 angle::FormatID intendedFormatID,
2393 angle::FormatID actualFormatID,
2394 VkImageUsageFlags usage,
2395 uint32_t layerCount);
2396 // Create an image for staging purposes. Used by:
2397 //
2398 // - TextureVk::copyAndStageImageData
2399 //
2400 angle::Result initStaging(ErrorContext *context,
2401 bool hasProtectedContent,
2402 const MemoryProperties &memoryProperties,
2403 VkImageType imageType,
2404 const VkExtent3D &extents,
2405 angle::FormatID intendedFormatID,
2406 angle::FormatID actualFormatID,
2407 GLint samples,
2408 VkImageUsageFlags usage,
2409 uint32_t mipLevels,
2410 uint32_t layerCount);
2411 // Create a multisampled image for use as the implicit image in multisampled render to texture
2412 // rendering. If LAZILY_ALLOCATED memory is available, it will prefer that.
2413 angle::Result initImplicitMultisampledRenderToTexture(ErrorContext *context,
2414 bool hasProtectedContent,
2415 const MemoryProperties &memoryProperties,
2416 gl::TextureType textureType,
2417 GLint samples,
2418 const ImageHelper &resolveImage,
2419 const VkExtent3D &multisampleImageExtents,
2420 bool isRobustResourceInitEnabled);
2421
2422 // Helper for initExternal and users to automatically derive the appropriate VkImageCreateInfo
2423 // pNext chain based on the given parameters, and adjust create flags. In some cases, these
2424 // shouldn't be automatically derived, for example when importing images through
2425 // EXT_external_objects and ANGLE_external_objects_flags.
2426 static constexpr uint32_t kImageListFormatCount = 2;
2427 using ImageListFormats = std::array<VkFormat, kImageListFormatCount>;
2428 static const void *DeriveCreateInfoPNext(
2429 ErrorContext *context,
2430 VkImageUsageFlags usage,
2431 angle::FormatID actualFormatID,
2432 const void *pNext,
2433 VkImageFormatListCreateInfoKHR *imageFormatListInfoStorage,
2434 ImageListFormats *imageListFormatsStorage,
2435 VkImageCreateFlags *createFlagsOut);
2436
2437 // Check whether the given format supports the provided flags.
2438 enum class FormatSupportCheck
2439 {
2440 OnlyQuerySuccess,
2441 RequireMultisampling
2442 };
2443 static bool FormatSupportsUsage(Renderer *renderer,
2444 VkFormat format,
2445 VkImageType imageType,
2446 VkImageTiling tilingMode,
2447 VkImageUsageFlags usageFlags,
2448 VkImageCreateFlags createFlags,
2449 void *formatInfoPNext,
2450 void *propertiesPNext,
2451 const FormatSupportCheck formatSupportCheck);
2452
2453 // Image formats used for the creation of imageless framebuffers.
2454 using ImageFormats = angle::FixedVector<VkFormat, kImageListFormatCount>;
    // Accessors for the set of VkFormats that views of this image may use (fed to
    // imageless-framebuffer creation via the "ImageFormats" fixed vector above).
    ImageFormats &getViewFormats() { return mViewFormats; }
    const ImageFormats &getViewFormats() const { return mViewFormats; }
2457
2458 // Helper for initExternal and users to extract the view formats of the image from the pNext
2459 // chain in VkImageCreateInfo.
2460 void deriveImageViewFormatFromCreateInfoPNext(VkImageCreateInfo &imageInfo,
2461 ImageFormats &formatOut);
2462
2463 // Release the underlying VkImage object for garbage collection.
2464 void releaseImage(Renderer *renderer);
    // Similar to releaseImage, but also notifies all contexts in the same share group to stop
    // accessing it.
2467 void releaseImageFromShareContexts(Renderer *renderer,
2468 ContextVk *contextVk,
2469 UniqueSerial imageSiblingSerial);
2470 void finalizeImageLayoutInShareContexts(Renderer *renderer,
2471 ContextVk *contextVk,
2472 UniqueSerial imageSiblingSerial);
2473
2474 void releaseStagedUpdates(Renderer *renderer);
2475
valid()2476 bool valid() const { return mImage.valid(); }
2477
2478 VkImageAspectFlags getAspectFlags() const;
2479 // True if image contains both depth & stencil aspects
2480 bool isCombinedDepthStencilFormat() const;
2481 void destroy(Renderer *renderer);
release(Renderer * renderer)2482 void release(Renderer *renderer) { releaseImage(renderer); }
2483
2484 void init2DWeakReference(ErrorContext *context,
2485 VkImage handle,
2486 const gl::Extents &glExtents,
2487 bool rotatedAspectRatio,
2488 angle::FormatID intendedFormatID,
2489 angle::FormatID actualFormatID,
2490 VkImageCreateFlags createFlags,
2491 VkImageUsageFlags usage,
2492 GLint samples,
2493 bool isRobustResourceInitEnabled);
2494 void resetImageWeakReference();
2495
    // Raw Vulkan handle and backing-allocation accessors.
    const Image &getImage() const { return mImage; }
    const DeviceMemory &getDeviceMemory() const { return mDeviceMemory; }
    const Allocation &getAllocation() const { return mVmaAllocation; }

    // Creation-parameter accessors, mirroring the VkImageCreateInfo used.
    const VkImageCreateInfo &getVkImageCreateInfo() const { return mVkImageCreateInfo; }
    void setTilingMode(VkImageTiling tilingMode) { mTilingMode = tilingMode; }
    VkImageTiling getTilingMode() const { return mTilingMode; }
    VkImageCreateFlags getCreateFlags() const { return mCreateFlags; }
    VkImageUsageFlags getUsage() const { return mUsage; }
    VkImageType getType() const { return mImageType; }
    const VkExtent3D &getExtents() const { return mExtents; }
    // Extents adjusted for a rotated aspect ratio (see the rotatedAspectRatio
    // parameter of the init* methods).
    const VkExtent3D getRotatedExtents() const;
    uint32_t getLayerCount() const
    {
        ASSERT(valid());
        return mLayerCount;
    }
    uint32_t getLevelCount() const
    {
        ASSERT(valid());
        return mLevelCount;
    }
    // The "intended" format is the one requested through the API; the "actual"
    // format is the one the VkImage was created with.  The two differ exactly when
    // the format is emulated (see hasEmulatedImageFormat()).
    angle::FormatID getIntendedFormatID() const
    {
        ASSERT(valid());
        return mIntendedFormatID;
    }
    const angle::Format &getIntendedFormat() const
    {
        ASSERT(valid());
        return angle::Format::Get(mIntendedFormatID);
    }
    angle::FormatID getActualFormatID() const
    {
        ASSERT(valid());
        return mActualFormatID;
    }
    VkFormat getActualVkFormat(const Renderer *renderer) const
    {
        ASSERT(valid());
        return GetVkFormatFromFormatID(renderer, mActualFormatID);
    }
    const angle::Format &getActualFormat() const
    {
        ASSERT(valid());
        return angle::Format::Get(mActualFormatID);
    }
    // Emulation queries: whether the actual format carries channels (or whole
    // aspects) that the intended format does not have.
    bool hasEmulatedImageChannels() const;
    bool hasEmulatedDepthChannel() const;
    bool hasEmulatedStencilChannel() const;
    bool hasEmulatedImageFormat() const { return mActualFormatID != mIntendedFormatID; }
    bool hasInefficientlyEmulatedImageFormat() const;
    GLint getSamples() const { return mSamples; }

    // Unique serial identifying this VkImage, used for cache keys.
    ImageSerial getImageSerial() const
    {
        ASSERT(valid() && mImageSerial.valid());
        return mImageSerial;
    }

    // Layout tracking.  setCurrentImageLayout only updates the bookkeeping; it does
    // not record a barrier.
    void setCurrentImageLayout(Renderer *renderer, ImageLayout newLayout);
    ImageLayout getCurrentImageLayout() const { return mCurrentLayout; }
    VkImageLayout getCurrentLayout() const;
    const QueueSerial &getBarrierQueueSerial() const { return mBarrierQueueSerial; }
2560
2561 gl::Extents getLevelExtents(LevelIndex levelVk) const;
2562 // Helper function to calculate the extents of a render target created for a certain mip of the
2563 // image.
2564 gl::Extents getLevelExtents2D(LevelIndex levelVk) const;
2565 gl::Extents getRotatedLevelExtents2D(LevelIndex levelVk) const;
2566
2567 bool isDepthOrStencil() const;
2568
2569 void setRenderPassUsageFlag(RenderPassUsage flag);
2570 void clearRenderPassUsageFlag(RenderPassUsage flag);
2571 void resetRenderPassUsageFlags();
2572 bool hasRenderPassUsageFlag(RenderPassUsage flag) const;
2573 bool hasAnyRenderPassUsageFlags() const;
2574 bool usedByCurrentRenderPassAsAttachmentAndSampler(RenderPassUsage textureSamplerUsage) const;
2575
2576 static void Copy(Renderer *renderer,
2577 ImageHelper *srcImage,
2578 ImageHelper *dstImage,
2579 const gl::Offset &srcOffset,
2580 const gl::Offset &dstOffset,
2581 const gl::Extents ©Size,
2582 const VkImageSubresourceLayers &srcSubresources,
2583 const VkImageSubresourceLayers &dstSubresources,
2584 OutsideRenderPassCommandBuffer *commandBuffer);
2585
2586 static angle::Result CopyImageSubData(const gl::Context *context,
2587 ImageHelper *srcImage,
2588 GLint srcLevel,
2589 GLint srcX,
2590 GLint srcY,
2591 GLint srcZ,
2592 ImageHelper *dstImage,
2593 GLint dstLevel,
2594 GLint dstX,
2595 GLint dstY,
2596 GLint dstZ,
2597 GLsizei srcWidth,
2598 GLsizei srcHeight,
2599 GLsizei srcDepth);
2600
2601 // Generate mipmap from level 0 into the rest of the levels with blit.
2602 angle::Result generateMipmapsWithBlit(ContextVk *contextVk,
2603 LevelIndex baseLevel,
2604 LevelIndex maxLevel);
2605
2606 // Resolve this image into a destination image. This image should be in the TransferSrc layout.
2607 // The destination image is automatically transitioned into TransferDst.
2608 void resolve(ImageHelper *dst,
2609 const VkImageResolve ®ion,
2610 OutsideRenderPassCommandBuffer *commandBuffer);
2611
2612 // Data staging
2613 void removeSingleSubresourceStagedUpdates(ContextVk *contextVk,
2614 gl::LevelIndex levelIndexGL,
2615 uint32_t layerIndex,
2616 uint32_t layerCount);
2617 void removeSingleStagedClearAfterInvalidate(gl::LevelIndex levelIndexGL,
2618 uint32_t layerIndex,
2619 uint32_t layerCount);
2620 void removeStagedUpdates(ErrorContext *context,
2621 gl::LevelIndex levelGLStart,
2622 gl::LevelIndex levelGLEnd);
2623
2624 angle::Result stagePartialClear(ContextVk *contextVk,
2625 const gl::Box &clearArea,
2626 const ClearTextureMode clearMode,
2627 gl::TextureType textureType,
2628 uint32_t levelIndex,
2629 uint32_t layerIndex,
2630 uint32_t layerCount,
2631 GLenum type,
2632 const gl::InternalFormat &formatInfo,
2633 const Format &vkFormat,
2634 ImageAccess access,
2635 const uint8_t *data);
2636
2637 angle::Result stageSubresourceUpdateImpl(ContextVk *contextVk,
2638 const gl::ImageIndex &index,
2639 const gl::Extents &glExtents,
2640 const gl::Offset &offset,
2641 const gl::InternalFormat &formatInfo,
2642 const gl::PixelUnpackState &unpack,
2643 GLenum type,
2644 const uint8_t *pixels,
2645 const Format &vkFormat,
2646 ImageAccess access,
2647 const GLuint inputRowPitch,
2648 const GLuint inputDepthPitch,
2649 const GLuint inputSkipBytes,
2650 ApplyImageUpdate applyUpdate,
2651 bool *updateAppliedImmediatelyOut);
2652
2653 angle::Result stageSubresourceUpdate(ContextVk *contextVk,
2654 const gl::ImageIndex &index,
2655 const gl::Extents &glExtents,
2656 const gl::Offset &offset,
2657 const gl::InternalFormat &formatInfo,
2658 const gl::PixelUnpackState &unpack,
2659 GLenum type,
2660 const uint8_t *pixels,
2661 const Format &vkFormat,
2662 ImageAccess access,
2663 ApplyImageUpdate applyUpdate,
2664 bool *updateAppliedImmediatelyOut);
2665
2666 angle::Result stageSubresourceUpdateAndGetData(ContextVk *contextVk,
2667 size_t allocationSize,
2668 const gl::ImageIndex &imageIndex,
2669 const gl::Extents &glExtents,
2670 const gl::Offset &offset,
2671 uint8_t **destData,
2672 angle::FormatID formatID);
2673
2674 angle::Result stageSubresourceUpdateFromFramebuffer(const gl::Context *context,
2675 const gl::ImageIndex &index,
2676 const gl::Rectangle &sourceArea,
2677 const gl::Offset &dstOffset,
2678 const gl::Extents &dstExtent,
2679 const gl::InternalFormat &formatInfo,
2680 ImageAccess access,
2681 FramebufferVk *framebufferVk);
2682
2683 void stageSubresourceUpdateFromImage(RefCounted<ImageHelper> *image,
2684 const gl::ImageIndex &index,
2685 LevelIndex srcMipLevel,
2686 const gl::Offset &destOffset,
2687 const gl::Extents &glExtents,
2688 const VkImageType imageType);
2689
2690 // Takes an image and stages a subresource update for each level of it, including its full
2691 // extent and all its layers, at the specified GL level.
2692 void stageSubresourceUpdatesFromAllImageLevels(RefCounted<ImageHelper> *image,
2693 gl::LevelIndex baseLevel);
2694
2695 // Stage a clear to an arbitrary value.
2696 void stageClear(const gl::ImageIndex &index,
2697 VkImageAspectFlags aspectFlags,
2698 const VkClearValue &clearValue);
2699
2700 // Stage a clear based on robust resource init.
2701 angle::Result stageRobustResourceClearWithFormat(ContextVk *contextVk,
2702 const gl::ImageIndex &index,
2703 const gl::Extents &glExtents,
2704 const angle::Format &intendedFormat,
2705 const angle::Format &imageFormat);
2706 void stageRobustResourceClear(const gl::ImageIndex &index);
2707
2708 angle::Result stageResourceClearWithFormat(ContextVk *contextVk,
2709 const gl::ImageIndex &index,
2710 const gl::Extents &glExtents,
2711 const angle::Format &intendedFormat,
2712 const angle::Format &imageFormat,
2713 const VkClearValue &clearValue);
2714
2715 // Stage the currently allocated image as updates to base level and on, making this !valid().
2716 // This is used for:
2717 //
2718 // - Mipmap generation, where levelCount is 1 so only the base level is retained
2719 // - Image respecification, where every level (other than those explicitly skipped) is staged
2720 void stageSelfAsSubresourceUpdates(ContextVk *contextVk,
2721 uint32_t levelCount,
2722 gl::TextureType textureType,
2723 const gl::CubeFaceArray<gl::TexLevelMask> &skipLevels);
2724
2725 // Flush staged updates for a single subresource. Can optionally take a parameter to defer
2726 // clears to a subsequent RenderPass load op.
2727 angle::Result flushSingleSubresourceStagedUpdates(ContextVk *contextVk,
2728 gl::LevelIndex levelGL,
2729 uint32_t layer,
2730 uint32_t layerCount,
2731 ClearValuesArray *deferredClears,
2732 uint32_t deferredClearIndex);
2733
2734 // Flushes staged updates to a range of levels and layers from start to (but not including) end.
2735 // Due to the nature of updates (done wholly to a VkImageSubresourceLayers), some unsolicited
2736 // layers may also be updated.
2737 angle::Result flushStagedUpdates(ContextVk *contextVk,
2738 gl::LevelIndex levelGLStart,
2739 gl::LevelIndex levelGLEnd,
2740 uint32_t layerStart,
2741 uint32_t layerEnd,
2742 const gl::CubeFaceArray<gl::TexLevelMask> &skipLevels);
2743
2744 // Creates a command buffer and flushes all staged updates. This is used for one-time
2745 // initialization of resources that we don't expect to accumulate further staged updates, such
2746 // as with renderbuffers or surface images.
2747 angle::Result flushAllStagedUpdates(ContextVk *contextVk);
2748
2749 bool hasStagedUpdatesForSubresource(gl::LevelIndex levelGL,
2750 uint32_t layer,
2751 uint32_t layerCount) const;
2752 bool hasStagedUpdatesInAllocatedLevels() const;
2753 bool hasBufferSourcedStagedUpdatesInAllLevels() const;
2754
2755 bool removeStagedClearUpdatesAndReturnColor(gl::LevelIndex levelGL,
2756 const VkClearColorValue **color);
2757
2758 void recordWriteBarrier(Context *context,
2759 VkImageAspectFlags aspectMask,
2760 ImageLayout newLayout,
2761 gl::LevelIndex levelStart,
2762 uint32_t levelCount,
2763 uint32_t layerStart,
2764 uint32_t layerCount,
2765 OutsideRenderPassCommandBufferHelper *commands);
2766
2767 void recordReadSubresourceBarrier(Context *context,
2768 VkImageAspectFlags aspectMask,
2769 ImageLayout newLayout,
2770 gl::LevelIndex levelStart,
2771 uint32_t levelCount,
2772 uint32_t layerStart,
2773 uint32_t layerCount,
2774 OutsideRenderPassCommandBufferHelper *commands);
2775
    // Records a one-off write barrier directly into a primary command buffer,
    // bypassing the usual command-buffer-helper flow.  If a pending
    // acquire-next-image semaphore exists it is handed back through
    // acquireNextImageSemaphoreOut (forwarded to recordBarrierOneOffImpl) so the
    // caller can wait on it at submission -- see recordBarrierOneOffImpl.
    void recordWriteBarrierOneOff(Renderer *renderer,
                                  ImageLayout newLayout,
                                  PrimaryCommandBuffer *commandBuffer,
                                  VkSemaphore *acquireNextImageSemaphoreOut)
    {
        recordBarrierOneOffImpl(renderer, getAspectFlags(), newLayout, mCurrentDeviceQueueIndex,
                                commandBuffer, acquireNextImageSemaphoreOut);
    }
2784
2785 // This function can be used to prevent issuing redundant layout transition commands.
2786 bool isReadBarrierNecessary(Renderer *renderer, ImageLayout newLayout) const;
2787 bool isReadSubresourceBarrierNecessary(ImageLayout newLayout,
2788 gl::LevelIndex levelStart,
2789 uint32_t levelCount,
2790 uint32_t layerStart,
2791 uint32_t layerCount) const;
2792 bool isWriteBarrierNecessary(ImageLayout newLayout,
2793 gl::LevelIndex levelStart,
2794 uint32_t levelCount,
2795 uint32_t layerStart,
2796 uint32_t layerCount) const;
2797
2798 void recordReadBarrier(Context *context,
2799 VkImageAspectFlags aspectMask,
2800 ImageLayout newLayout,
2801 OutsideRenderPassCommandBufferHelper *commands);
2802
    // Whether moving to newDeviceQueueIndex requires a queue-family ownership
    // transfer: only the queue *family* matters, not the queue index within it.
    // NOTE: "Neccesary" is a long-standing typo preserved for interface stability.
    bool isQueueFamilyChangeNeccesary(DeviceQueueIndex newDeviceQueueIndex) const
    {
        return mCurrentDeviceQueueIndex.familyIndex() != newDeviceQueueIndex.familyIndex();
    }
2807
2808 void changeLayoutAndQueue(Context *context,
2809 VkImageAspectFlags aspectMask,
2810 ImageLayout newLayout,
2811 DeviceQueueIndex newDeviceQueueIndex,
2812 OutsideRenderPassCommandBuffer *commandBuffer);
2813
    // Updates the tracked layout and records any required barrier(s).  Note that the
    // function returns void; any semaphore to wait on is returned through semaphoreOut.
2815 void updateLayoutAndBarrier(Context *context,
2816 VkImageAspectFlags aspectMask,
2817 ImageLayout newLayout,
2818 BarrierType barrierType,
2819 const QueueSerial &queueSerial,
2820 PipelineBarrierArray *pipelineBarriers,
2821 EventBarrierArray *eventBarriers,
2822 RefCountedEventCollector *eventCollector,
2823 VkSemaphore *semaphoreOut);
2824
2825 // Performs an ownership transfer from an external instance or API.
2826 void acquireFromExternal(Context *context,
2827 DeviceQueueIndex externalQueueIndex,
2828 DeviceQueueIndex newDeviceQueueIndex,
2829 ImageLayout currentLayout,
2830 OutsideRenderPassCommandBuffer *commandBuffer);
2831
2832 // Performs an ownership transfer to an external instance or API.
2833 void releaseToExternal(Context *context,
2834 DeviceQueueIndex externalQueueIndex,
2835 ImageLayout desiredLayout,
2836 OutsideRenderPassCommandBuffer *commandBuffer);
2837
2838 // Returns true if the image is owned by an external API or instance.
    // Returns true if the image is owned by an external API or instance.
    bool isReleasedToExternal() const { return mIsReleasedToExternal; }
    // Returns true if the image was sourced from the FOREIGN queue.
    bool isForeignImage() const { return mIsForeignImage; }
    // Returns true if the image is currently owned by a foreign entity, i.e. its
    // current device queue index is the FOREIGN queue.
    bool isReleasedToForeign() const
    {
        return mCurrentDeviceQueueIndex == kForeignDeviceQueueIndex;
    }
2847
2848 // Marks the image as having been used by the FOREIGN queue. On the next barrier, it is
2849 // acquired from the FOREIGN queue again automatically.
2850 VkImageMemoryBarrier releaseToForeign(Renderer *renderer);
2851
    // GL level corresponding to Vulkan mip 0 of this image (used by the
    // toVkLevel/toGLLevel conversions below).  Requires a valid image.
    gl::LevelIndex getFirstAllocatedLevel() const
    {
        ASSERT(valid());
        return mFirstAllocatedLevel;
    }
2857 gl::LevelIndex getLastAllocatedLevel() const;
2858 LevelIndex toVkLevel(gl::LevelIndex levelIndexGL) const;
2859 gl::LevelIndex toGLLevel(LevelIndex levelIndexVk) const;
2860
2861 angle::Result copyImageDataToBuffer(ContextVk *contextVk,
2862 gl::LevelIndex sourceLevelGL,
2863 uint32_t layerCount,
2864 uint32_t baseLayer,
2865 const gl::Box &sourceArea,
2866 BufferHelper *dstBuffer,
2867 uint8_t **outDataPtr);
2868
2869 angle::Result copySurfaceImageToBuffer(DisplayVk *displayVk,
2870 gl::LevelIndex sourceLevelGL,
2871 uint32_t layerCount,
2872 uint32_t baseLayer,
2873 const gl::Box &sourceArea,
2874 vk::BufferHelper *bufferHelperOut);
2875
2876 angle::Result copyBufferToSurfaceImage(DisplayVk *displayVk,
2877 gl::LevelIndex destLevelGL,
2878 uint32_t layerCount,
2879 uint32_t baseLayer,
2880 const gl::Box &destArea,
2881 vk::BufferHelper *bufferHelper);
2882
2883 static angle::Result GetReadPixelsParams(ContextVk *contextVk,
2884 const gl::PixelPackState &packState,
2885 gl::Buffer *packBuffer,
2886 GLenum format,
2887 GLenum type,
2888 const gl::Rectangle &area,
2889 const gl::Rectangle &clippedArea,
2890 PackPixelsParams *paramsOut,
2891 GLuint *skipBytesOut);
2892
2893 angle::Result readPixelsForGetImage(ContextVk *contextVk,
2894 const gl::PixelPackState &packState,
2895 gl::Buffer *packBuffer,
2896 gl::LevelIndex levelGL,
2897 uint32_t layer,
2898 uint32_t layerCount,
2899 GLenum format,
2900 GLenum type,
2901 void *pixels);
2902
2903 angle::Result readPixelsForCompressedGetImage(ContextVk *contextVk,
2904 const gl::PixelPackState &packState,
2905 gl::Buffer *packBuffer,
2906 gl::LevelIndex levelGL,
2907 uint32_t layer,
2908 uint32_t layerCount,
2909 void *pixels);
2910
2911 angle::Result readPixelsWithCompute(ContextVk *contextVk,
2912 ImageHelper *src,
2913 const PackPixelsParams &packPixelsParams,
2914 const VkOffset3D &srcOffset,
2915 const VkExtent3D &srcExtent,
2916 ptrdiff_t pixelsOffset,
2917 const VkImageSubresourceLayers &srcSubresource);
2918
2919 angle::Result readPixels(ContextVk *contextVk,
2920 const gl::Rectangle &area,
2921 const PackPixelsParams &packPixelsParams,
2922 VkImageAspectFlagBits copyAspectFlags,
2923 gl::LevelIndex levelGL,
2924 uint32_t layer,
2925 void *pixels);
2926
2927 angle::Result calculateBufferInfo(ContextVk *contextVk,
2928 const gl::Extents &glExtents,
2929 const gl::InternalFormat &formatInfo,
2930 const gl::PixelUnpackState &unpack,
2931 GLenum type,
2932 bool is3D,
2933 GLuint *inputRowPitch,
2934 GLuint *inputDepthPitch,
2935 GLuint *inputSkipBytes);
2936
2937 void onRenderPassAttach(const QueueSerial &queueSerial);
2938
2939 // Mark a given subresource as written to. The subresource is identified by [levelStart,
2940 // levelStart + levelCount) and [layerStart, layerStart + layerCount).
2941 void onWrite(gl::LevelIndex levelStart,
2942 uint32_t levelCount,
2943 uint32_t layerStart,
2944 uint32_t layerCount,
2945 VkImageAspectFlags aspectFlags);
    // YCbCr conversion: a valid conversion desc implies an immutable sampler must
    // be used with this image.
    bool hasImmutableSampler() const { return mYcbcrConversionDesc.valid(); }
    // Non-zero only when the image uses an externally-defined format.
    uint64_t getExternalFormat() const { return mYcbcrConversionDesc.getExternalFormat(); }
    bool isYuvResolve() const { return mYcbcrConversionDesc.getExternalFormat() != 0; }
    // Forwards to YcbcrConversionDesc::updateChromaFilter for the given filter.
    bool updateChromaFilter(Renderer *renderer, VkFilter filter)
    {
        return mYcbcrConversionDesc.updateChromaFilter(renderer, filter);
    }
    const YcbcrConversionDesc &getYcbcrConversionDesc() const { return mYcbcrConversionDesc; }
    // Same desc with the conversion model forced to RGB_IDENTITY, i.e. sampling
    // the raw Y'CbCr values without color conversion.
    const YcbcrConversionDesc getY2YConversionDesc() const
    {
        YcbcrConversionDesc y2yDesc = mYcbcrConversionDesc;
        y2yDesc.updateConversionModel(VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY);
        return y2yDesc;
    }
2960
2961 static YcbcrConversionDesc deriveConversionDesc(ErrorContext *context,
2962 angle::FormatID actualFormatID,
2963 angle::FormatID intendedFormatID);
2964
2965 // Used by framebuffer and render pass functions to decide loadOps and invalidate/un-invalidate
2966 // render target contents.
2967 bool hasSubresourceDefinedContent(gl::LevelIndex level,
2968 uint32_t layerIndex,
2969 uint32_t layerCount) const;
2970 bool hasSubresourceDefinedStencilContent(gl::LevelIndex level,
2971 uint32_t layerIndex,
2972 uint32_t layerCount) const;
2973 void invalidateEntireLevelContent(vk::ErrorContext *context, gl::LevelIndex level);
2974 void invalidateSubresourceContent(ContextVk *contextVk,
2975 gl::LevelIndex level,
2976 uint32_t layerIndex,
2977 uint32_t layerCount,
2978 bool *preferToKeepContentsDefinedOut);
2979 void invalidateEntireLevelStencilContent(vk::ErrorContext *context, gl::LevelIndex level);
2980 void invalidateSubresourceStencilContent(ContextVk *contextVk,
2981 gl::LevelIndex level,
2982 uint32_t layerIndex,
2983 uint32_t layerCount,
2984 bool *preferToKeepContentsDefinedOut);
2985 void restoreSubresourceContent(gl::LevelIndex level, uint32_t layerIndex, uint32_t layerCount);
2986 void restoreSubresourceStencilContent(gl::LevelIndex level,
2987 uint32_t layerIndex,
2988 uint32_t layerCount);
2989 angle::Result reformatStagedBufferUpdates(ContextVk *contextVk,
2990 angle::FormatID srcFormatID,
2991 angle::FormatID dstFormatID);
2992 bool hasStagedImageUpdatesWithMismatchedFormat(gl::LevelIndex levelStart,
2993 gl::LevelIndex levelEnd,
2994 angle::FormatID formatID) const;
2995
    // Stashes the semaphore signaled by image acquisition; a previously stashed
    // semaphore must have been consumed (the ASSERT below enforces this).
    void setAcquireNextImageSemaphore(VkSemaphore semaphore)
    {
        ASSERT(semaphore != VK_NULL_HANDLE);
        ASSERT(!mAcquireNextImageSemaphore.valid());
        mAcquireNextImageSemaphore.setHandle(semaphore);
    }
    const Semaphore &getAcquireNextImageSemaphore() const { return mAcquireNextImageSemaphore; }
    void resetAcquireNextImageSemaphore() { mAcquireNextImageSemaphore.release(); }
    // True when the backing memory was imported (MemoryAllocationType::ImageExternal)
    // rather than allocated by ANGLE itself.
    bool isBackedByExternalMemory() const
    {
        return mMemoryAllocationType == MemoryAllocationType::ImageExternal;
    }
3008
3009 angle::Result initializeNonZeroMemory(ErrorContext *context,
3010 bool hasProtectedContent,
3011 VkMemoryPropertyFlags flags,
3012 VkDeviceSize size);
3013
3014 size_t getLevelUpdateCount(gl::LevelIndex level) const;
3015
3016 // Create event if needed and record the event in ImageHelper::mCurrentEvent.
3017 void setCurrentRefCountedEvent(Context *context, RefCountedEventArray *refCountedEventArray);
    // Drops the ref-counted events tracked for this image.
    void releaseCurrentRefCountedEvent(Context *context)
    {
        // This will also force next barrier use pipelineBarrier
        mCurrentEvent.release(context);
        mLastNonShaderReadOnlyEvent.release(context);
    }
3024 void updatePipelineStageAccessHistory();
3025
3026 private:
    // Padding warnings are enabled because operator== compares raw bytes with
    // memcmp; any implicit padding bytes would make that comparison unreliable.
    ANGLE_ENABLE_STRUCT_PADDING_WARNINGS
    // A staged clear covering whole layers of a level.
    struct ClearUpdate
    {
        bool operator==(const ClearUpdate &rhs) const
        {
            return memcmp(this, &rhs, sizeof(ClearUpdate)) == 0;
        }
        VkImageAspectFlags aspectFlags;
        VkClearValue value;
        uint32_t levelIndex;
        uint32_t layerIndex;
        uint32_t layerCount;
        // For ClearEmulatedChannelsOnly, mask of which channels to clear.
        VkColorComponentFlags colorMaskFlags;
    };
    ANGLE_DISABLE_STRUCT_PADDING_WARNINGS
    ANGLE_ENABLE_STRUCT_PADDING_WARNINGS
    // A staged clear of a sub-region (offset/extent) of a subresource; staged by
    // stagePartialClear.
    struct ClearPartialUpdate
    {
        // Byte-wise comparison; valid only because the struct has no implicit
        // padding (see the explicit _padding member below).
        bool operator==(const ClearPartialUpdate &rhs) const
        {
            return memcmp(this, &rhs, sizeof(ClearPartialUpdate)) == 0;
        }
        VkImageAspectFlags aspectFlags;
        VkClearValue clearValue;
        uint32_t levelIndex;
        uint32_t layerIndex;
        uint32_t layerCount;
        VkOffset3D offset;
        VkExtent3D extent;
        gl::TextureType textureType;
        // Explicit tail padding so every byte of the struct is initialized,
        // keeping the memcmp in operator== well-defined.
        uint8_t _padding[3];
    };
    ANGLE_DISABLE_STRUCT_PADDING_WARNINGS
    // A staged buffer-to-image copy.
    struct BufferUpdate
    {
        BufferHelper *bufferHelper;
        VkBufferImageCopy copyRegion;
        angle::FormatID formatID;
    };
    // A staged image-to-image copy.
    struct ImageUpdate
    {
        VkImageCopy copyRegion;
        angle::FormatID formatID;
    };
3072
3073 struct SubresourceUpdate : angle::NonCopyable
3074 {
3075 SubresourceUpdate();
3076 ~SubresourceUpdate();
3077 SubresourceUpdate(RefCounted<BufferHelper> *bufferIn,
3078 BufferHelper *bufferHelperIn,
3079 const VkBufferImageCopy ©Region,
3080 angle::FormatID formatID);
3081 SubresourceUpdate(RefCounted<ImageHelper> *imageIn,
3082 const VkImageCopy ©Region,
3083 angle::FormatID formatID);
3084 SubresourceUpdate(VkImageAspectFlags aspectFlags,
3085 const VkClearValue &clearValue,
3086 const gl::ImageIndex &imageIndex);
3087 SubresourceUpdate(const VkImageAspectFlags aspectFlags,
3088 const VkClearValue &clearValue,
3089 const gl::TextureType textureType,
3090 const uint32_t levelIndex,
3091 const uint32_t layerIndex,
3092 const uint32_t layerCount,
3093 const gl::Box &clearArea);
3094 SubresourceUpdate(VkImageAspectFlags aspectFlags,
3095 const VkClearValue &clearValue,
3096 gl::LevelIndex level,
3097 uint32_t layerIndex,
3098 uint32_t layerCount);
3099 SubresourceUpdate(VkColorComponentFlags colorMaskFlags,
3100 const VkClearColorValue &clearValue,
3101 const gl::ImageIndex &imageIndex);
3102
3103 SubresourceUpdate(const SubresourceUpdate &other);
3104 SubresourceUpdate(SubresourceUpdate &&other);
3105
3106 SubresourceUpdate &operator=(SubresourceUpdate &&other);
3107
3108 void release(Renderer *renderer);
3109
3110 // Returns true if the update's layer range exact matches [layerIndex,
3111 // layerIndex+layerCount) range
3112 bool matchesLayerRange(uint32_t layerIndex, uint32_t layerCount) const;
3113 // Returns true if the update is to any layer within range of [layerIndex,
3114 // layerIndex+layerCount)
3115 bool intersectsLayerRange(uint32_t layerIndex, uint32_t layerCount) const;
3116 void getDestSubresource(uint32_t imageLayerCount,
3117 uint32_t *baseLayerOut,
3118 uint32_t *layerCountOut) const;
3119 VkImageAspectFlags getDestAspectFlags() const;
3120
3121 UpdateSource updateSource;
3122 union
3123 {
3124 ClearUpdate clear;
3125 ClearPartialUpdate clearPartial;
3126 BufferUpdate buffer;
3127 ImageUpdate image;
3128 } data;
3129 union
3130 {
3131 RefCounted<ImageHelper> *image;
3132 RefCounted<BufferHelper> *buffer;
3133 } refCounted;
3134 };
3135 using SubresourceUpdates = std::deque<SubresourceUpdate>;
3136
3137 // Up to 8 layers are tracked per level for whether contents are defined, above which the
3138 // contents are considered unconditionally defined. This handles the more likely scenarios of:
3139 //
3140 // - Single layer framebuffer attachments,
3141 // - Cube map framebuffer attachments,
3142 // - Multi-view rendering.
3143 //
3144 // If there arises a need to optimize an application that invalidates layer >= 8, this can
3145 // easily be raised to 32 to 64 bits. Beyond that, an additional hash map can be used to track
3146 // such subresources.
3147 static constexpr uint32_t kMaxContentDefinedLayerCount = 8;
3148 using LevelContentDefinedMask = angle::BitSet8<kMaxContentDefinedLayerCount>;
3149
3150 void deriveExternalImageTiling(const void *createInfoChain);
3151
3152 // Used to initialize ImageFormats from actual format, with no pNext from a VkImageCreateInfo
3153 // object.
3154 void setImageFormatsFromActualFormat(VkFormat actualFormat, ImageFormats &imageFormatsOut);
3155
3156 // Called from flushStagedUpdates, removes updates that are later superseded by another. This
3157 // cannot be done at the time the updates were staged, as the image is not created (and thus the
3158 // extents are not known).
3159 void removeSupersededUpdates(ContextVk *contextVk, const gl::TexLevelMask skipLevelsAllFaces);
3160
3161 void initImageMemoryBarrierStruct(Renderer *renderer,
3162 VkImageAspectFlags aspectMask,
3163 ImageLayout newLayout,
3164 uint32_t newQueueFamilyIndex,
3165 VkImageMemoryBarrier *imageMemoryBarrier) const;
3166
3167 // Generalized to accept both "primary" and "secondary" command buffers.
3168 template <typename CommandBufferT>
3169 void barrierImpl(Renderer *renderer,
3170 VkImageAspectFlags aspectMask,
3171 ImageLayout newLayout,
3172 DeviceQueueIndex newDeviceQueueIndex,
3173 RefCountedEventCollector *eventCollector,
3174 CommandBufferT *commandBuffer,
3175 VkSemaphore *acquireNextImageSemaphoreOut);
3176
3177 template <typename CommandBufferT>
3178 void recordBarrierImpl(Context *context,
3179 VkImageAspectFlags aspectMask,
3180 ImageLayout newLayout,
3181 DeviceQueueIndex newDeviceQueueIndex,
3182 RefCountedEventCollector *eventCollector,
3183 CommandBufferT *commandBuffer,
3184 VkSemaphore *acquireNextImageSemaphoreOut);
3185
3186 void recordBarrierOneOffImpl(Renderer *renderer,
3187 VkImageAspectFlags aspectMask,
3188 ImageLayout newLayout,
3189 DeviceQueueIndex newDeviceQueueIndex,
3190 PrimaryCommandBuffer *commandBuffer,
3191 VkSemaphore *acquireNextImageSemaphoreOut);
3192
3193 void setSubresourcesWrittenSinceBarrier(gl::LevelIndex levelStart,
3194 uint32_t levelCount,
3195 uint32_t layerStart,
3196 uint32_t layerCount);
3197
3198 void resetSubresourcesWrittenSinceBarrier();
areLevelSubresourcesWrittenWithinMaskRange(uint32_t level,ImageLayerWriteMask & layerMask)3199 bool areLevelSubresourcesWrittenWithinMaskRange(uint32_t level,
3200 ImageLayerWriteMask &layerMask) const
3201 {
3202 return (mSubresourcesWrittenSinceBarrier[level] & layerMask) != 0;
3203 }
3204
3205 // If the image has emulated channels, we clear them once so as not to leave garbage on those
3206 // channels.
3207 VkColorComponentFlags getEmulatedChannelsMask() const;
3208 void stageClearIfEmulatedFormat(bool isRobustResourceInitEnabled, bool isExternalImage);
3209 bool verifyEmulatedClearsAreBeforeOtherUpdates(const SubresourceUpdates &updates);
3210
3211 // Clear either color or depth/stencil based on image format.
3212 void clear(Renderer *renderer,
3213 VkImageAspectFlags aspectFlags,
3214 const VkClearValue &value,
3215 LevelIndex mipLevel,
3216 uint32_t baseArrayLayer,
3217 uint32_t layerCount,
3218 OutsideRenderPassCommandBuffer *commandBuffer);
3219
3220 void clearColor(Renderer *renderer,
3221 const VkClearColorValue &color,
3222 LevelIndex baseMipLevelVk,
3223 uint32_t levelCount,
3224 uint32_t baseArrayLayer,
3225 uint32_t layerCount,
3226 OutsideRenderPassCommandBuffer *commandBuffer);
3227
3228 void clearDepthStencil(Renderer *renderer,
3229 VkImageAspectFlags clearAspectFlags,
3230 const VkClearDepthStencilValue &depthStencil,
3231 LevelIndex baseMipLevelVk,
3232 uint32_t levelCount,
3233 uint32_t baseArrayLayer,
3234 uint32_t layerCount,
3235 OutsideRenderPassCommandBuffer *commandBuffer);
3236
3237 angle::Result clearEmulatedChannels(ContextVk *contextVk,
3238 VkColorComponentFlags colorMaskFlags,
3239 const VkClearValue &value,
3240 LevelIndex mipLevel,
3241 uint32_t baseArrayLayer,
3242 uint32_t layerCount);
3243
3244 angle::Result updateSubresourceOnHost(ErrorContext *context,
3245 ApplyImageUpdate applyUpdate,
3246 const gl::ImageIndex &index,
3247 const gl::Extents &glExtents,
3248 const gl::Offset &offset,
3249 const uint8_t *source,
3250 const GLuint rowPitch,
3251 const GLuint depthPitch,
3252 bool *copiedOut);
3253
3254 // ClearEmulatedChannels updates are expected in the beginning of the level update list. They
3255 // can be processed first and removed. By doing so, if this is the only update for the image,
3256 // an unnecessary layout transition can be avoided.
3257 angle::Result flushStagedClearEmulatedChannelsUpdates(ContextVk *contextVk,
3258 gl::LevelIndex levelGLStart,
3259 gl::LevelIndex levelGLLimit,
3260 bool *otherUpdatesToFlushOut);
3261
3262 // Flushes staged updates to a range of levels and layers from start to end. The updates do not
3263 // include ClearEmulatedChannelsOnly, which are processed in a separate function.
3264 angle::Result flushStagedUpdatesImpl(ContextVk *contextVk,
3265 gl::LevelIndex levelGLStart,
3266 gl::LevelIndex levelGLEnd,
3267 uint32_t layerStart,
3268 uint32_t layerEnd,
3269 const gl::TexLevelMask &skipLevelsAllFaces);
3270
3271 // Limit the input level to the number of levels in subresource update list.
3272 void clipLevelToUpdateListUpperLimit(gl::LevelIndex *level) const;
3273
3274 SubresourceUpdates *getLevelUpdates(gl::LevelIndex level);
3275 const SubresourceUpdates *getLevelUpdates(gl::LevelIndex level) const;
3276
3277 void appendSubresourceUpdate(gl::LevelIndex level, SubresourceUpdate &&update);
3278 void prependSubresourceUpdate(gl::LevelIndex level, SubresourceUpdate &&update);
3279
3280 enum class PruneReason
3281 {
3282 MemoryOptimization,
3283 MinimizeWorkBeforeFlush
3284 };
3285 void pruneSupersededUpdatesForLevel(ContextVk *contextVk,
3286 const gl::LevelIndex level,
3287 const PruneReason reason);
3288
3289 // Whether there are any updates in [start, end).
3290 bool hasStagedUpdatesInLevels(gl::LevelIndex levelStart, gl::LevelIndex levelEnd) const;
3291
3292 // Used only for assertions, these functions verify that
3293 // SubresourceUpdate::refcountedObject::image or buffer references have the correct ref count.
3294 // This is to prevent accidental leaks.
3295 bool validateSubresourceUpdateImageRefConsistent(RefCounted<ImageHelper> *image) const;
3296 bool validateSubresourceUpdateBufferRefConsistent(RefCounted<BufferHelper> *buffer) const;
3297 bool validateSubresourceUpdateRefCountsConsistent() const;
3298
3299 void resetCachedProperties();
3300 void setEntireContentDefined();
3301 void setEntireContentUndefined();
3302 void setContentDefined(LevelIndex levelStart,
3303 uint32_t levelCount,
3304 uint32_t layerStart,
3305 uint32_t layerCount,
3306 VkImageAspectFlags aspectFlags);
3307 void invalidateSubresourceContentImpl(vk::ErrorContext *context,
3308 gl::LevelIndex level,
3309 uint32_t layerIndex,
3310 uint32_t layerCount,
3311 VkImageAspectFlagBits aspect,
3312 LevelContentDefinedMask *contentDefinedMask,
3313 bool *preferToKeepContentsDefinedOut,
3314 bool *layerLimitReachedOut);
3315 void restoreSubresourceContentImpl(gl::LevelIndex level,
3316 uint32_t layerIndex,
3317 uint32_t layerCount,
3318 VkImageAspectFlagBits aspect,
3319 LevelContentDefinedMask *contentDefinedMask);
3320
3321 // Use the following functions to access m*ContentDefined to make sure the correct level index
3322 // is used (i.e. vk::LevelIndex and not gl::LevelIndex).
3323 LevelContentDefinedMask &getLevelContentDefined(LevelIndex level);
3324 LevelContentDefinedMask &getLevelStencilContentDefined(LevelIndex level);
3325 const LevelContentDefinedMask &getLevelContentDefined(LevelIndex level) const;
3326 const LevelContentDefinedMask &getLevelStencilContentDefined(LevelIndex level) const;
3327
3328 angle::Result initLayerImageViewImpl(ErrorContext *context,
3329 gl::TextureType textureType,
3330 VkImageAspectFlags aspectMask,
3331 const gl::SwizzleState &swizzleMap,
3332 ImageView *imageViewOut,
3333 LevelIndex baseMipLevelVk,
3334 uint32_t levelCount,
3335 uint32_t baseArrayLayer,
3336 uint32_t layerCount,
3337 VkFormat imageFormat,
3338 VkImageUsageFlags usageFlags,
3339 gl::YuvSamplingMode yuvSamplingMode) const;
3340
3341 angle::Result readPixelsImpl(ContextVk *contextVk,
3342 const gl::Rectangle &area,
3343 const PackPixelsParams &packPixelsParams,
3344 VkImageAspectFlagBits copyAspectFlags,
3345 gl::LevelIndex levelGL,
3346 uint32_t layer,
3347 void *pixels);
3348
3349 angle::Result packReadPixelBuffer(ContextVk *contextVk,
3350 const gl::Rectangle &area,
3351 const PackPixelsParams &packPixelsParams,
3352 const angle::Format &readFormat,
3353 const angle::Format &aspectFormat,
3354 const uint8_t *readPixelBuffer,
3355 gl::LevelIndex levelGL,
3356 void *pixels);
3357
3358 bool canCopyWithTransformForReadPixels(const PackPixelsParams &packPixelsParams,
3359 const VkExtent3D &srcExtent,
3360 const angle::Format *readFormat,
3361 ptrdiff_t pixelsOffset);
3362 bool canCopyWithComputeForReadPixels(const PackPixelsParams &packPixelsParams,
3363 const VkExtent3D &srcExtent,
3364 const angle::Format *readFormat,
3365 ptrdiff_t pixelsOffset);
3366
3367 // Returns true if source data and actual image format matches except color space differences.
isDataFormatMatchForCopy(angle::FormatID srcDataFormatID)3368 bool isDataFormatMatchForCopy(angle::FormatID srcDataFormatID) const
3369 {
3370 if (mActualFormatID == srcDataFormatID)
3371 {
3372 return true;
3373 }
3374 angle::FormatID actualFormatLinear =
3375 getActualFormat().isSRGB ? ConvertToLinear(mActualFormatID) : mActualFormatID;
3376 angle::FormatID srcDataFormatIDLinear = angle::Format::Get(srcDataFormatID).isSRGB
3377 ? ConvertToLinear(srcDataFormatID)
3378 : srcDataFormatID;
3379 return actualFormatLinear == srcDataFormatIDLinear;
3380 }
3381
3382 static constexpr int kThreadholdForComputeTransCoding = 4096;
shouldUseComputeForTransCoding(LevelIndex level)3383 bool shouldUseComputeForTransCoding(LevelIndex level)
3384 {
3385 // Using texture size instead of extent size to simplify the problem.
3386 gl::Extents ext = getLevelExtents2D(level);
3387 return ext.width * ext.height > kThreadholdForComputeTransCoding;
3388 }
3389
3390 void adjustLayerRange(const SubresourceUpdates &levelUpdates,
3391 uint32_t *layerStart,
3392 uint32_t *layerEnd);
3393
3394 // Vulkan objects.
3395 Image mImage;
3396 DeviceMemory mDeviceMemory;
3397 Allocation mVmaAllocation;
3398
3399 // Image properties.
3400 VkImageCreateInfo mVkImageCreateInfo;
3401 VkImageType mImageType;
3402 VkImageTiling mTilingMode;
3403 VkImageCreateFlags mCreateFlags;
3404 VkImageUsageFlags mUsage;
3405 // For Android swapchain images, the Vulkan VkImage must be "rotated". However, most of ANGLE
3406 // uses non-rotated extents (i.e. the way the application views the extents--see "Introduction
3407 // to Android rotation and pre-rotation" in "SurfaceVk.cpp"). Thus, mExtents are non-rotated.
3408 // The rotated extents are also stored along with a bool that indicates if the aspect ratio is
3409 // different between the rotated and non-rotated extents.
3410 VkExtent3D mExtents;
3411 bool mRotatedAspectRatio;
3412 angle::FormatID mIntendedFormatID;
3413 angle::FormatID mActualFormatID;
3414 GLint mSamples;
3415 ImageSerial mImageSerial;
3416
3417 // Current state.
3418 ImageLayout mCurrentLayout;
3419 DeviceQueueIndex mCurrentDeviceQueueIndex;
3420 // For optimizing transition between different shader readonly layouts
3421 ImageLayout mLastNonShaderReadOnlyLayout;
3422 VkPipelineStageFlags mCurrentShaderReadStageMask;
3423 // Track how it is being used by current open renderpass.
3424 RenderPassUsageFlags mRenderPassUsageFlags;
3425 // The QueueSerial that associated with the last barrier.
3426 QueueSerial mBarrierQueueSerial;
3427
3428 // The current refCounted event. When barrier or layout change is needed, we should wait for
3429 // this event.
3430 RefCountedEvent mCurrentEvent;
3431 RefCountedEvent mLastNonShaderReadOnlyEvent;
    // Track history of pipeline stages being used. Each bit represents the fragment or
    // attachment usage, i.e., a bit is set if the layout indicates a fragment or colorAttachment
    // pipeline stage, and the bit is 0 if used by other stages like vertex shader or compute or
    // transfer. Every use of the image updates the usage history by shifting the bitfield left,
    // and a new bit that represents the new pipeline usage is added at the rightmost position.
    // This way we track whether there was any non-fragment pipeline usage during the past usages
    // (i.e., the window of usage history is the number of bits in mPipelineStageAccessHeuristic).
    // This information provides a heuristic for deciding if a VkEvent should be used to track the
    // operation.
3440 PipelineStageAccessHeuristic mPipelineStageAccessHeuristic;
3441
3442 // Whether ANGLE currently has ownership of this resource or it's released to external.
3443 bool mIsReleasedToExternal;
3444 // Whether this image came from a foreign source.
3445 bool mIsForeignImage;
3446
3447 // For imported images
3448 YcbcrConversionDesc mYcbcrConversionDesc;
3449
3450 // The first level that has been allocated. For mutable textures, this should be same as
3451 // mBaseLevel since we always reallocate VkImage based on mBaseLevel change. But for immutable
3452 // textures, we always allocate from level 0 regardless of mBaseLevel change.
3453 gl::LevelIndex mFirstAllocatedLevel;
3454
3455 // Cached properties.
3456 uint32_t mLayerCount;
3457 uint32_t mLevelCount;
3458
3459 // Image formats used for imageless framebuffers.
3460 ImageFormats mViewFormats;
3461
3462 std::vector<SubresourceUpdates> mSubresourceUpdates;
3463 VkDeviceSize mTotalStagedBufferUpdateSize;
3464
    // Optimization for repeated clears with the same value. If this Optional has a value, the
    // entire image has been cleared to the specified clear value. If another clear call is made
    // with the exact same clear value, we will detect and skip the clear call.
3468 Optional<ClearUpdate> mCurrentSingleClearValue;
3469
3470 // Track whether each subresource has defined contents. Up to 8 layers are tracked per level,
3471 // above which the contents are considered unconditionally defined.
3472 gl::TexLevelArray<LevelContentDefinedMask> mContentDefined;
3473 gl::TexLevelArray<LevelContentDefinedMask> mStencilContentDefined;
3474
3475 // Used for memory allocation tracking.
3476 // Memory size allocated for the image in the memory during the initialization.
3477 VkDeviceSize mAllocationSize;
3478 // Type of the memory allocation for the image (Image or ImageExternal).
3479 MemoryAllocationType mMemoryAllocationType;
3480 // Memory type index used for the allocation. It can be used to determine the heap index.
3481 uint32_t mMemoryTypeIndex;
3482
3483 // Only used for swapChain images. This is set when an image is acquired and is waited on
3484 // by the next submission (which uses this image), at which point it is released.
3485 Semaphore mAcquireNextImageSemaphore;
3486
3487 // Used to track subresource writes per level/layer. This can help parallelize writes to
3488 // different levels or layers of the image, such as data uploads.
3489 // See comment on kMaxParallelLayerWrites.
3490 gl::TexLevelArray<ImageLayerWriteMask> mSubresourcesWrittenSinceBarrier;
3491 };
3492
usesImage(const ImageHelper & image)3493 ANGLE_INLINE bool RenderPassCommandBufferHelper::usesImage(const ImageHelper &image) const
3494 {
3495 return image.usedByCommandBuffer(mQueueSerial);
3496 }
3497
startedAndUsesImageWithBarrier(const ImageHelper & image)3498 ANGLE_INLINE bool RenderPassCommandBufferHelper::startedAndUsesImageWithBarrier(
3499 const ImageHelper &image) const
3500 {
3501 return mRenderPassStarted && image.getBarrierQueueSerial() == mQueueSerial;
3502 }
3503
3504 // A vector of image views, such as one per level or one per layer.
3505 using ImageViewVector = std::vector<ImageView>;
3506
3507 // A vector of vector of image views. Primary index is layer, secondary index is level.
3508 using LayerLevelImageViewVector = std::vector<ImageViewVector>;
3509
3510 using SubresourceImageViewMap = angle::HashMap<ImageSubresourceRange, std::unique_ptr<ImageView>>;
3511
3512 // Address mode for layers: only possible to access either all layers, or up to
3513 // IMPLEMENTATION_ANGLE_MULTIVIEW_MAX_VIEWS layers. This enum uses 0 for all layers and the rest of
3514 // the values conveniently alias the number of layers.
// Note: deliberately a plain enum (not enum class) so that the _N enumerators implicitly
// convert to their layer count.
enum LayerMode
{
    // Access all layers of the image.
    All,
    // Access exactly N layers; the enumerator's integral value equals N.
    _1,
    _2,
    _3,
    _4,
};
// If the multiview max-views limit grows, matching _N enumerators must be added above.
static_assert(gl::IMPLEMENTATION_ANGLE_MULTIVIEW_MAX_VIEWS == 4, "Update LayerMode");
3524
3525 LayerMode GetLayerMode(const vk::ImageHelper &image, uint32_t layerCount);
3526
3527 // The colorspace of image views derived from angle::ColorspaceState
enum class ImageViewColorspace
{
    // Sentinel: no colorspace has been selected yet.
    Invalid = 0,
    Linear,
    SRGB,
};
3534
// Manages the set of VkImageViews created for a single image: read/copy views kept per
// [base, max] level range in both linear and sRGB colorspaces, draw views, storage views,
// depth-/stencil-only input attachment views, and a fragment shading rate view.  Also owns the
// colorspace state that selects between the linear and sRGB flavors of the read/copy views.
class ImageViewHelper final : angle::NonCopyable
{
  public:
    ImageViewHelper();
    ImageViewHelper(ImageViewHelper &&other);
    ~ImageViewHelper();

    void init(Renderer *renderer);
    void destroy(VkDevice device);

    // Read views in an explicitly requested colorspace.  The "Valid" accessors assert the view
    // exists and is valid for the current base/max level range.
    const ImageView &getLinearReadImageView() const
    {
        return getValidReadViewImpl(mPerLevelRangeLinearReadImageViews);
    }
    const ImageView &getSRGBReadImageView() const
    {
        return getValidReadViewImpl(mPerLevelRangeSRGBReadImageViews);
    }
    // Copy views may be shared with the read views; see mIsCopyImageViewShared.
    const ImageView &getLinearCopyImageView() const
    {
        return mIsCopyImageViewShared ? getValidReadViewImpl(mPerLevelRangeLinearReadImageViews)
                                      : getValidReadViewImpl(mPerLevelRangeLinearCopyImageViews);
    }
    const ImageView &getSRGBCopyImageView() const
    {
        return mIsCopyImageViewShared ? getValidReadViewImpl(mPerLevelRangeSRGBReadImageViews)
                                      : getValidReadViewImpl(mPerLevelRangeSRGBCopyImageViews);
    }
    const ImageView &getStencilReadImageView() const
    {
        return getValidReadViewImpl(mPerLevelRangeStencilReadImageViews);
    }

    // Read/copy views in whichever colorspace mReadColorspace currently selects.
    const ImageView &getReadImageView() const
    {
        return mReadColorspace == ImageViewColorspace::Linear
                   ? getReadViewImpl(mPerLevelRangeLinearReadImageViews)
                   : getReadViewImpl(mPerLevelRangeSRGBReadImageViews);
    }

    const ImageView &getCopyImageView() const
    {
        return mReadColorspace == ImageViewColorspace::Linear ? getLinearCopyImageView()
                                                              : getSRGBCopyImageView();
    }

    ImageView &getSamplerExternal2DY2YEXTImageView()
    {
        return getReadViewImpl(mPerLevelRangeSamplerExternal2DY2YEXTImageViews);
    }

    const ImageView &getSamplerExternal2DY2YEXTImageView() const
    {
        return getValidReadViewImpl(mPerLevelRangeSamplerExternal2DY2YEXTImageViews);
    }

    const ImageView &getFragmentShadingRateImageView() const
    {
        return mFragmentShadingRateImageView;
    }

    // Used when initializing RenderTargets.
    bool hasStencilReadImageView() const
    {
        return mCurrentBaseMaxLevelHash < mPerLevelRangeStencilReadImageViews.size()
                   ? mPerLevelRangeStencilReadImageViews[mCurrentBaseMaxLevelHash].valid()
                   : false;
    }

    // Returns whether a valid copy view exists for the current colorspace and level range.
    bool hasCopyImageView() const
    {
        if ((mReadColorspace == ImageViewColorspace::Linear &&
             mCurrentBaseMaxLevelHash < mPerLevelRangeLinearCopyImageViews.size()) ||
            (mReadColorspace == ImageViewColorspace::SRGB &&
             mCurrentBaseMaxLevelHash < mPerLevelRangeSRGBCopyImageViews.size()))
        {
            return getCopyImageView().valid();
        }
        else
        {
            return false;
        }
    }

    // For applications that frequently switch a texture's max level, and make no other changes to
    // the texture, change the currently-used max level, and potentially create new "read views"
    // for the new max-level
    angle::Result initReadViews(ContextVk *contextVk,
                                gl::TextureType viewType,
                                const ImageHelper &image,
                                const gl::SwizzleState &formatSwizzle,
                                const gl::SwizzleState &readSwizzle,
                                LevelIndex baseLevel,
                                uint32_t levelCount,
                                uint32_t baseLayer,
                                uint32_t layerCount,
                                bool requiresSRGBViews,
                                VkImageUsageFlags imageUsageFlags);

    // Creates a storage view with all layers of the level.
    angle::Result getLevelStorageImageView(ErrorContext *context,
                                           gl::TextureType viewType,
                                           const ImageHelper &image,
                                           LevelIndex levelVk,
                                           uint32_t layer,
                                           VkImageUsageFlags imageUsageFlags,
                                           angle::FormatID formatID,
                                           const ImageView **imageViewOut);

    // Creates a storage view with a single layer of the level.
    angle::Result getLevelLayerStorageImageView(ErrorContext *context,
                                                const ImageHelper &image,
                                                LevelIndex levelVk,
                                                uint32_t layer,
                                                VkImageUsageFlags imageUsageFlags,
                                                angle::FormatID formatID,
                                                const ImageView **imageViewOut);

    // Creates a draw view with a range of layers of the level.
    angle::Result getLevelDrawImageView(ErrorContext *context,
                                        const ImageHelper &image,
                                        LevelIndex levelVk,
                                        uint32_t layer,
                                        uint32_t layerCount,
                                        const ImageView **imageViewOut);

    // Creates a draw view with a single layer of the level.
    angle::Result getLevelLayerDrawImageView(ErrorContext *context,
                                             const ImageHelper &image,
                                             LevelIndex levelVk,
                                             uint32_t layer,
                                             const ImageView **imageViewOut);

    // Creates a depth-xor-stencil view with a range of layers of the level.
    angle::Result getLevelDepthOrStencilImageView(ErrorContext *context,
                                                  const ImageHelper &image,
                                                  LevelIndex levelVk,
                                                  uint32_t layer,
                                                  uint32_t layerCount,
                                                  VkImageAspectFlagBits aspect,
                                                  const ImageView **imageViewOut);

    // Creates a depth-xor-stencil view with a single layer of the level.
    angle::Result getLevelLayerDepthOrStencilImageView(ErrorContext *context,
                                                       const ImageHelper &image,
                                                       LevelIndex levelVk,
                                                       uint32_t layer,
                                                       VkImageAspectFlagBits aspect,
                                                       const ImageView **imageViewOut);

    // Creates a fragment shading rate view.
    angle::Result initFragmentShadingRateView(ContextVk *contextVk, ImageHelper *image);

    // Return unique Serial for an imageView.
    ImageOrBufferViewSubresourceSerial getSubresourceSerial(gl::LevelIndex levelGL,
                                                            uint32_t levelCount,
                                                            uint32_t layer,
                                                            LayerMode layerMode) const;

    // Return unique Serial for an imageView for a specific colorspace.
    ImageOrBufferViewSubresourceSerial getSubresourceSerialForColorspace(
        gl::LevelIndex levelGL,
        uint32_t levelCount,
        uint32_t layer,
        LayerMode layerMode,
        ImageViewColorspace readColorspace) const;

    ImageSubresourceRange getSubresourceDrawRange(gl::LevelIndex level,
                                                  uint32_t layer,
                                                  LayerMode layerMode) const;

    bool isImageViewGarbageEmpty() const;

    void release(Renderer *renderer, const ResourceUse &use);

    // Helpers for colorspace state.  The update* methods below are const because they are called
    // from otherwise-const paths; they mutate only the mutable colorspace members and recompute
    // the derived read/write colorspaces via updateColorspace when the state actually changes.
    ImageViewColorspace getColorspaceForRead() const { return mReadColorspace; }
    // Returns true if the view's read colorspace disagrees with the image's actual format
    // colorspace (linear view of an sRGB image, or sRGB view of a linear image).
    bool hasColorspaceOverrideForRead(const ImageHelper &image) const
    {
        ASSERT(image.valid());
        return (!image.getActualFormat().isSRGB &&
                mReadColorspace == vk::ImageViewColorspace::SRGB) ||
               (image.getActualFormat().isSRGB &&
                mReadColorspace == vk::ImageViewColorspace::Linear);
    }

    // Same as above, but for the write colorspace.
    bool hasColorspaceOverrideForWrite(const ImageHelper &image) const
    {
        ASSERT(image.valid());
        return (!image.getActualFormat().isSRGB &&
                mWriteColorspace == vk::ImageViewColorspace::SRGB) ||
               (image.getActualFormat().isSRGB &&
                mWriteColorspace == vk::ImageViewColorspace::Linear);
    }
    angle::FormatID getColorspaceOverrideFormatForWrite(angle::FormatID format) const;
    void updateStaticTexelFetch(const ImageHelper &image, bool staticTexelFetchAccess) const
    {
        if (mColorspaceState.hasStaticTexelFetchAccess != staticTexelFetchAccess)
        {
            mColorspaceState.hasStaticTexelFetchAccess = staticTexelFetchAccess;
            updateColorspace(image);
        }
    }
    void updateSrgbDecode(const ImageHelper &image, gl::SrgbDecode srgbDecode) const
    {
        if (mColorspaceState.srgbDecode != srgbDecode)
        {
            mColorspaceState.srgbDecode = srgbDecode;
            updateColorspace(image);
        }
    }
    void updateSrgbOverride(const ImageHelper &image, gl::SrgbOverride srgbOverride) const
    {
        if (mColorspaceState.srgbOverride != srgbOverride)
        {
            mColorspaceState.srgbOverride = srgbOverride;
            updateColorspace(image);
        }
    }
    // NOTE(review): "Wite" is a long-standing misspelling of "Write"; renaming requires updating
    // all callers.
    void updateSrgbWiteControlMode(const ImageHelper &image,
                                   gl::SrgbWriteControlMode srgbWriteControl) const
    {
        if (mColorspaceState.srgbWriteControl != srgbWriteControl)
        {
            mColorspaceState.srgbWriteControl = srgbWriteControl;
            updateColorspace(image);
        }
    }
    void updateEglImageColorspace(const ImageHelper &image,
                                  egl::ImageColorspace eglImageColorspace) const
    {
        if (mColorspaceState.eglImageColorspace != eglImageColorspace)
        {
            mColorspaceState.eglImageColorspace = eglImageColorspace;
            updateColorspace(image);
        }
    }

  private:
    // Non-const versions of the colorspace-selected accessors, for internal view creation.
    ImageView &getReadImageView()
    {
        return mReadColorspace == ImageViewColorspace::Linear
                   ? getReadViewImpl(mPerLevelRangeLinearReadImageViews)
                   : getReadViewImpl(mPerLevelRangeSRGBReadImageViews);
    }
    ImageView &getCopyImageView()
    {
        if (mReadColorspace == ImageViewColorspace::Linear)
        {
            return mIsCopyImageViewShared ? getReadViewImpl(mPerLevelRangeLinearReadImageViews)
                                          : getReadViewImpl(mPerLevelRangeLinearCopyImageViews);
        }

        return mIsCopyImageViewShared ? getReadViewImpl(mPerLevelRangeSRGBReadImageViews)
                                      : getReadViewImpl(mPerLevelRangeSRGBCopyImageViews);
    }
    // Returns the dedicated copy-view storage slot, regardless of whether views are shared.
    ImageView &getCopyImageViewStorage()
    {
        return mReadColorspace == ImageViewColorspace::Linear
                   ? getReadViewImpl(mPerLevelRangeLinearCopyImageViews)
                   : getReadViewImpl(mPerLevelRangeSRGBCopyImageViews);
    }

    // Used by public get*ImageView() methods to do proper assert based on vector size and validity
    inline const ImageView &getValidReadViewImpl(const ImageViewVector &imageViewVector) const
    {
        ASSERT(mCurrentBaseMaxLevelHash < imageViewVector.size() &&
               imageViewVector[mCurrentBaseMaxLevelHash].valid());
        return imageViewVector[mCurrentBaseMaxLevelHash];
    }

    // Used by public get*ImageView() methods to do proper assert based on vector size
    inline const ImageView &getReadViewImpl(const ImageViewVector &imageViewVector) const
    {
        ASSERT(mCurrentBaseMaxLevelHash < imageViewVector.size());
        return imageViewVector[mCurrentBaseMaxLevelHash];
    }

    // Used by private get*ImageView() methods to do proper assert based on vector size
    inline ImageView &getReadViewImpl(ImageViewVector &imageViewVector)
    {
        ASSERT(mCurrentBaseMaxLevelHash < imageViewVector.size());
        return imageViewVector[mCurrentBaseMaxLevelHash];
    }

    angle::Result getLevelLayerDrawImageViewImpl(ErrorContext *context,
                                                 const ImageHelper &image,
                                                 LevelIndex levelVk,
                                                 uint32_t layer,
                                                 uint32_t layerCount,
                                                 ImageView *imageViewOut);
    angle::Result getLevelLayerDepthOrStencilImageViewImpl(ErrorContext *context,
                                                           const ImageHelper &image,
                                                           LevelIndex levelVk,
                                                           uint32_t layer,
                                                           uint32_t layerCount,
                                                           VkImageAspectFlagBits aspect,
                                                           ImageView *imageViewOut);

    // Creates views with multiple layers and levels.
    angle::Result initReadViewsImpl(ContextVk *contextVk,
                                    gl::TextureType viewType,
                                    const ImageHelper &image,
                                    const gl::SwizzleState &formatSwizzle,
                                    const gl::SwizzleState &readSwizzle,
                                    LevelIndex baseLevel,
                                    uint32_t levelCount,
                                    uint32_t baseLayer,
                                    uint32_t layerCount,
                                    VkImageUsageFlags imageUsageFlags);

    // Create linear and srgb read views
    angle::Result initLinearAndSrgbReadViewsImpl(ContextVk *contextVk,
                                                 gl::TextureType viewType,
                                                 const ImageHelper &image,
                                                 const gl::SwizzleState &formatSwizzle,
                                                 const gl::SwizzleState &readSwizzle,
                                                 LevelIndex baseLevel,
                                                 uint32_t levelCount,
                                                 uint32_t baseLayer,
                                                 uint32_t layerCount,
                                                 VkImageUsageFlags imageUsageFlags);

    // Recomputes mReadColorspace/mWriteColorspace from mColorspaceState.
    void updateColorspace(const ImageHelper &image) const;

    // For applications that frequently switch a texture's base/max level, and make no other changes
    // to the texture, keep track of the currently-used base and max levels, and keep one "read
    // view" per each combination. The value stored here is base<<4|max, used to look up the view
    // in a vector.
    static_assert(gl::IMPLEMENTATION_MAX_TEXTURE_LEVELS <= 16,
                  "Not enough bits in mCurrentBaseMaxLevelHash");
    uint8_t mCurrentBaseMaxLevelHash;

    // This flag is set when copy views are identical to read views, and we share the views instead
    // of creating new ones.
    bool mIsCopyImageViewShared;

    // Mutable so the const update* helpers above can adjust colorspace selection lazily.
    mutable ImageViewColorspace mReadColorspace;
    mutable ImageViewColorspace mWriteColorspace;
    mutable angle::ColorspaceState mColorspaceState;

    // Read views (one per [base, max] level range)
    ImageViewVector mPerLevelRangeLinearReadImageViews;
    ImageViewVector mPerLevelRangeSRGBReadImageViews;
    ImageViewVector mPerLevelRangeLinearCopyImageViews;
    ImageViewVector mPerLevelRangeSRGBCopyImageViews;
    ImageViewVector mPerLevelRangeStencilReadImageViews;
    ImageViewVector mPerLevelRangeSamplerExternal2DY2YEXTImageViews;

    // Draw views
    LayerLevelImageViewVector mLayerLevelDrawImageViews;
    LayerLevelImageViewVector mLayerLevelDrawImageViewsLinear;
    SubresourceImageViewMap mSubresourceDrawImageViews;

    // Depth- or stencil-only input attachment views
    LayerLevelImageViewVector mLayerLevelDepthOnlyImageViews;
    LayerLevelImageViewVector mLayerLevelStencilOnlyImageViews;
    SubresourceImageViewMap mSubresourceDepthOnlyImageViews;
    SubresourceImageViewMap mSubresourceStencilOnlyImageViews;

    // Storage views
    ImageViewVector mLevelStorageImageViews;
    LayerLevelImageViewVector mLayerLevelStorageImageViews;

    // Fragment shading rate view
    ImageView mFragmentShadingRateImageView;

    // Serial for the image view set. getSubresourceSerial combines it with subresource info.
    ImageOrBufferViewSerial mImageViewSerial;
};
3905
// Manages VkBufferViews for a texture buffer.  One view is kept per VkFormat so the same buffer
// can be reinterpreted with different formats (e.g. from an imageBuffer's format layout
// qualifier).
class BufferViewHelper final : public Resource
{
  public:
    BufferViewHelper();
    BufferViewHelper(BufferViewHelper &&other);
    ~BufferViewHelper() override;

    // |offset|/|size| are the range specified to glTexBufferRange (stored in mOffset/mSize).
    void init(Renderer *renderer, VkDeviceSize offset, VkDeviceSize size);
    bool isInitialized() const { return mInitialized; }
    void release(ContextVk *contextVk);
    void release(Renderer *renderer);
    void destroy(VkDevice device);

    // Returns (creating on demand) the view of |buffer| with the given format.  The returned
    // pointer is owned by this helper.
    angle::Result getView(ErrorContext *context,
                          const BufferHelper &buffer,
                          VkDeviceSize bufferOffset,
                          const Format &format,
                          const BufferView **viewOut);

    // Return unique Serial for a bufferView.
    ImageOrBufferViewSubresourceSerial getSerial() const;

  private:
    // Set by init(); guards against use before initialization.
    bool mInitialized;

    // To support format reinterpretation, additional views for formats other than the one specified
    // to glTexBuffer may need to be created. On draw/dispatch, the format layout qualifier of the
    // imageBuffer is used (if provided) to create a potentially different view of the buffer.
    angle::HashMap<VkFormat, BufferView> mViews;

    // View properties:
    //
    // Offset and size specified to glTexBufferRange
    VkDeviceSize mOffset;
    VkDeviceSize mSize;

    // Serial for the buffer view. An ImageOrBufferViewSerial is used for texture buffers so that
    // they fit together with the other texture types.
    ImageOrBufferViewSerial mViewSerial;
};
3946
// Holds the per-stage shader modules of a program and creates graphics/compute pipelines from
// them, going through the appropriate pipeline caches.
class ShaderProgramHelper : angle::NonCopyable
{
  public:
    ShaderProgramHelper();
    ~ShaderProgramHelper();

    // Whether a shader module has been set for the given stage.
    bool valid(const gl::ShaderType shaderType) const;
    void destroy(Renderer *renderer);
    void release(ContextVk *contextVk);

    // Associates |shader| with the given stage.
    void setShader(gl::ShaderType shaderType, const ShaderModulePtr &shader);

    // Create a graphics pipeline and place it in the cache.  Must not be called if the pipeline
    // exists in cache.
    template <typename PipelineHash>
    ANGLE_INLINE angle::Result createGraphicsPipeline(
        vk::ErrorContext *context,
        GraphicsPipelineCache<PipelineHash> *graphicsPipelines,
        PipelineCacheAccess *pipelineCache,
        const RenderPass &compatibleRenderPass,
        const PipelineLayout &pipelineLayout,
        PipelineSource source,
        const GraphicsPipelineDesc &pipelineDesc,
        const SpecializationConstants &specConsts,
        const GraphicsPipelineDesc **descPtrOut,
        PipelineHelper **pipelineOut) const
    {
        // Creation is delegated to the cache; this helper only contributes its shader modules and
        // the specialization constants.
        return graphicsPipelines->createPipeline(
            context, pipelineCache, compatibleRenderPass, pipelineLayout,
            GraphicsPipelineShadersInfo(&mShaders, &specConsts), source, pipelineDesc, descPtrOut,
            pipelineOut);
    }

    // Sets up a task that creates a monolithic pipeline for |pipeline| (defined in the
    // implementation file).
    void createMonolithicPipelineCreationTask(vk::ErrorContext *context,
                                              PipelineCacheAccess *pipelineCache,
                                              const GraphicsPipelineDesc &desc,
                                              const PipelineLayout &pipelineLayout,
                                              const SpecializationConstants &specConsts,
                                              PipelineHelper *pipeline) const;

    // Returns a compute pipeline from |computePipelines|, creating it if not already present.
    angle::Result getOrCreateComputePipeline(vk::ErrorContext *context,
                                             ComputePipelineCache *computePipelines,
                                             PipelineCacheAccess *pipelineCache,
                                             const PipelineLayout &pipelineLayout,
                                             ComputePipelineOptions pipelineOptions,
                                             PipelineSource source,
                                             PipelineHelper **pipelineOut,
                                             const char *shaderName,
                                             VkSpecializationInfo *specializationInfo) const;

  private:
    // Per-stage shader modules set through setShader().
    ShaderModuleMap mShaders;
};
4000
4001 // Tracks current handle allocation counts in the back-end. Useful for debugging and profiling.
4002 // Note: not all handle types are currently implemented.
4003 class ActiveHandleCounter final : angle::NonCopyable
4004 {
4005 public:
4006 ActiveHandleCounter();
4007 ~ActiveHandleCounter();
4008
onAllocate(HandleType handleType)4009 void onAllocate(HandleType handleType)
4010 {
4011 mActiveCounts[handleType]++;
4012 mAllocatedCounts[handleType]++;
4013 }
4014
onDeallocate(HandleType handleType,uint32_t count)4015 void onDeallocate(HandleType handleType, uint32_t count) { mActiveCounts[handleType] -= count; }
4016
getActive(HandleType handleType)4017 uint32_t getActive(HandleType handleType) const { return mActiveCounts[handleType]; }
getAllocated(HandleType handleType)4018 uint32_t getAllocated(HandleType handleType) const { return mAllocatedCounts[handleType]; }
4019
4020 private:
4021 angle::PackedEnumMap<HandleType, uint32_t> mActiveCounts;
4022 angle::PackedEnumMap<HandleType, uint32_t> mAllocatedCounts;
4023 };
4024
4025 // Sometimes ANGLE issues a command internally, such as copies, draws and dispatches that do not
4026 // directly correspond to the application draw/dispatch call. Before the command is recorded in the
4027 // command buffer, the render pass may need to be broken and/or appropriate barriers may need to be
4028 // inserted. The following struct aggregates all resources that such internal commands need.
// A buffer access: which buffer, with what Vulkan access flags, at which pipeline stage.
struct CommandBufferBufferAccess
{
    BufferHelper *buffer;
    VkAccessFlags accessType;
    PipelineStage stage;
};
// An image access: which image, which aspects (color/depth/stencil), and the layout it must be in.
struct CommandBufferImageAccess
{
    ImageHelper *image;
    VkImageAspectFlags aspectFlags;
    ImageLayout imageLayout;
};
// An image access restricted to a level/layer subresource range.
struct CommandBufferImageSubresourceAccess
{
    CommandBufferImageAccess access;
    gl::LevelIndex levelStart;
    uint32_t levelCount;
    uint32_t layerStart;
    uint32_t layerCount;
};
// A buffer involved in an external (cross-API/cross-queue) acquire or release.
struct CommandBufferBufferExternalAcquireRelease
{
    BufferHelper *buffer;
};
// A generic resource access, used when only lifetime tracking (not barriers) is needed.
struct CommandBufferResourceAccess
{
    Resource *resource;
};
// Collects the resources accessed by an internally-issued command.  Each on*() helper records the
// resource with the Vulkan access flags / image layout appropriate to that kind of access, so the
// command buffer infrastructure can break render passes and insert barriers before recording.
class CommandBufferAccess : angle::NonCopyable
{
  public:
    CommandBufferAccess();
    ~CommandBufferAccess();

    // --- Buffer accesses ---

    void onBufferTransferRead(BufferHelper *buffer)
    {
        onBufferRead(VK_ACCESS_TRANSFER_READ_BIT, PipelineStage::Transfer, buffer);
    }
    void onBufferTransferWrite(BufferHelper *buffer)
    {
        onBufferWrite(VK_ACCESS_TRANSFER_WRITE_BIT, PipelineStage::Transfer, buffer);
    }
    // Copy where source and destination are the same buffer: tracked as one write carrying both
    // the read and write transfer access bits.
    void onBufferSelfCopy(BufferHelper *buffer)
    {
        onBufferWrite(VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
                      PipelineStage::Transfer, buffer);
    }
    void onBufferComputeShaderRead(BufferHelper *buffer)
    {
        onBufferRead(VK_ACCESS_SHADER_READ_BIT, PipelineStage::ComputeShader, buffer);
    }
    // Compute write is tracked with both shader read and write bits.
    void onBufferComputeShaderWrite(BufferHelper *buffer)
    {
        onBufferWrite(VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT,
                      PipelineStage::ComputeShader, buffer);
    }

    // --- Image accesses ---

    void onImageTransferRead(VkImageAspectFlags aspectFlags, ImageHelper *image)
    {
        onImageRead(aspectFlags, ImageLayout::TransferSrc, image);
    }
    void onImageTransferWrite(gl::LevelIndex levelStart,
                              uint32_t levelCount,
                              uint32_t layerStart,
                              uint32_t layerCount,
                              VkImageAspectFlags aspectFlags,
                              ImageHelper *image)
    {
        onImageWrite(levelStart, levelCount, layerStart, layerCount, aspectFlags,
                     ImageLayout::TransferDst, image);
    }
    // Copy where source and destination are the same image: both the read and write subresource
    // ranges are tracked in the combined TransferSrcDst layout.
    void onImageSelfCopy(gl::LevelIndex readLevelStart,
                         uint32_t readLevelCount,
                         uint32_t readLayerStart,
                         uint32_t readLayerCount,
                         gl::LevelIndex writeLevelStart,
                         uint32_t writeLevelCount,
                         uint32_t writeLayerStart,
                         uint32_t writeLayerCount,
                         VkImageAspectFlags aspectFlags,
                         ImageHelper *image)
    {
        onImageReadSubresources(readLevelStart, readLevelCount, readLayerStart, readLayerCount,
                                aspectFlags, ImageLayout::TransferSrcDst, image);
        onImageWrite(writeLevelStart, writeLevelCount, writeLayerStart, writeLayerCount,
                     aspectFlags, ImageLayout::TransferSrcDst, image);
    }
    // Write performed during draw-based mipmap generation; uses the ColorWrite layout (presumably
    // because the levels are written as color attachments -- confirm with callers).
    void onImageDrawMipmapGenerationWrite(gl::LevelIndex levelStart,
                                          uint32_t levelCount,
                                          uint32_t layerStart,
                                          uint32_t layerCount,
                                          VkImageAspectFlags aspectFlags,
                                          ImageHelper *image)
    {
        onImageWrite(levelStart, levelCount, layerStart, layerCount, aspectFlags,
                     ImageLayout::ColorWrite, image);
    }
    void onImageComputeShaderRead(VkImageAspectFlags aspectFlags, ImageHelper *image)
    {
        onImageRead(aspectFlags, ImageLayout::ComputeShaderReadOnly, image);
    }
    // NOTE: this read is deliberately tracked with the ComputeShaderWrite layout -- presumably
    // because compute-based mipmap generation reads and writes the same image in one dispatch, so
    // both accesses must agree on a single layout.  Confirm against callers before changing.
    void onImageComputeMipmapGenerationRead(gl::LevelIndex levelStart,
                                            uint32_t levelCount,
                                            uint32_t layerStart,
                                            uint32_t layerCount,
                                            VkImageAspectFlags aspectFlags,
                                            ImageHelper *image)
    {
        onImageReadSubresources(levelStart, levelCount, layerStart, layerCount, aspectFlags,
                                ImageLayout::ComputeShaderWrite, image);
    }
    void onImageComputeShaderWrite(gl::LevelIndex levelStart,
                                   uint32_t levelCount,
                                   uint32_t layerStart,
                                   uint32_t layerCount,
                                   VkImageAspectFlags aspectFlags,
                                   ImageHelper *image)
    {
        onImageWrite(levelStart, levelCount, layerStart, layerCount, aspectFlags,
                     ImageLayout::ComputeShaderWrite, image);
    }
    // Write that may come from either a transfer or a compute dispatch; uses the combined layout.
    void onImageTransferDstAndComputeWrite(gl::LevelIndex levelStart,
                                           uint32_t levelCount,
                                           uint32_t layerStart,
                                           uint32_t layerCount,
                                           VkImageAspectFlags aspectFlags,
                                           ImageHelper *image)
    {
        onImageWrite(levelStart, levelCount, layerStart, layerCount, aspectFlags,
                     ImageLayout::TransferDstAndComputeWrite, image);
    }

    // --- Other resource accesses (lifetime tracking only) ---

    void onExternalAcquireRelease(ImageHelper *image) { onResourceAccess(image); }
    void onQueryAccess(QueryHelper *query) { onResourceAccess(query); }
    void onBufferExternalAcquireRelease(BufferHelper *buffer);

    // The limits reflect the current maximum concurrent usage of each resource type.  ASSERTs will
    // fire if this limit is exceeded in the future.
    using ReadBuffers  = angle::FixedVector<CommandBufferBufferAccess, 2>;
    using WriteBuffers = angle::FixedVector<CommandBufferBufferAccess, 2>;
    using ReadImages   = angle::FixedVector<CommandBufferImageAccess, 2>;
    using WriteImages  = angle::FixedVector<CommandBufferImageSubresourceAccess,
                                            gl::IMPLEMENTATION_MAX_DRAW_BUFFERS>;
    using ReadImageSubresources = angle::FixedVector<CommandBufferImageSubresourceAccess, 1>;

    using ExternalAcquireReleaseBuffers =
        angle::FixedVector<CommandBufferBufferExternalAcquireRelease, 1>;
    using AccessResources = angle::FixedVector<CommandBufferResourceAccess, 1>;

    // Accessors used by the command buffer infrastructure to process the gathered accesses.
    const ReadBuffers &getReadBuffers() const { return mReadBuffers; }
    const WriteBuffers &getWriteBuffers() const { return mWriteBuffers; }
    const ReadImages &getReadImages() const { return mReadImages; }
    const WriteImages &getWriteImages() const { return mWriteImages; }
    const ReadImageSubresources &getReadImageSubresources() const { return mReadImageSubresources; }
    const ExternalAcquireReleaseBuffers &getExternalAcquireReleaseBuffers() const
    {
        return mExternalAcquireReleaseBuffers;
    }
    const AccessResources &getAccessResources() const { return mAccessResources; }

  private:
    // Implementations (in the .cpp) append the access to the corresponding fixed vector below.
    void onBufferRead(VkAccessFlags readAccessType, PipelineStage readStage, BufferHelper *buffer);
    void onBufferWrite(VkAccessFlags writeAccessType,
                       PipelineStage writeStage,
                       BufferHelper *buffer);

    void onImageRead(VkImageAspectFlags aspectFlags, ImageLayout imageLayout, ImageHelper *image);
    void onImageWrite(gl::LevelIndex levelStart,
                      uint32_t levelCount,
                      uint32_t layerStart,
                      uint32_t layerCount,
                      VkImageAspectFlags aspectFlags,
                      ImageLayout imageLayout,
                      ImageHelper *image);

    void onImageReadSubresources(gl::LevelIndex levelStart,
                                 uint32_t levelCount,
                                 uint32_t layerStart,
                                 uint32_t layerCount,
                                 VkImageAspectFlags aspectFlags,
                                 ImageLayout imageLayout,
                                 ImageHelper *image);

    void onResourceAccess(Resource *resource);

    ReadBuffers mReadBuffers;
    WriteBuffers mWriteBuffers;
    ReadImages mReadImages;
    WriteImages mWriteImages;
    ReadImageSubresources mReadImageSubresources;
    ExternalAcquireReleaseBuffers mExternalAcquireReleaseBuffers;
    AccessResources mAccessResources;
};
4221
// ANGLE-side mirror of VkPresentModeKHR.  Each enumerator's value matches the corresponding Vulkan
// enum value; InvalidEnum doubles as the enum count.
enum class PresentMode
{
    ImmediateKHR               = VK_PRESENT_MODE_IMMEDIATE_KHR,
    MailboxKHR                 = VK_PRESENT_MODE_MAILBOX_KHR,
    FifoKHR                    = VK_PRESENT_MODE_FIFO_KHR,
    FifoRelaxedKHR             = VK_PRESENT_MODE_FIFO_RELAXED_KHR,
    SharedDemandRefreshKHR     = VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR,
    SharedContinuousRefreshKHR = VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR,

    InvalidEnum,
    EnumCount = InvalidEnum,
};
4234
// Conversions between the PresentMode mirror enum above and VkPresentModeKHR.
VkPresentModeKHR ConvertPresentModeToVkPresentMode(PresentMode presentMode);
PresentMode ConvertVkPresentModeToPresentMode(VkPresentModeKHR vkPresentMode);
4237 } // namespace vk
4238 } // namespace rx
4239
4240 #endif // LIBANGLE_RENDERER_VULKAN_VK_HELPERS_H_
4241