1 //
2 // Copyright 2018 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // vk_helpers:
7 // Helper utility classes that manage Vulkan resources.
8
9 #ifndef LIBANGLE_RENDERER_VULKAN_VK_HELPERS_H_
10 #define LIBANGLE_RENDERER_VULKAN_VK_HELPERS_H_
11
12 #include "common/MemoryBuffer.h"
13 #include "libANGLE/renderer/vulkan/vk_cache_utils.h"
14 #include "libANGLE/renderer/vulkan/vk_format_utils.h"
15
16 #include <functional>
17
18 namespace gl
19 {
20 class ImageIndex;
21 } // namespace gl
22
23 namespace rx
24 {
25 namespace vk
26 {
// Usage flags for the buffers backing vertex, index and indirect data.  Each also includes
// storage-buffer usage so the same buffer can additionally be bound as a storage buffer.
constexpr VkBufferUsageFlags kVertexBufferUsageFlags =
    VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr VkBufferUsageFlags kIndexBufferUsageFlags =
    VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr VkBufferUsageFlags kIndirectBufferUsageFlags =
    VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
// Suballocation alignments (in bytes) for the corresponding buffer kinds.
constexpr size_t kVertexBufferAlignment = 4;
constexpr size_t kIndexBufferAlignment = 4;
constexpr size_t kIndirectBufferAlignment = 4;

// Staging buffers are used on both sides of transfer operations.
constexpr VkBufferUsageFlags kStagingBufferFlags =
    VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
constexpr size_t kStagingBufferSize = 1024 * 16;

constexpr VkImageCreateFlags kVkImageCreateFlagsNone = 0;

// Two offsets, used for staging-buffer copies.  NOTE(review): the meaning of the two entries
// (e.g. aspect planes) is defined by the users of this type — confirm at call sites.
using StagingBufferOffsetArray = std::array<VkDeviceSize, 2>;
44
// A bound texture unit: the texture, the sampler used with it, and the sRGB decode mode.
struct TextureUnit final
{
    TextureVk *texture;            // texture bound to this unit
    const SamplerHelper *sampler;  // sampler state used when sampling |texture|
    GLenum srgbDecode;             // sRGB decode mode (GLenum) for this unit
};
51
// A dynamic buffer is conceptually an infinitely long buffer. Each time you write to the buffer,
// you will always write to a previously unused portion. After a series of writes, you must flush
// the buffer data to the device. Buffer lifetime currently assumes that each new allocation will
// last as long or longer than each prior allocation.
//
// Dynamic buffers are used to implement a variety of data streaming operations in Vulkan, such
// as for immediate vertex array and element array data, uniform updates, and other dynamic data.
//
// Internally dynamic buffers keep a collection of VkBuffers. When we write past the end of a
// currently active VkBuffer we keep it until it is no longer in use. We then mark it available
// for future allocations in a free list.
class BufferHelper;
using BufferHelperPointerVector = std::vector<std::unique_ptr<BufferHelper>>;

class DynamicBuffer : angle::NonCopyable
{
  public:
    DynamicBuffer();
    DynamicBuffer(DynamicBuffer &&other);
    ~DynamicBuffer();

    // Stores the allocation parameters (usage, alignment, initial size, host visibility) used
    // for buffers created by this object.
    void init(RendererVk *renderer,
              VkBufferUsageFlags usage,
              size_t alignment,
              size_t initialSize,
              bool hostVisible);

    // This call will allocate a new region at the end of the current buffer. If it can't find
    // enough space in the current buffer, it returns false. This gives caller a chance to deal with
    // buffer switch that may occur with allocate call.
    bool allocateFromCurrentBuffer(size_t sizeInBytes, BufferHelper **bufferHelperOut);

    // This call will allocate a new region at the end of the buffer with default alignment. It
    // internally may trigger a new buffer to be created (which is returned in the optional
    // parameter `newBufferAllocatedOut`). The new region will be in the returned buffer at given
    // offset.
    angle::Result allocate(Context *context,
                           size_t sizeInBytes,
                           BufferHelper **bufferHelperOut,
                           bool *newBufferAllocatedOut);

    // This releases resources when they might currently be in use.
    void release(RendererVk *renderer);

    // This adds in-flight buffers to the context's mResourceUseList and then releases them
    void releaseInFlightBuffersToResourceUseList(ContextVk *contextVk);

    // This frees resources immediately.
    void destroy(RendererVk *renderer);

    // Returns the buffer currently being allocated from (may be null before the first allocate).
    BufferHelper *getCurrentBuffer() const { return mBuffer.get(); }

    // **Accumulate** an alignment requirement. A dynamic buffer is used as the staging buffer for
    // image uploads, which can contain updates to unrelated mips, possibly with different formats.
    // The staging buffer should have an alignment that can satisfy all those formats, i.e. it's the
    // lcm of all alignments set in its lifetime.
    void requireAlignment(RendererVk *renderer, size_t alignment);
    size_t getAlignment() const { return mAlignment; }

    // For testing only!
    void setMinimumSizeForTesting(size_t minSize);

    // Whether writes through a mapping are visible to the device without an explicit flush.
    bool isCoherent() const
    {
        return (mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
    }

    // valid() is false until a non-zero size has been established.
    bool valid() const { return mSize != 0; }

  private:
    void reset();
    angle::Result allocateNewBuffer(Context *context);

    VkBufferUsageFlags mUsage;  // usage flags for buffers created by this object
    bool mHostVisible;          // whether buffers are allocated host-visible
    size_t mInitialSize;        // size used when creating a new buffer
    std::unique_ptr<BufferHelper> mBuffer;  // buffer currently being allocated from
    uint32_t mNextAllocationOffset;         // offset of the next allocation within mBuffer
    size_t mSize;                           // current buffer size; 0 means !valid()
    size_t mAlignment;                      // accumulated alignment (see requireAlignment)
    VkMemoryPropertyFlags mMemoryPropertyFlags;  // memory properties of the current buffer

    BufferHelperPointerVector mInFlightBuffers;  // retired buffers possibly still in use by GPU
    BufferHelperPointerVector mBufferFreeList;   // buffers available for reuse
};
137
// Uses DescriptorPool to allocate descriptor sets as needed. If a descriptor pool becomes full, we
// allocate new pools internally as needed. RendererVk takes care of the lifetime of the discarded
// pools. Note that we used a fixed layout for descriptor pools in ANGLE.

// Shared handle to a descriptor pool. Each helper is allocated from the dynamic descriptor pool.
// Can be used to share descriptor pools between multiple ProgramVks and the ContextVk.
class DescriptorPoolHelper : public Resource
{
  public:
    DescriptorPoolHelper();
    ~DescriptorPoolHelper() override;

    // Whether the underlying VkDescriptorPool has been created.
    bool valid() { return mDescriptorPool.valid(); }

    // Whether |descriptorSetCount| more sets can be allocated from this pool.
    bool hasCapacity(uint32_t descriptorSetCount) const;
    // Creates the Vulkan pool with the given sizes and maximum set count.
    angle::Result init(Context *context,
                       const std::vector<VkDescriptorPoolSize> &poolSizesIn,
                       uint32_t maxSets);
    void destroy(VkDevice device);
    void release(ContextVk *contextVk);

    // Allocates |descriptorSetCount| sets with the given layout into |descriptorSetsOut|.
    angle::Result allocateDescriptorSets(Context *context,
                                         ResourceUseList *resourceUseList,
                                         const DescriptorSetLayout &descriptorSetLayout,
                                         uint32_t descriptorSetCount,
                                         VkDescriptorSet *descriptorSetsOut);

  private:
    uint32_t mFreeDescriptorSets;  // number of sets still available in this pool
    DescriptorPool mDescriptorPool;
};

using RefCountedDescriptorPoolHelper = RefCounted<DescriptorPoolHelper>;
using RefCountedDescriptorPoolBinding = BindingPointer<DescriptorPoolHelper>;
172
// Manages a growing list of DescriptorPoolHelpers; allocates a new pool whenever the current
// one runs out of sets.  All pools created by one instance share a single layout.
class DynamicDescriptorPool final : angle::NonCopyable
{
  public:
    DynamicDescriptorPool();
    ~DynamicDescriptorPool();

    // The DynamicDescriptorPool only handles one pool size at this time.
    // Note that setSizes[i].descriptorCount is expected to be the number of descriptors in
    // an individual set. The pool size will be calculated accordingly.
    angle::Result init(Context *context,
                       const VkDescriptorPoolSize *setSizes,
                       size_t setSizeCount,
                       VkDescriptorSetLayout descriptorSetLayout);
    void destroy(VkDevice device);
    void release(ContextVk *contextVk);

    // We use the descriptor type to help count the number of free sets.
    // By convention, sets are indexed according to the constants in vk_cache_utils.h.
    // Convenience forwarder that discards the "new pool allocated" notification.
    ANGLE_INLINE angle::Result allocateDescriptorSets(
        Context *context,
        ResourceUseList *resourceUseList,
        const DescriptorSetLayout &descriptorSetLayout,
        uint32_t descriptorSetCount,
        RefCountedDescriptorPoolBinding *bindingOut,
        VkDescriptorSet *descriptorSetsOut)
    {
        bool ignoreNewPoolAllocated;
        return allocateSetsAndGetInfo(context, resourceUseList, descriptorSetLayout,
                                      descriptorSetCount, bindingOut, descriptorSetsOut,
                                      &ignoreNewPoolAllocated);
    }

    // We use the descriptor type to help count the number of free sets.
    // By convention, sets are indexed according to the constants in vk_cache_utils.h.
    angle::Result allocateSetsAndGetInfo(Context *context,
                                         ResourceUseList *resourceUseList,
                                         const DescriptorSetLayout &descriptorSetLayout,
                                         uint32_t descriptorSetCount,
                                         RefCountedDescriptorPoolBinding *bindingOut,
                                         VkDescriptorSet *descriptorSetsOut,
                                         bool *newPoolAllocatedOut);

    // For testing only!
    static uint32_t GetMaxSetsPerPoolForTesting();
    static void SetMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool);
    static uint32_t GetMaxSetsPerPoolMultiplierForTesting();
    static void SetMaxSetsPerPoolMultiplierForTesting(uint32_t maxSetsPerPool);

    // Valid once at least one pool has been allocated.
    bool valid() const { return !mDescriptorPools.empty(); }

  private:
    angle::Result allocateNewPool(Context *context);

    static constexpr uint32_t kMaxSetsPerPoolMax = 512;
    static uint32_t mMaxSetsPerPool;
    static uint32_t mMaxSetsPerPoolMultiplier;
    size_t mCurrentPoolIndex;  // index into mDescriptorPools of the pool being allocated from
    std::vector<RefCountedDescriptorPoolHelper *> mDescriptorPools;
    std::vector<VkDescriptorPoolSize> mPoolSizes;
    // This cached handle is used for verifying the layout being used to allocate descriptor sets
    // from the pool matches the layout that the pool was created for, to ensure that the free
    // descriptor count is accurate and new pools are created appropriately.
    VkDescriptorSetLayout mCachedDescriptorSetLayout;
};
237
// Base class for pools (queries, semaphores, ...) that grow by allocating additional fixed-size
// Pool objects.  Entries are allocated sequentially from the current pool; a pool becomes
// reusable once all of its entries have been freed.
template <typename Pool>
class DynamicallyGrowingPool : angle::NonCopyable
{
  public:
    DynamicallyGrowingPool();
    virtual ~DynamicallyGrowingPool();

    // Valid once initEntryPool has been called with a non-zero pool size.
    bool isValid() { return mPoolSize > 0; }

  protected:
    angle::Result initEntryPool(Context *contextVk, uint32_t poolSize);

    // Destroys one Pool object; implemented by the derived class.
    virtual void destroyPoolImpl(VkDevice device, Pool &poolToDestroy) = 0;
    void destroyEntryPool(VkDevice device);

    // Checks to see if any pool is already free, in which case it sets it as current pool and
    // returns true.
    bool findFreeEntryPool(ContextVk *contextVk);

    // Allocates a new entry and initializes it with the given pool.
    angle::Result allocateNewEntryPool(ContextVk *contextVk, Pool &&pool);

    // Called by the implementation whenever an entry is freed.
    void onEntryFreed(ContextVk *contextVk, size_t poolIndex);

    // Const accessor; forwards to the non-const overload (safe: only reads are performed here).
    const Pool &getPool(size_t index) const
    {
        return const_cast<DynamicallyGrowingPool *>(this)->getPool(index);
    }

    Pool &getPool(size_t index)
    {
        ASSERT(index < mPools.size());
        return mPools[index].pool;
    }

    uint32_t getPoolSize() const { return mPoolSize; }

    // Creates/populates one Pool with |entriesToAllocate| entries; implemented by derived class.
    virtual angle::Result allocatePoolImpl(ContextVk *contextVk,
                                           Pool &poolToAllocate,
                                           uint32_t entriesToAllocate) = 0;
    // Reserves |entryCount| consecutive entries; outputs which pool and starting entry index.
    angle::Result allocatePoolEntries(ContextVk *contextVk,
                                      uint32_t entryCount,
                                      uint32_t *poolIndexOut,
                                      uint32_t *currentEntryOut);

  private:
    // The pool size, to know when a pool is completely freed.
    uint32_t mPoolSize;

    struct PoolResource : public Resource
    {
        PoolResource(Pool &&poolIn, uint32_t freedCountIn);
        PoolResource(PoolResource &&other);

        Pool pool;

        // A count corresponding to each pool indicating how many of its allocated entries
        // have been freed. Once that value reaches mPoolSize for each pool, that pool is considered
        // free and reusable. While keeping a bitset would allow allocation of each index, the
        // slight runtime overhead of finding free indices is not worth the slight memory overhead
        // of creating new pools when unnecessary.
        uint32_t freedCount;
    };
    std::vector<PoolResource> mPools;

    // Index into mPools indicating pool we are currently allocating from.
    size_t mCurrentPool;
    // Index inside mPools[mCurrentPool] indicating which index can be allocated next.
    uint32_t mCurrentFreeEntry;
};
309
// DynamicQueryPool allocates indices out of QueryPool as needed. Once a QueryPool is exhausted,
// another is created. The query pools live permanently, but are recycled as indices get freed.

// These are arbitrary default sizes for query pools.
constexpr uint32_t kDefaultOcclusionQueryPoolSize = 64;
constexpr uint32_t kDefaultTimestampQueryPoolSize = 64;
constexpr uint32_t kDefaultTransformFeedbackQueryPoolSize = 128;
constexpr uint32_t kDefaultPrimitivesGeneratedQueryPoolSize = 128;

class QueryHelper;

class DynamicQueryPool final : public DynamicallyGrowingPool<QueryPool>
{
  public:
    DynamicQueryPool();
    ~DynamicQueryPool() override;

    // All pools created by this object use the same VkQueryType and size.
    angle::Result init(ContextVk *contextVk, VkQueryType type, uint32_t poolSize);
    void destroy(VkDevice device);

    // Allocates |queryCount| consecutive query indices; the result is described by |queryOut|.
    angle::Result allocateQuery(ContextVk *contextVk, QueryHelper *queryOut, uint32_t queryCount);
    void freeQuery(ContextVk *contextVk, QueryHelper *query);

    const QueryPool &getQueryPool(size_t index) const { return getPool(index); }

  private:
    angle::Result allocatePoolImpl(ContextVk *contextVk,
                                   QueryPool &poolToAllocate,
                                   uint32_t entriesToAllocate) override;
    void destroyPoolImpl(VkDevice device, QueryPool &poolToDestroy) override;

    // Information required to create new query pools
    VkQueryType mQueryType;
};
344
345 // Stores the result of a Vulkan query call. XFB queries in particular store two result values.
346 class QueryResult final
347 {
348 public:
QueryResult(uint32_t intsPerResult)349 QueryResult(uint32_t intsPerResult) : mIntsPerResult(intsPerResult), mResults{} {}
350
351 void operator+=(const QueryResult &rhs)
352 {
353 mResults[0] += rhs.mResults[0];
354 mResults[1] += rhs.mResults[1];
355 }
356
getDataSize()357 size_t getDataSize() const { return mIntsPerResult * sizeof(uint64_t); }
358 void setResults(uint64_t *results, uint32_t queryCount);
getResult(size_t index)359 uint64_t getResult(size_t index) const
360 {
361 ASSERT(index < mIntsPerResult);
362 return mResults[index];
363 }
364
365 static constexpr size_t kDefaultResultIndex = 0;
366 static constexpr size_t kTransformFeedbackPrimitivesWrittenIndex = 0;
367 static constexpr size_t kPrimitivesGeneratedIndex = 1;
368
369 private:
370 uint32_t mIntsPerResult;
371 std::array<uint64_t, 2> mResults;
372 };
373
// Queries in Vulkan are identified by the query pool and an index for a query within that pool.
// Unlike other pools, such as descriptor pools where an allocation returns an independent object
// from the pool, the query allocations are not done through a Vulkan function and are only an
// integer index.
//
// Furthermore, to support arbitrarily large number of queries, DynamicQueryPool creates query pools
// of a fixed size as needed and allocates indices within those pools.
//
// The QueryHelper class below keeps the pool and index pair together. For multiview, multiple
// consecutive query indices are implicitly written to by the driver, so the query count is
// additionally kept.
class QueryHelper final : public Resource
{
  public:
    QueryHelper();
    ~QueryHelper() override;
    QueryHelper(QueryHelper &&rhs);
    QueryHelper &operator=(QueryHelper &&rhs);
    // Associates this helper with an allocation made by |dynamicQueryPool|.
    void init(const DynamicQueryPool *dynamicQueryPool,
              const size_t queryPoolIndex,
              uint32_t query,
              uint32_t queryCount);
    void deinit();

    // Valid while associated with a DynamicQueryPool (between init and deinit).
    bool valid() const { return mDynamicQueryPool != nullptr; }

    // Begin/end queries. These functions break the render pass.
    angle::Result beginQuery(ContextVk *contextVk);
    angle::Result endQuery(ContextVk *contextVk);
    // Begin/end queries within a started render pass.
    angle::Result beginRenderPassQuery(ContextVk *contextVk);
    void endRenderPassQuery(ContextVk *contextVk);

    angle::Result flushAndWriteTimestamp(ContextVk *contextVk);
    // When syncing gpu/cpu time, main thread accesses primary directly
    void writeTimestampToPrimary(ContextVk *contextVk, PrimaryCommandBuffer *primary);
    // All other timestamp accesses should be made on outsideRenderPassCommandBuffer
    void writeTimestamp(ContextVk *contextVk,
                        OutsideRenderPassCommandBuffer *outsideRenderPassCommandBuffer);

    // Whether this query helper has generated and submitted any commands.
    bool hasSubmittedCommands() const;

    // Non-blocking readback; |availableOut| reports whether the result was ready.
    angle::Result getUint64ResultNonBlocking(ContextVk *contextVk,
                                             QueryResult *resultOut,
                                             bool *availableOut);
    // Blocking readback of the query result.
    angle::Result getUint64Result(ContextVk *contextVk, QueryResult *resultOut);

  private:
    friend class DynamicQueryPool;
    const QueryPool &getQueryPool() const
    {
        ASSERT(valid());
        return mDynamicQueryPool->getQueryPool(mQueryPoolIndex);
    }

    // Reset needs to always be done outside a render pass, which may be different from the
    // passed-in command buffer (which could be the render pass').
    template <typename CommandBufferT>
    void beginQueryImpl(ContextVk *contextVk,
                        OutsideRenderPassCommandBuffer *resetCommandBuffer,
                        CommandBufferT *commandBuffer);
    template <typename CommandBufferT>
    void endQueryImpl(ContextVk *contextVk, CommandBufferT *commandBuffer);
    template <typename CommandBufferT>
    void resetQueryPoolImpl(ContextVk *contextVk,
                            const QueryPool &queryPool,
                            CommandBufferT *commandBuffer);
    VkResult getResultImpl(ContextVk *contextVk,
                           const VkQueryResultFlags flags,
                           QueryResult *resultOut);

    const DynamicQueryPool *mDynamicQueryPool;  // owning pool; null when !valid()
    size_t mQueryPoolIndex;                     // which VkQueryPool within the dynamic pool
    uint32_t mQuery;                            // first query index within that pool
    uint32_t mQueryCount;                       // consecutive indices used (multiview)

    enum class QueryStatus
    {
        Inactive,
        Active,
        Ended
    };
    QueryStatus mStatus;
};
459
// DynamicSemaphorePool allocates semaphores as needed. It uses a std::vector
// as a pool to allocate many semaphores at once. The pools live permanently,
// but are recycled as semaphores get freed.

// These are arbitrary default sizes for semaphore pools.
constexpr uint32_t kDefaultSemaphorePoolSize = 64;

class SemaphoreHelper;

class DynamicSemaphorePool final : public DynamicallyGrowingPool<std::vector<Semaphore>>
{
  public:
    DynamicSemaphorePool();
    ~DynamicSemaphorePool() override;

    angle::Result init(ContextVk *contextVk, uint32_t poolSize);
    void destroy(VkDevice device);

    // Allocates one semaphore from the pool into |semaphoreOut|.  Return it with freeSemaphore
    // when done.  (NOTE(review): an earlier comment here referenced an `autoFree` parameter that
    // this declaration does not have.)
    angle::Result allocateSemaphore(ContextVk *contextVk, SemaphoreHelper *semaphoreOut);
    void freeSemaphore(ContextVk *contextVk, SemaphoreHelper *semaphore);

  private:
    angle::Result allocatePoolImpl(ContextVk *contextVk,
                                   std::vector<Semaphore> &poolToAllocate,
                                   uint32_t entriesToAllocate) override;
    void destroyPoolImpl(VkDevice device, std::vector<Semaphore> &poolToDestroy) override;
};
489
// Semaphores that are allocated from the semaphore pool are encapsulated in a helper object,
// keeping track of where in the pool they are allocated from.
class SemaphoreHelper final : angle::NonCopyable
{
  public:
    SemaphoreHelper();
    ~SemaphoreHelper();

    SemaphoreHelper(SemaphoreHelper &&other);
    SemaphoreHelper &operator=(SemaphoreHelper &&other);

    // Associates this helper with a pool slot; the semaphore itself is owned by the pool.
    void init(const size_t semaphorePoolIndex, const Semaphore *semaphore);
    void deinit();

    const Semaphore *getSemaphore() const { return mSemaphore; }

    // Used only by DynamicSemaphorePool.
    size_t getSemaphorePoolIndex() const { return mSemaphorePoolIndex; }

  private:
    size_t mSemaphorePoolIndex;  // which pool within the DynamicSemaphorePool
    const Semaphore *mSemaphore; // non-owning; null when deinitialized
};
513
// This defines enum for VkPipelineStageFlagBits so that we can use it to compare and index into
// array.
enum class PipelineStage : uint16_t
{
    // Below are ordered based on Graphics Pipeline Stages
    TopOfPipe = 0,
    DrawIndirect = 1,
    VertexInput = 2,
    VertexShader = 3,
    TessellationControl = 4,
    TessellationEvaluation = 5,
    GeometryShader = 6,
    TransformFeedback = 7,
    EarlyFragmentTest = 8,
    FragmentShader = 9,
    LateFragmentTest = 10,
    ColorAttachmentOutput = 11,

    // Compute specific pipeline Stage
    ComputeShader = 12,

    // Transfer specific pipeline Stage
    Transfer = 13,
    BottomOfPipe = 14,

    // Host specific pipeline stage
    Host = 15,

    InvalidEnum = 16,
    EnumCount = InvalidEnum,
};
// Bitset over PipelineStage; fits in 16 bits since EnumCount == 16.
using PipelineStagesMask = angle::PackedEnumBitSet<PipelineStage, uint16_t>;

// Maps a GL shader type to its corresponding pipeline stage.
PipelineStage GetPipelineStage(gl::ShaderType stage);
548
549 // This wraps data and API for vkCmdPipelineBarrier call
550 class PipelineBarrier : angle::NonCopyable
551 {
552 public:
PipelineBarrier()553 PipelineBarrier()
554 : mSrcStageMask(0),
555 mDstStageMask(0),
556 mMemoryBarrierSrcAccess(0),
557 mMemoryBarrierDstAccess(0),
558 mImageMemoryBarriers()
559 {}
560 ~PipelineBarrier() = default;
561
isEmpty()562 bool isEmpty() const { return mImageMemoryBarriers.empty() && mMemoryBarrierDstAccess == 0; }
563
execute(PrimaryCommandBuffer * primary)564 void execute(PrimaryCommandBuffer *primary)
565 {
566 if (isEmpty())
567 {
568 return;
569 }
570
571 // Issue vkCmdPipelineBarrier call
572 VkMemoryBarrier memoryBarrier = {};
573 uint32_t memoryBarrierCount = 0;
574 if (mMemoryBarrierDstAccess != 0)
575 {
576 memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
577 memoryBarrier.srcAccessMask = mMemoryBarrierSrcAccess;
578 memoryBarrier.dstAccessMask = mMemoryBarrierDstAccess;
579 memoryBarrierCount++;
580 }
581 primary->pipelineBarrier(
582 mSrcStageMask, mDstStageMask, 0, memoryBarrierCount, &memoryBarrier, 0, nullptr,
583 static_cast<uint32_t>(mImageMemoryBarriers.size()), mImageMemoryBarriers.data());
584
585 reset();
586 }
587
executeIndividually(PrimaryCommandBuffer * primary)588 void executeIndividually(PrimaryCommandBuffer *primary)
589 {
590 if (isEmpty())
591 {
592 return;
593 }
594
595 // Issue vkCmdPipelineBarrier call
596 VkMemoryBarrier memoryBarrier = {};
597 uint32_t memoryBarrierCount = 0;
598 if (mMemoryBarrierDstAccess != 0)
599 {
600 memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
601 memoryBarrier.srcAccessMask = mMemoryBarrierSrcAccess;
602 memoryBarrier.dstAccessMask = mMemoryBarrierDstAccess;
603 memoryBarrierCount++;
604 }
605
606 for (const VkImageMemoryBarrier &imageBarrier : mImageMemoryBarriers)
607 {
608 primary->pipelineBarrier(mSrcStageMask, mDstStageMask, 0, memoryBarrierCount,
609 &memoryBarrier, 0, nullptr, 1, &imageBarrier);
610 }
611
612 reset();
613 }
614
615 // merge two barriers into one
merge(PipelineBarrier * other)616 void merge(PipelineBarrier *other)
617 {
618 mSrcStageMask |= other->mSrcStageMask;
619 mDstStageMask |= other->mDstStageMask;
620 mMemoryBarrierSrcAccess |= other->mMemoryBarrierSrcAccess;
621 mMemoryBarrierDstAccess |= other->mMemoryBarrierDstAccess;
622 mImageMemoryBarriers.insert(mImageMemoryBarriers.end(), other->mImageMemoryBarriers.begin(),
623 other->mImageMemoryBarriers.end());
624 other->reset();
625 }
626
mergeMemoryBarrier(VkPipelineStageFlags srcStageMask,VkPipelineStageFlags dstStageMask,VkAccessFlags srcAccess,VkAccessFlags dstAccess)627 void mergeMemoryBarrier(VkPipelineStageFlags srcStageMask,
628 VkPipelineStageFlags dstStageMask,
629 VkAccessFlags srcAccess,
630 VkAccessFlags dstAccess)
631 {
632 mSrcStageMask |= srcStageMask;
633 mDstStageMask |= dstStageMask;
634 mMemoryBarrierSrcAccess |= srcAccess;
635 mMemoryBarrierDstAccess |= dstAccess;
636 }
637
mergeImageBarrier(VkPipelineStageFlags srcStageMask,VkPipelineStageFlags dstStageMask,const VkImageMemoryBarrier & imageMemoryBarrier)638 void mergeImageBarrier(VkPipelineStageFlags srcStageMask,
639 VkPipelineStageFlags dstStageMask,
640 const VkImageMemoryBarrier &imageMemoryBarrier)
641 {
642 ASSERT(imageMemoryBarrier.pNext == nullptr);
643 mSrcStageMask |= srcStageMask;
644 mDstStageMask |= dstStageMask;
645 mImageMemoryBarriers.push_back(imageMemoryBarrier);
646 }
647
reset()648 void reset()
649 {
650 mSrcStageMask = 0;
651 mDstStageMask = 0;
652 mMemoryBarrierSrcAccess = 0;
653 mMemoryBarrierDstAccess = 0;
654 mImageMemoryBarriers.clear();
655 }
656
657 void addDiagnosticsString(std::ostringstream &out) const;
658
659 private:
660 VkPipelineStageFlags mSrcStageMask;
661 VkPipelineStageFlags mDstStageMask;
662 VkAccessFlags mMemoryBarrierSrcAccess;
663 VkAccessFlags mMemoryBarrierDstAccess;
664 std::vector<VkImageMemoryBarrier> mImageMemoryBarriers;
665 };
// One PipelineBarrier per pipeline stage, indexed by PipelineStage.
using PipelineBarrierArray = angle::PackedEnumMap<PipelineStage, PipelineBarrier>;

class FramebufferHelper;

// Whether host-visible memory should be coherent (no explicit flush/invalidate required).
enum class MemoryCoherency
{
    NonCoherent,
    Coherent
};

// Whether memory needs to be mappable by the host at all.
enum class MemoryHostVisibility
{
    NonVisible,
    Visible
};
681
682 class BufferHelper : public ReadWriteResource
683 {
684 public:
685 BufferHelper();
686 ~BufferHelper() override;
687
688 BufferHelper(BufferHelper &&other);
689 BufferHelper &operator=(BufferHelper &&other);
690
691 angle::Result init(vk::Context *context,
692 const VkBufferCreateInfo &createInfo,
693 VkMemoryPropertyFlags memoryPropertyFlags);
694 angle::Result initExternal(ContextVk *contextVk,
695 VkMemoryPropertyFlags memoryProperties,
696 const VkBufferCreateInfo &requestedCreateInfo,
697 GLeglClientBufferEXT clientBuffer);
698 angle::Result initSuballocation(ContextVk *contextVk,
699 uint32_t memoryTypeIndex,
700 size_t size,
701 size_t alignment);
702
703 // Helper functions to initialize a buffer for a specific usage
704 // Suballocate a buffer with alignment good for shader storage or copyBuffer .
705 angle::Result allocateForVertexConversion(ContextVk *contextVk,
706 size_t size,
707 MemoryHostVisibility hostVisibility);
708 // Suballocate a host visible buffer with alignment good for copyBuffer .
709 angle::Result allocateForCopyBuffer(ContextVk *contextVk,
710 size_t size,
711 MemoryCoherency coherency);
712 // Suballocate a host visible buffer with alignment good for copyImage .
713 angle::Result allocateForCopyImage(ContextVk *contextVk,
714 size_t size,
715 MemoryCoherency coherency,
716 angle::FormatID formatId,
717 VkDeviceSize *offset,
718 uint8_t **dataPtr);
719
720 void destroy(RendererVk *renderer);
721 void release(RendererVk *renderer);
722
getBufferSerial()723 BufferSerial getBufferSerial() const { return mSerial; }
getBlockSerial()724 BufferSerial getBlockSerial() const
725 {
726 ASSERT(mSuballocation.valid());
727 return mSuballocation.getBlockSerial();
728 }
valid()729 bool valid() const { return mSuballocation.valid(); }
getBuffer()730 const Buffer &getBuffer() const { return mSuballocation.getBuffer(); }
getOffset()731 VkDeviceSize getOffset() const { return mSuballocation.getOffset(); }
getSize()732 VkDeviceSize getSize() const { return mSuballocation.getSize(); }
getMemoryPropertyFlags()733 VkMemoryMapFlags getMemoryPropertyFlags() const
734 {
735 return mSuballocation.getMemoryPropertyFlags();
736 }
getMappedMemory()737 uint8_t *getMappedMemory() const
738 {
739 ASSERT(isMapped());
740 return mSuballocation.getMappedMemory();
741 }
742 // Returns the main buffer block's pointer.
getBlockMemory()743 uint8_t *getBlockMemory() const { return mSuballocation.getBlockMemory(); }
getBlockMemorySize()744 VkDeviceSize getBlockMemorySize() const { return mSuballocation.getBlockMemorySize(); }
isHostVisible()745 bool isHostVisible() const { return mSuballocation.isHostVisible(); }
isCoherent()746 bool isCoherent() const { return mSuballocation.isCoherent(); }
747
isMapped()748 bool isMapped() const { return mSuballocation.isMapped(); }
749
750 // Also implicitly sets up the correct barriers.
751 angle::Result copyFromBuffer(ContextVk *contextVk,
752 BufferHelper *srcBuffer,
753 uint32_t regionCount,
754 const VkBufferCopy *copyRegions);
755
756 angle::Result map(Context *context, uint8_t **ptrOut);
757 angle::Result mapWithOffset(ContextVk *contextVk, uint8_t **ptrOut, size_t offset);
unmap(RendererVk * renderer)758 void unmap(RendererVk *renderer) {}
759 // After a sequence of writes, call flush to ensure the data is visible to the device.
760 angle::Result flush(RendererVk *renderer);
761 angle::Result flush(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size);
762 // After a sequence of writes, call invalidate to ensure the data is visible to the host.
763 angle::Result invalidate(RendererVk *renderer);
764 angle::Result invalidate(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size);
765
766 void changeQueue(uint32_t newQueueFamilyIndex, OutsideRenderPassCommandBuffer *commandBuffer);
767
768 // Performs an ownership transfer from an external instance or API.
769 void acquireFromExternal(ContextVk *contextVk,
770 uint32_t externalQueueFamilyIndex,
771 uint32_t rendererQueueFamilyIndex,
772 OutsideRenderPassCommandBuffer *commandBuffer);
773
774 // Performs an ownership transfer to an external instance or API.
775 void releaseToExternal(ContextVk *contextVk,
776 uint32_t rendererQueueFamilyIndex,
777 uint32_t externalQueueFamilyIndex,
778 OutsideRenderPassCommandBuffer *commandBuffer);
779
780 // Returns true if the image is owned by an external API or instance.
781 bool isReleasedToExternal() const;
782
783 bool recordReadBarrier(VkAccessFlags readAccessType,
784 VkPipelineStageFlags readStage,
785 PipelineBarrier *barrier);
786
787 bool recordWriteBarrier(VkAccessFlags writeAccessType,
788 VkPipelineStageFlags writeStage,
789 PipelineBarrier *barrier);
790 void fillWithColor(const angle::Color<uint8_t> &color,
791 const gl::InternalFormat &internalFormat);
792
793 // Special handling for VertexArray code so that we can create a dedicated VkBuffer for the
794 // sub-range of memory of the actual buffer data size that user requested (i.e, excluding extra
795 // paddings that we added for alignment, which will not get zero filled).
796 const Buffer &getBufferForVertexArray(ContextVk *contextVk,
797 VkDeviceSize actualDataSize,
798 VkDeviceSize *offsetOut);
799
800 private:
801 void initializeBarrierTracker(Context *context);
802 angle::Result initializeNonZeroMemory(Context *context,
803 VkBufferUsageFlags usage,
804 VkDeviceSize size);
805
806 // Only called by DynamicBuffer.
807 friend class DynamicBuffer;
setSuballocationOffsetAndSize(VkDeviceSize offset,VkDeviceSize size)808 void setSuballocationOffsetAndSize(VkDeviceSize offset, VkDeviceSize size)
809 {
810 mSuballocation.setOffsetAndSize(offset, size);
811 }
812
813 // Suballocation object.
814 BufferSuballocation mSuballocation;
815 // This normally is invalid. We always use the BufferBlock's buffer and offset combination. But
816 // when robust resource init is enabled, we may want to create a dedicated VkBuffer for the
817 // suballocation so that vulkan driver will ensure no access beyond this sub-range. In that
818 // case, this VkBuffer will be created lazily as needed.
819 Buffer mBufferForVertexArray;
820
821 // For memory barriers.
822 uint32_t mCurrentQueueFamilyIndex;
823 VkFlags mCurrentWriteAccess;
824 VkFlags mCurrentReadAccess;
825 VkPipelineStageFlags mCurrentWriteStages;
826 VkPipelineStageFlags mCurrentReadStages;
827
828 BufferSerial mSerial;
829 };
830
// A pool of BufferBlocks that services suballocation requests via VMA virtual blocks; grows by
// allocating new blocks and prunes blocks that stay empty.
class BufferPool : angle::NonCopyable
{
  public:
    BufferPool();
    BufferPool(BufferPool &&other);
    ~BufferPool();

    // Init that gives the ability to pass in specified memory property flags for the buffer.
    void initWithFlags(RendererVk *renderer,
                       vma::VirtualBlockCreateFlags flags,
                       VkBufferUsageFlags usage,
                       VkDeviceSize initialSize,
                       uint32_t memoryTypeIndex,
                       VkMemoryPropertyFlags memoryProperty);

    // Suballocates |sizeInBytes| with the given alignment; result is described by |suballocation|.
    angle::Result allocateBuffer(Context *context,
                                 VkDeviceSize sizeInBytes,
                                 VkDeviceSize alignment,
                                 BufferSuballocation *suballocation);

    // Frees resources immediately, or orphan the non-empty BufferBlocks if allowed. If orphan is
    // not allowed, it will assert if BufferBlock is still not empty.
    void destroy(RendererVk *renderer, bool orphanAllowed);
    // Remove and destroy empty BufferBlocks
    void pruneEmptyBuffers(RendererVk *renderer);

    // Valid once initWithFlags has established a non-zero size.
    bool valid() const { return mSize != 0; }

  private:
    angle::Result allocateNewBuffer(Context *context, VkDeviceSize sizeInBytes);

    vma::VirtualBlockCreateFlags mVirtualBlockCreateFlags;
    VkBufferUsageFlags mUsage;
    bool mHostVisible;
    VkDeviceSize mSize;
    uint32_t mMemoryTypeIndex;
    BufferBlockPointerVector mBufferBlocks;
    // When pruneDefaultBufferPools gets called, we do not immediately free all empty buffers. Only
    // buffers that we found are empty for kMaxCountRemainsEmpty number of times consecutively, or
    // we have more than kMaxEmptyBufferCount number of empty buffers, we will actually free it.
    // That way we avoid the situation that a buffer just becomes empty and gets freed right after
    // and only to find out that we have to allocate a new one next frame.
    static constexpr int32_t kMaxCountRemainsEmpty = 4;
    static constexpr int32_t kMaxEmptyBufferCount = 16;
    // max size to go down the suballocation code path. Any allocation greater or equal this size
    // will call into vulkan directly to allocate a dedicated VkDeviceMemory.
    static constexpr size_t kMaxBufferSizeForSuballocation = 4 * 1024 * 1024;
};
// One BufferPool per Vulkan memory type index.
using BufferPoolPointerArray = std::array<std::unique_ptr<BufferPool>, VK_MAX_MEMORY_TYPES>;
880
// The kind of access a command buffer performs on a buffer. Tracked per buffer so that
// simultaneous reads can be allowed (see CommandBufferHelperCommon::mUsedBuffers).
enum class BufferAccess
{
    Read,
    Write,
};
886
// Whether aliased access to a resource is permitted when recording a write; passed to the
// bufferWrite/imageWrite helpers below.
enum class AliasingMode
{
    Allowed,
    Disallowed,
};
892
// Stores clear values, indexed by packed attachment index
class PackedClearValuesArray final
{
  public:
    PackedClearValuesArray();
    ~PackedClearValuesArray();

    PackedClearValuesArray(const PackedClearValuesArray &other);
    PackedClearValuesArray &operator=(const PackedClearValuesArray &rhs);
    // Stores the clear value for the attachment at |index|; |aspectFlags| indicates which
    // aspects of |clearValue| apply.
    void store(PackedAttachmentIndex index,
               VkImageAspectFlags aspectFlags,
               const VkClearValue &clearValue);
    // Stores a clear value without aspect information.
    void storeNoDepthStencil(PackedAttachmentIndex index, const VkClearValue &clearValue);
    const VkClearValue &operator[](PackedAttachmentIndex index) const
    {
        return mValues[index.get()];
    }
    // Contiguous access to the packed clear values (e.g. for render pass begin info).
    const VkClearValue *data() const { return mValues.data(); }

  private:
    gl::AttachmentArray<VkClearValue> mValues;
};
915
916 // Reference to a render pass attachment (color or depth/stencil) alongside render-pass-related
917 // tracking such as when the attachment is last written to or invalidated. This is used to
918 // determine loadOp and storeOp of the attachment, and enables optimizations that need to know
919 // how the attachment has been used.
class RenderPassAttachment final
{
  public:
    RenderPassAttachment();
    ~RenderPassAttachment() = default;

    // Binds this tracker to an attachment image subresource at render pass start.
    void init(ImageHelper *image,
              gl::LevelIndex levelIndex,
              uint32_t layerIndex,
              uint32_t layerCount,
              VkImageAspectFlagBits aspect);
    // Returns the tracker to its initial (unused) state.
    void reset();

    // Records an access at the draw command with index |currentCmdCount|.
    void onAccess(ResourceAccess access, uint32_t currentCmdCount);
    // Records an invalidate of |invalidateArea| at the given command index.
    void invalidate(const gl::Rectangle &invalidateArea,
                    bool isAttachmentEnabled,
                    uint32_t currentCmdCount);
    void onRenderAreaGrowth(ContextVk *contextVk, const gl::Rectangle &newRenderArea);
    // Determines the final loadOp/storeOp of the attachment at render pass end, and whether its
    // contents end up invalidated.
    void finalizeLoadStore(Context *context,
                           uint32_t currentCmdCount,
                           bool hasUnresolveAttachment,
                           RenderPassLoadOp *loadOp,
                           RenderPassStoreOp *storeOp,
                           bool *isInvalidatedOut);
    void restoreContent();
    // Whether any access (excluding clear through loadOp) was made during the render pass.
    bool hasAnyAccess() const { return mAccess != ResourceAccess::Unused; }
    bool hasWriteAccess() const { return mAccess == ResourceAccess::Write; }

    ImageHelper *getImage() { return mImage; }

  private:
    bool hasWriteAfterInvalidate(uint32_t currentCmdCount) const;
    bool isInvalidated(uint32_t currentCmdCount) const;
    bool onAccessImpl(ResourceAccess access, uint32_t currentCmdCount);

    // The attachment image itself
    ImageHelper *mImage;
    // The subresource used in the render pass
    gl::LevelIndex mLevelIndex;
    uint32_t mLayerIndex;
    uint32_t mLayerCount;
    VkImageAspectFlagBits mAspect;
    // Tracks the highest access during the entire render pass (Write being the highest), excluding
    // clear through loadOp. This allows loadOp=Clear to be optimized out when we find out that the
    // attachment is not used in the render pass at all and storeOp=DontCare, or that a
    // mid-render-pass clear could be hoisted to loadOp=Clear.
    ResourceAccess mAccess;
    // The index of the last draw command after which the attachment is invalidated
    uint32_t mInvalidatedCmdCount;
    // The index of the last draw command after which the attachment output is disabled
    uint32_t mDisabledCmdCount;
    // The area that has been invalidated
    gl::Rectangle mInvalidateArea;
};
974
// Stores RenderPassAttachment objects, indexed by packed attachment index
976 class PackedRenderPassAttachmentArray final
977 {
978 public:
PackedRenderPassAttachmentArray()979 PackedRenderPassAttachmentArray() : mAttachments{} {}
980 ~PackedRenderPassAttachmentArray() = default;
981 RenderPassAttachment &operator[](PackedAttachmentIndex index)
982 {
983 return mAttachments[index.get()];
984 }
reset()985 void reset()
986 {
987 for (RenderPassAttachment &attachment : mAttachments)
988 {
989 attachment.reset();
990 }
991 }
992
993 private:
994 gl::AttachmentArray<RenderPassAttachment> mAttachments;
995 };
996
997 // The following are used to help track the state of an invalidated attachment.
// This value indicates an "infinite" CmdCount that is never valid to compare against.
999 constexpr uint32_t kInfiniteCmdCount = 0xFFFFFFFF;
1000
1001 // CommandBufferHelperCommon and derivatives OutsideRenderPassCommandBufferHelper and
1002 // RenderPassCommandBufferHelper wrap the outside/inside render pass secondary command buffers,
1003 // together with other information such as barriers to issue before the command buffer, tracking of
1004 // resource usages, etc. When the asyncCommandQueue feature is enabled, objects of these classes
1005 // are handed off to the worker thread to be executed on the primary command buffer.
class CommandBufferHelperCommon : angle::NonCopyable
{
  public:
    CommandPool *getCommandPool() { return mCommandPool; }

    // Tracks a read of |buffer| at the given stage/access, accumulating any pipeline barrier
    // needed before this command buffer executes.
    void bufferRead(ContextVk *contextVk,
                    VkAccessFlags readAccessType,
                    PipelineStage readStage,
                    BufferHelper *buffer);
    // Tracks a write to |buffer|; |aliasingMode| controls whether aliased access is allowed.
    void bufferWrite(ContextVk *contextVk,
                     VkAccessFlags writeAccessType,
                     PipelineStage writeStage,
                     AliasingMode aliasingMode,
                     BufferHelper *buffer);

    // Whether |buffer| has been tracked in this command buffer (for any access / for write).
    bool usesBuffer(const BufferHelper &buffer) const;
    bool usesBufferForWrite(const BufferHelper &buffer) const;
    size_t getUsedBuffersCount() const { return mUsedBuffers.size(); }

    // Issues the accumulated barriers on |primary|, ahead of this command buffer's contents.
    void executeBarriers(const angle::FeaturesVk &features, PrimaryCommandBuffer *primary);

    // The markOpen and markClosed functions are to aid in proper use of the *CommandBufferHelper.
    // We have seen invalid use due to threading issues that can be easily caught by marking when
    // it's safe (open) to write to the commandbuffer.
#if !defined(ANGLE_ENABLE_ASSERTS)
    void markOpen() {}
    void markClosed() {}
#endif

    void setHasShaderStorageOutput() { mHasShaderStorageOutput = true; }
    bool hasShaderStorageOutput() const { return mHasShaderStorageOutput; }

    bool hasGLMemoryBarrierIssued() const { return mHasGLMemoryBarrierIssued; }

    vk::ResourceUseList &getResourceUseList() { return mResourceUseList; }

    // Dumping the command stream is disabled by default.
    static constexpr bool kEnableCommandStreamDiagnostics = false;

  protected:
    CommandBufferHelperCommon();
    ~CommandBufferHelperCommon();

    void initializeImpl(Context *context, CommandPool *commandPool);

    void resetImpl();

    // Shared implementations of image read/write tracking used by the derived helpers.
    void imageReadImpl(ContextVk *contextVk,
                       VkImageAspectFlags aspectFlags,
                       ImageLayout imageLayout,
                       ImageHelper *image);
    void imageWriteImpl(ContextVk *contextVk,
                        gl::LevelIndex level,
                        uint32_t layerStart,
                        uint32_t layerCount,
                        VkImageAspectFlags aspectFlags,
                        ImageLayout imageLayout,
                        AliasingMode aliasingMode,
                        ImageHelper *image);

    void updateImageLayoutAndBarrier(Context *context,
                                     ImageHelper *image,
                                     VkImageAspectFlags aspectFlags,
                                     ImageLayout imageLayout);

    void addCommandDiagnosticsCommon(std::ostringstream *out);

    // Allocator used by this class. Using a pool allocator per CBH to avoid threading issues
    // that occur w/ shared allocator between multiple CBHs.
    angle::PoolAllocator mAllocator;

    // Barriers to be executed before the command buffer.
    PipelineBarrierArray mPipelineBarriers;
    PipelineStagesMask mPipelineBarrierMask;

    // The command pool *CommandBufferHelper::mCommandBuffer is allocated from. Only used with
    // Vulkan secondary command buffers (as opposed to ANGLE's SecondaryCommandBuffer).
    CommandPool *mCommandPool;

    // Whether the command buffers contains any draw/dispatch calls that possibly output data
    // through storage buffers and images. This is used to determine whether glMemoryBarrier*
    // should flush the command buffer.
    bool mHasShaderStorageOutput;
    // Whether glMemoryBarrier has been called while commands are recorded in this command buffer.
    // This is used to know when to check and potentially flush the command buffer if storage
    // buffers and images are used in it.
    bool mHasGLMemoryBarrierIssued;

    // Tracks resources used in the command buffer.
    // For Buffers, we track the read/write access type so we can enable simultaneous reads.
    static constexpr uint32_t kFlatMapSize = 16;
    angle::FlatUnorderedMap<BufferSerial, BufferAccess, kFlatMapSize> mUsedBuffers;
    vk::ResourceUseList mResourceUseList;
};
1100
class OutsideRenderPassCommandBufferHelper final : public CommandBufferHelperCommon
{
  public:
    OutsideRenderPassCommandBufferHelper();
    ~OutsideRenderPassCommandBufferHelper();

    angle::Result initialize(Context *context, CommandPool *commandPool);

    angle::Result reset(Context *context);

    OutsideRenderPassCommandBuffer &getCommandBuffer() { return mCommandBuffer; }

    // Whether no commands have been recorded yet.
    bool empty() const { return mCommandBuffer.empty(); }

#if defined(ANGLE_ENABLE_ASSERTS)
    void markOpen() { mCommandBuffer.open(); }
    void markClosed() { mCommandBuffer.close(); }
#endif

    // Tracks a read of |image| in the given layout within this command buffer.
    void imageRead(ContextVk *contextVk,
                   VkImageAspectFlags aspectFlags,
                   ImageLayout imageLayout,
                   ImageHelper *image);
    // Tracks a write to the given subresource range of |image| in the given layout.
    void imageWrite(ContextVk *contextVk,
                    gl::LevelIndex level,
                    uint32_t layerStart,
                    uint32_t layerCount,
                    VkImageAspectFlags aspectFlags,
                    ImageLayout imageLayout,
                    AliasingMode aliasingMode,
                    ImageHelper *image);

    // Flushes the recorded commands into |primary|.
    angle::Result flushToPrimary(Context *context, PrimaryCommandBuffer *primary);

    // See CommandBufferHelperCommon::mHasGLMemoryBarrierIssued. Only relevant if there are
    // commands recorded, hence the empty() check.
    void setGLMemoryBarrierIssued()
    {
        if (!mCommandBuffer.empty())
        {
            mHasGLMemoryBarrierIssued = true;
        }
    }

    void addCommandDiagnostics(ContextVk *contextVk);

  private:
    angle::Result initializeCommandBuffer(Context *context);

    OutsideRenderPassCommandBuffer mCommandBuffer;
};
1150
class RenderPassCommandBufferHelper final : public CommandBufferHelperCommon
{
  public:
    RenderPassCommandBufferHelper();
    ~RenderPassCommandBufferHelper();

    angle::Result initialize(Context *context, CommandPool *commandPool);

    angle::Result reset(Context *context);

    // Returns the command buffer of the current subpass.
    RenderPassCommandBuffer &getCommandBuffer() { return mCommandBuffers[mCurrentSubpass]; }

    bool empty() const { return !started(); }

#if defined(ANGLE_ENABLE_ASSERTS)
    void markOpen() { getCommandBuffer().open(); }
    void markClosed() { getCommandBuffer().close(); }
#endif

    // Tracks a read of |image| in the given layout within this render pass.
    void imageRead(ContextVk *contextVk,
                   VkImageAspectFlags aspectFlags,
                   ImageLayout imageLayout,
                   ImageHelper *image);
    // Tracks a write to the given subresource range of |image| in the given layout.
    void imageWrite(ContextVk *contextVk,
                    gl::LevelIndex level,
                    uint32_t layerStart,
                    uint32_t layerCount,
                    VkImageAspectFlags aspectFlags,
                    ImageLayout imageLayout,
                    AliasingMode aliasingMode,
                    ImageHelper *image);

    // Tracks the given image (and its optional resolve image) as a color attachment of this
    // render pass.
    void colorImagesDraw(ResourceUseList *resourceUseList,
                         gl::LevelIndex level,
                         uint32_t layerStart,
                         uint32_t layerCount,
                         ImageHelper *image,
                         ImageHelper *resolveImage,
                         PackedAttachmentIndex packedAttachmentIndex);
    // Tracks the given image (and its optional resolve image) as the depth/stencil attachment.
    void depthStencilImagesDraw(ResourceUseList *resourceUseList,
                                gl::LevelIndex level,
                                uint32_t layerStart,
                                uint32_t layerCount,
                                ImageHelper *image,
                                ImageHelper *resolveImage);

    bool usesImage(const ImageHelper &image) const;

    angle::Result flushToPrimary(Context *context,
                                 PrimaryCommandBuffer *primary,
                                 const RenderPass *renderPass);

    bool started() const { return mRenderPassStarted; }

    // Finalize the layout if image has any deferred layout transition.
    void finalizeImageLayout(Context *context, const ImageHelper *image);

    angle::Result beginRenderPass(ContextVk *contextVk,
                                  const Framebuffer &framebuffer,
                                  const gl::Rectangle &renderArea,
                                  const RenderPassDesc &renderPassDesc,
                                  const AttachmentOpsArray &renderPassAttachmentOps,
                                  const PackedAttachmentCount colorAttachmentCount,
                                  const PackedAttachmentIndex depthStencilAttachmentIndex,
                                  const PackedClearValuesArray &clearValues,
                                  RenderPassCommandBuffer **commandBufferOut);

    angle::Result endRenderPass(ContextVk *contextVk);

    angle::Result nextSubpass(ContextVk *contextVk, RenderPassCommandBuffer **commandBufferOut);

    void updateStartedRenderPassWithDepthMode(bool readOnlyDepthStencilMode);

    void beginTransformFeedback(size_t validBufferCount,
                                const VkBuffer *counterBuffers,
                                const VkDeviceSize *counterBufferOffsets,
                                bool rebindBuffers);

    void endTransformFeedback();

    // Marks (part of) an attachment as invalidated; this feeds into the storeOp (and possibly
    // loadOp) decisions made when the render pass ends.
    void invalidateRenderPassColorAttachment(const gl::State &state,
                                             size_t colorIndexGL,
                                             PackedAttachmentIndex attachmentIndex,
                                             const gl::Rectangle &invalidateArea);
    void invalidateRenderPassDepthAttachment(const gl::DepthStencilState &dsState,
                                             const gl::Rectangle &invalidateArea);
    void invalidateRenderPassStencilAttachment(const gl::DepthStencilState &dsState,
                                               const gl::Rectangle &invalidateArea);

    void updateRenderPassColorClear(PackedAttachmentIndex colorIndexVk,
                                    const VkClearValue &colorClearValue);
    void updateRenderPassDepthStencilClear(VkImageAspectFlags aspectFlags,
                                           const VkClearValue &clearValue);

    const gl::Rectangle &getRenderArea() const { return mRenderArea; }

    // If render pass is started with a small render area due to a small scissor, and if a new
    // larger scissor is specified, grow the render area to accommodate it.
    void growRenderArea(ContextVk *contextVk, const gl::Rectangle &newRenderArea);

    void resumeTransformFeedback();
    void pauseTransformFeedback();
    bool isTransformFeedbackStarted() const { return mValidTransformFeedbackBufferCount > 0; }
    bool isTransformFeedbackActiveUnpaused() const { return mIsTransformFeedbackActiveUnpaused; }

    // Returns the current counter value and resets it to zero.
    uint32_t getAndResetCounter()
    {
        uint32_t count = mCounter;
        mCounter = 0;
        return count;
    }

    VkFramebuffer getFramebufferHandle() const { return mFramebuffer.getHandle(); }

    // Record accesses to the render pass attachments; used to finalize their load/store ops.
    void onColorAccess(PackedAttachmentIndex packedAttachmentIndex, ResourceAccess access);
    void onDepthAccess(ResourceAccess access);
    void onStencilAccess(ResourceAccess access);

    bool hasAnyColorAccess(PackedAttachmentIndex packedAttachmentIndex)
    {
        ASSERT(packedAttachmentIndex < mColorAttachmentsCount);
        return mColorAttachments[packedAttachmentIndex].hasAnyAccess();
    }
    bool hasAnyDepthAccess() { return mDepthAttachment.hasAnyAccess(); }
    bool hasAnyStencilAccess() { return mStencilAttachment.hasAnyAccess(); }

    void updateRenderPassForResolve(ContextVk *contextVk,
                                    Framebuffer *newFramebuffer,
                                    const RenderPassDesc &renderPassDesc);

    // Whether depth or stencil is written to (or cleared through loadOp) in this render pass.
    bool hasDepthStencilWriteOrClear() const
    {
        return mDepthAttachment.hasWriteAccess() || mStencilAttachment.hasWriteAccess() ||
               mAttachmentOps[mDepthStencilAttachmentIndex].loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR ||
               mAttachmentOps[mDepthStencilAttachmentIndex].stencilLoadOp ==
                   VK_ATTACHMENT_LOAD_OP_CLEAR;
    }

    const RenderPassDesc &getRenderPassDesc() const { return mRenderPassDesc; }
    const AttachmentOpsArray &getAttachmentOps() const { return mAttachmentOps; }

    void setImageOptimizeForPresent(ImageHelper *image) { mImageOptimizeForPresent = image; }

    // See CommandBufferHelperCommon::mHasGLMemoryBarrierIssued. Only relevant once the render
    // pass has started.
    void setGLMemoryBarrierIssued()
    {
        if (mRenderPassStarted)
        {
            mHasGLMemoryBarrierIssued = true;
        }
    }
    void addCommandDiagnostics(ContextVk *contextVk);

  private:
    angle::Result initializeCommandBuffer(Context *context);
    angle::Result beginRenderPassCommandBuffer(ContextVk *contextVk);
    angle::Result endRenderPassCommandBuffer(ContextVk *contextVk);

    uint32_t getRenderPassWriteCommandCount()
    {
        // All subpasses are chained (no subpasses running in parallel), so the cmd count can be
        // considered continuous among subpasses.
        return mPreviousSubpassesCmdCount + getCommandBuffer().getRenderPassWriteCommandCount();
    }

    // We can't determine the image layout at the renderpass start time since the images' full
    // usage isn't known until a later time. We finalize the layout when either the ImageHelper
    // object is released or when the renderpass ends.
    void finalizeColorImageLayout(Context *context,
                                  ImageHelper *image,
                                  PackedAttachmentIndex packedAttachmentIndex,
                                  bool isResolveImage);
    void finalizeColorImageLoadStore(Context *context, PackedAttachmentIndex packedAttachmentIndex);
    void finalizeDepthStencilImageLayout(Context *context);
    void finalizeDepthStencilResolveImageLayout(Context *context);
    void finalizeDepthStencilLoadStore(Context *context);

    void finalizeColorImageLayoutAndLoadStore(Context *context,
                                              PackedAttachmentIndex packedAttachmentIndex);
    void finalizeDepthStencilImageLayoutAndLoadStore(Context *context);

    // When using Vulkan secondary command buffers, each subpass must be recorded in a separate
    // command buffer. Currently ANGLE produces render passes with at most 2 subpasses. Once
    // framebuffer-fetch is appropriately implemented to use subpasses, this array must be made
    // dynamic.
    static constexpr size_t kMaxSubpassCount = 2;
    std::array<RenderPassCommandBuffer, kMaxSubpassCount> mCommandBuffers;
    uint32_t mCurrentSubpass;

    // RenderPass state
    uint32_t mCounter;
    RenderPassDesc mRenderPassDesc;
    AttachmentOpsArray mAttachmentOps;
    Framebuffer mFramebuffer;
    gl::Rectangle mRenderArea;
    PackedClearValuesArray mClearValues;
    bool mRenderPassStarted;

    // Transform feedback state
    gl::TransformFeedbackBuffersArray<VkBuffer> mTransformFeedbackCounterBuffers;
    gl::TransformFeedbackBuffersArray<VkDeviceSize> mTransformFeedbackCounterBufferOffsets;
    uint32_t mValidTransformFeedbackBufferCount;
    bool mRebindTransformFeedbackBuffers;
    bool mIsTransformFeedbackActiveUnpaused;

    // State tracking for whether to optimize the storeOp to DONT_CARE
    uint32_t mPreviousSubpassesCmdCount;

    // Keep track of the depth/stencil attachment index
    PackedAttachmentIndex mDepthStencilAttachmentIndex;

    // Tracks resources used in the command buffer.
    // Images have unique layouts unlike buffers therefore we can't support simultaneous reads with
    // different layout.
    angle::FlatUnorderedSet<ImageSerial, kFlatMapSize> mRenderPassUsedImages;

    // Array size of mColorAttachments
    PackedAttachmentCount mColorAttachmentsCount;
    // Attached render target images. Color and depth resolve images always come last.
    PackedRenderPassAttachmentArray mColorAttachments;
    PackedRenderPassAttachmentArray mColorResolveAttachments;

    RenderPassAttachment mDepthAttachment;
    RenderPassAttachment mDepthResolveAttachment;

    RenderPassAttachment mStencilAttachment;
    RenderPassAttachment mStencilResolveAttachment;

    // When set, this is the last renderpass before present, and mImageOptimizeForPresent is the
    // image that will be presented. We can use the final layout of the renderpass to transition
    // it to the presentable layout.
    ImageHelper *mImageOptimizeForPresent;
};
1382
1383 // The following class helps support both Vulkan and ANGLE secondary command buffers by
1384 // encapsulating their differences.
template <typename CommandBufferT, typename CommandBufferHelperT>
class CommandBufferRecycler
{
  public:
    CommandBufferRecycler() = default;
    ~CommandBufferRecycler() = default;

    // Destroys accumulated state; see the implementation for exactly what is freed.
    void onDestroy();

    // Vends a command buffer helper, reusing one from the free list when available (presumably
    // allocating a new one otherwise — see implementation).
    angle::Result getCommandBufferHelper(Context *context,
                                         CommandPool *commandPool,
                                         CommandBufferHelperT **commandBufferHelperOut);

    // Returns a helper to the free list for later reuse.
    void recycleCommandBufferHelper(VkDevice device, CommandBufferHelperT **commandBuffer);

    // Queues a secondary command buffer to be reset later (see releaseCommandBuffersToReset).
    void resetCommandBuffer(CommandBufferT &&commandBuffer);

    // Moves out the list of command buffers pending reset, leaving the internal vector in a
    // valid moved-from state.
    std::vector<CommandBufferT> &&releaseCommandBuffersToReset()
    {
        return std::move(mSecondaryCommandBuffersToReset);
    }

  private:
    // Helpers available for reuse.
    std::vector<CommandBufferHelperT *> mCommandBufferHelperFreeList;
    // Command buffers waiting to be reset.
    std::vector<CommandBufferT> mSecondaryCommandBuffersToReset;
};
1411
1412 // Imagine an image going through a few layout transitions:
1413 //
1414 // srcStage 1 dstStage 2 srcStage 2 dstStage 3
1415 // Layout 1 ------Transition 1-----> Layout 2 ------Transition 2------> Layout 3
1416 // srcAccess 1 dstAccess 2 srcAccess 2 dstAccess 3
1417 // \_________________ ___________________/
1418 // \/
1419 // A transition
1420 //
1421 // Every transition requires 6 pieces of information: from/to layouts, src/dst stage masks and
1422 // src/dst access masks. At the moment we decide to transition the image to Layout 2 (i.e.
1423 // Transition 1), we need to have Layout 1, srcStage 1 and srcAccess 1 stored as history of the
1424 // image. To perform the transition, we need to know Layout 2, dstStage 2 and dstAccess 2.
1425 // Additionally, we need to know srcStage 2 and srcAccess 2 to retain them for the next transition.
1426 //
1427 // That is, with the history kept, on every new transition we need 5 pieces of new information:
1428 // layout/dstStage/dstAccess to transition into the layout, and srcStage/srcAccess for the future
1429 // transition out from it. Given the small number of possible combinations of these values, an
// enum is used where each value encapsulates these 5 pieces of information:
1431 //
1432 // +--------------------------------+
1433 // srcStage 1 | dstStage 2 srcStage 2 | dstStage 3
1434 // Layout 1 ------Transition 1-----> Layout 2 ------Transition 2------> Layout 3
1435 // srcAccess 1 |dstAccess 2 srcAccess 2| dstAccess 3
1436 // +--------------- ---------------+
1437 // \/
1438 // One enum value
1439 //
1440 // Note that, while generally dstStage for the to-transition and srcStage for the from-transition
1441 // are the same, they may occasionally be BOTTOM_OF_PIPE and TOP_OF_PIPE respectively.
enum class ImageLayout
{
    // Initial, contents-undefined layout.
    Undefined = 0,
    // Framebuffer attachment layouts are placed first, so they can fit in fewer bits in
    // PackedAttachmentOpsDesc.
    ColorAttachment,
    ColorAttachmentAndFragmentShaderRead,
    ColorAttachmentAndAllShadersRead,
    // "DS" is short for depth/stencil. These layouts distinguish write vs read-only attachment
    // access, and which shader stages may simultaneously read the attachment.
    DSAttachmentWriteAndFragmentShaderRead,
    DSAttachmentWriteAndAllShadersRead,
    DSAttachmentReadAndFragmentShaderRead,
    DSAttachmentReadAndAllShadersRead,
    DepthStencilAttachmentReadOnly,
    DepthStencilAttachment,
    DepthStencilResolveAttachment,
    // Swapchain presentation layouts.
    Present,
    SharedPresent,
    // The rest of the layouts.
    ExternalPreInitialized,
    ExternalShadersReadOnly,
    ExternalShadersWrite,
    TransferSrc,
    TransferDst,
    VertexShaderReadOnly,
    VertexShaderWrite,
    // PreFragment == Vertex, Tessellation and Geometry stages
    PreFragmentShadersReadOnly,
    PreFragmentShadersWrite,
    FragmentShaderReadOnly,
    FragmentShaderWrite,
    ComputeShaderReadOnly,
    ComputeShaderWrite,
    AllGraphicsShadersReadOnly,
    AllGraphicsShadersWrite,

    InvalidEnum,
    EnumCount = InvalidEnum,
};
1480
1481 VkImageCreateFlags GetImageCreateFlags(gl::TextureType textureType);
1482
1483 ImageLayout GetImageLayoutFromGLImageLayout(GLenum layout);
1484
1485 GLenum ConvertImageLayoutToGLImageLayout(ImageLayout imageLayout);
1486
1487 VkImageLayout ConvertImageLayoutToVkImageLayout(ImageLayout imageLayout);
1488
1489 // How the ImageHelper object is being used by the renderpass
enum class RenderPassUsage
{
    // Attached to the render target of the current renderpass commands. It could be read/write or
    // read only access.
    RenderTargetAttachment,
    // This is a special case of RenderTargetAttachment where the render target access is read
    // only. Right now it is only tracked for the depth/stencil attachment.
    ReadOnlyAttachment,
    // Attached to the texture sampler of the current renderpass commands
    TextureSampler,

    InvalidEnum,
    EnumCount = InvalidEnum,
};
1504 using RenderPassUsageFlags = angle::PackedEnumBitSet<RenderPassUsage, uint16_t>;
1505
1506 // The source of update to an ImageHelper
enum class UpdateSource
{
    // Clear an image subresource.
    Clear,
    // Clear only the emulated channels of the subresource. This operation is more expensive than
    // Clear, and so is only used for emulated color formats and only for external images. Color
    // only because depth or stencil clear is already per channel, so Clear works for them.
    // External only because they may contain data that needs to be preserved. Additionally, this
    // is a one-time only clear. Once the emulated channels are cleared, ANGLE ensures that they
    // remain untouched.
    ClearEmulatedChannelsOnly,
    // When an image with emulated channels is invalidated, a clear may be restaged to keep the
    // contents of the emulated channels defined. This is given a dedicated enum value, so it can
    // be removed if the invalidate is undone at the end of the render pass.
    ClearAfterInvalidate,
    // The source of the copy is a buffer.
    Buffer,
    // The source of the copy is another image.
    Image,
};
1527
1528 bool FormatHasNecessaryFeature(RendererVk *renderer,
1529 angle::FormatID formatID,
1530 VkImageTiling tilingMode,
1531 VkFormatFeatureFlags featureBits);
1532
1533 bool CanCopyWithTransfer(RendererVk *renderer,
1534 angle::FormatID srcFormatID,
1535 VkImageTiling srcTilingMode,
1536 angle::FormatID dstFormatID,
1537 VkImageTiling dstTilingMode);
1538 class ImageViewHelper;
1539 class ImageHelper final : public Resource, public angle::Subject
1540 {
1541 public:
1542 ImageHelper();
1543 ImageHelper(ImageHelper &&other);
1544 ~ImageHelper() override;
1545
1546 angle::Result init(Context *context,
1547 gl::TextureType textureType,
1548 const VkExtent3D &extents,
1549 const Format &format,
1550 GLint samples,
1551 VkImageUsageFlags usage,
1552 gl::LevelIndex firstLevel,
1553 uint32_t mipLevels,
1554 uint32_t layerCount,
1555 bool isRobustResourceInitEnabled,
1556 bool hasProtectedContent);
1557 angle::Result initMSAASwapchain(Context *context,
1558 gl::TextureType textureType,
1559 const VkExtent3D &extents,
1560 bool rotatedAspectRatio,
1561 const Format &format,
1562 GLint samples,
1563 VkImageUsageFlags usage,
1564 gl::LevelIndex firstLevel,
1565 uint32_t mipLevels,
1566 uint32_t layerCount,
1567 bool isRobustResourceInitEnabled,
1568 bool hasProtectedContent);
1569 angle::Result initExternal(Context *context,
1570 gl::TextureType textureType,
1571 const VkExtent3D &extents,
1572 angle::FormatID intendedFormatID,
1573 angle::FormatID actualFormatID,
1574 GLint samples,
1575 VkImageUsageFlags usage,
1576 VkImageCreateFlags additionalCreateFlags,
1577 ImageLayout initialLayout,
1578 const void *externalImageCreateInfo,
1579 gl::LevelIndex firstLevel,
1580 uint32_t mipLevels,
1581 uint32_t layerCount,
1582 bool isRobustResourceInitEnabled,
1583 bool hasProtectedContent);
    // Allocates and binds device memory for the image; |flags| selects the required
    // VkMemoryPropertyFlags.
    angle::Result initMemory(Context *context,
                             bool hasProtectedContent,
                             const MemoryProperties &memoryProperties,
                             VkMemoryPropertyFlags flags);
    // Like initMemory, but additionally chains |extraAllocationInfo| structs into the allocation
    // (used for external/imported memory) targeting |currentQueueFamilyIndex|.
    angle::Result initExternalMemory(Context *context,
                                     const MemoryProperties &memoryProperties,
                                     const VkMemoryRequirements &memoryRequirements,
                                     uint32_t extraAllocationInfoCount,
                                     const void **extraAllocationInfo,
                                     uint32_t currentQueueFamilyIndex,
                                     VkMemoryPropertyFlags flags);
    // Creates a view of a range of the image's levels/layers, using the image's own format.
    angle::Result initLayerImageView(Context *context,
                                     gl::TextureType textureType,
                                     VkImageAspectFlags aspectMask,
                                     const gl::SwizzleState &swizzleMap,
                                     ImageView *imageViewOut,
                                     LevelIndex baseMipLevelVk,
                                     uint32_t levelCount,
                                     uint32_t baseArrayLayer,
                                     uint32_t layerCount,
                                     gl::SrgbWriteControlMode mode) const;
    // Same as initLayerImageView, but the VkFormat is provided explicitly by the caller.
    angle::Result initLayerImageViewWithFormat(Context *context,
                                               gl::TextureType textureType,
                                               VkFormat imageFormat,
                                               VkImageAspectFlags aspectMask,
                                               const gl::SwizzleState &swizzleMap,
                                               ImageView *imageViewOut,
                                               LevelIndex baseMipLevelVk,
                                               uint32_t levelCount,
                                               uint32_t baseArrayLayer,
                                               uint32_t layerCount,
                                               const gl::SamplerState &samplerState) const;
    // Creates a view whose format (|imageViewFormat|) may differ from the image's own format.
    angle::Result initReinterpretedLayerImageView(Context *context,
                                                  gl::TextureType textureType,
                                                  VkImageAspectFlags aspectMask,
                                                  const gl::SwizzleState &swizzleMap,
                                                  ImageView *imageViewOut,
                                                  LevelIndex baseMipLevelVk,
                                                  uint32_t levelCount,
                                                  uint32_t baseArrayLayer,
                                                  uint32_t layerCount,
                                                  VkImageUsageFlags imageUsageFlags,
                                                  angle::FormatID imageViewFormat) const;
    // View creation without explicit layer range parameters (cf. initLayerImageView).
    angle::Result initImageView(Context *context,
                                gl::TextureType textureType,
                                VkImageAspectFlags aspectMask,
                                const gl::SwizzleState &swizzleMap,
                                ImageView *imageViewOut,
                                LevelIndex baseMipLevelVk,
                                uint32_t levelCount);
    // Create a 2D[Array] for staging purposes.  Used by:
    //
    // - TextureVk::copySubImageImplWithDraw
    // - FramebufferVk::readPixelsImpl
    //
    angle::Result init2DStaging(Context *context,
                                bool hasProtectedContent,
                                const MemoryProperties &memoryProperties,
                                const gl::Extents &glExtents,
                                angle::FormatID intendedFormatID,
                                angle::FormatID actualFormatID,
                                VkImageUsageFlags usage,
                                uint32_t layerCount);
    // Create an image for staging purposes.  Used by:
    //
    // - TextureVk::copyAndStageImageData
    //
    angle::Result initStaging(Context *context,
                              bool hasProtectedContent,
                              const MemoryProperties &memoryProperties,
                              VkImageType imageType,
                              const VkExtent3D &extents,
                              angle::FormatID intendedFormatID,
                              angle::FormatID actualFormatID,
                              GLint samples,
                              VkImageUsageFlags usage,
                              uint32_t mipLevels,
                              uint32_t layerCount);
    // Create a multisampled image for use as the implicit image in multisampled render to texture
    // rendering.  If LAZILY_ALLOCATED memory is available, it will prefer that.
    angle::Result initImplicitMultisampledRenderToTexture(Context *context,
                                                          bool hasProtectedContent,
                                                          const MemoryProperties &memoryProperties,
                                                          gl::TextureType textureType,
                                                          GLint samples,
                                                          const ImageHelper &resolveImage,
                                                          bool isRobustResourceInitEnabled);
1671
    // Helper for initExternal and users to automatically derive the appropriate VkImageCreateInfo
    // pNext chain based on the given parameters, and adjust create flags.  In some cases, these
    // shouldn't be automatically derived, for example when importing images through
    // EXT_external_objects and ANGLE_external_objects_flags.
    static constexpr uint32_t kImageListFormatCount = 2;
    using ImageListFormats = std::array<VkFormat, kImageListFormatCount>;
    static const void *DeriveCreateInfoPNext(
        Context *context,
        angle::FormatID actualFormatID,
        const void *pNext,
        VkImageFormatListCreateInfoKHR *imageFormatListInfoStorage,
        ImageListFormats *imageListFormatsStorage,
        VkImageCreateFlags *createFlagsOut);

    // Release the underlying VkImage object for garbage collection.
    void releaseImage(RendererVk *renderer);
    // Similar to releaseImage, but also notify all contexts in the same share group to stop
    // accessing it.
    void releaseImageFromShareContexts(RendererVk *renderer, ContextVk *contextVk);
    void collectViewGarbage(RendererVk *renderer, vk::ImageViewHelper *imageView);
    // Drops any updates that were staged but not yet flushed to the image.
    void releaseStagedUpdates(RendererVk *renderer);

    // Whether the underlying VkImage handle has been created.
    bool valid() const { return mImage.valid(); }

    VkImageAspectFlags getAspectFlags() const;
    // True if image contains both depth & stencil aspects
    bool isCombinedDepthStencilFormat() const;
    void destroy(RendererVk *renderer);
    void release(RendererVk *renderer) { destroy(renderer); }

    // Wraps an externally created VkImage handle (e.g. a swapchain image) without taking
    // ownership of its memory; see resetImageWeakReference.
    void init2DWeakReference(Context *context,
                             VkImage handle,
                             const gl::Extents &glExtents,
                             bool rotatedAspectRatio,
                             angle::FormatID intendedFormatID,
                             angle::FormatID actualFormatID,
                             GLint samples,
                             bool isRobustResourceInitEnabled);
    void resetImageWeakReference();
    void releaseImageAndViewGarbage(RendererVk *renderer);
1712
    // Accessors for the image and its cached creation-time properties.  Accessors that ASSERT
    // valid() return values that are only meaningful once the image has been initialized.
    const Image &getImage() const { return mImage; }
    const DeviceMemory &getDeviceMemory() const { return mDeviceMemory; }

    const VkImageCreateInfo &getVkImageCreateInfo() const { return mVkImageCreateInfo; }
    void setTilingMode(VkImageTiling tilingMode) { mTilingMode = tilingMode; }
    VkImageTiling getTilingMode() const { return mTilingMode; }
    VkImageCreateFlags getCreateFlags() const { return mCreateFlags; }
    VkImageUsageFlags getUsage() const { return mUsage; }
    VkImageType getType() const { return mImageType; }
    const VkExtent3D &getExtents() const { return mExtents; }
    const VkExtent3D getRotatedExtents() const;
    uint32_t getLayerCount() const
    {
        ASSERT(valid());
        return mLayerCount;
    }
    uint32_t getLevelCount() const
    {
        ASSERT(valid());
        return mLevelCount;
    }
    // The format the application asked for (GL-side intent).
    angle::FormatID getIntendedFormatID() const
    {
        ASSERT(valid());
        return mIntendedFormatID;
    }
    const angle::Format &getIntendedFormat() const
    {
        ASSERT(valid());
        return angle::Format::Get(mIntendedFormatID);
    }
    // The format the VkImage was actually created with (may be an emulated substitute).
    angle::FormatID getActualFormatID() const
    {
        ASSERT(valid());
        return mActualFormatID;
    }
    VkFormat getActualVkFormat() const
    {
        ASSERT(valid());
        return GetVkFormatFromFormatID(mActualFormatID);
    }
    const angle::Format &getActualFormat() const
    {
        ASSERT(valid());
        return angle::Format::Get(mActualFormatID);
    }
    bool hasEmulatedImageChannels() const;
    bool hasEmulatedDepthChannel() const;
    bool hasEmulatedStencilChannel() const;
    // True when the allocated format differs from the format the application requested.
    bool hasEmulatedImageFormat() const { return mActualFormatID != mIntendedFormatID; }
    GLint getSamples() const { return mSamples; }

    ImageSerial getImageSerial() const
    {
        ASSERT(valid() && mImageSerial.valid());
        return mImageSerial;
    }
1770
setCurrentImageLayout(ImageLayout newLayout)1771 void setCurrentImageLayout(ImageLayout newLayout)
1772 {
1773 // Once you transition to ImageLayout::SharedPresent, you never transition out of it.
1774 if (mCurrentLayout == ImageLayout::SharedPresent)
1775 {
1776 return;
1777 }
1778 mCurrentLayout = newLayout;
1779 }
getCurrentImageLayout()1780 ImageLayout getCurrentImageLayout() const { return mCurrentLayout; }
1781 VkImageLayout getCurrentLayout() const;
1782
    gl::Extents getLevelExtents(LevelIndex levelVk) const;
    // Helper function to calculate the extents of a render target created for a certain mip of the
    // image.
    gl::Extents getLevelExtents2D(LevelIndex levelVk) const;
    gl::Extents getRotatedLevelExtents2D(LevelIndex levelVk) const;

    bool isDepthOrStencil() const;

    // Tracking of how the image is used by the currently open render pass.
    void setRenderPassUsageFlag(RenderPassUsage flag);
    void clearRenderPassUsageFlag(RenderPassUsage flag);
    void resetRenderPassUsageFlags();
    bool hasRenderPassUsageFlag(RenderPassUsage flag) const;
    bool usedByCurrentRenderPassAsAttachmentAndSampler() const;

    // Copies the given subresources between two images through |commandBuffer|.
    static void Copy(ImageHelper *srcImage,
                     ImageHelper *dstImage,
                     const gl::Offset &srcOffset,
                     const gl::Offset &dstOffset,
                     const gl::Extents &copySize,
                     const VkImageSubresourceLayers &srcSubresources,
                     const VkImageSubresourceLayers &dstSubresources,
                     OutsideRenderPassCommandBuffer *commandBuffer);

    static angle::Result CopyImageSubData(const gl::Context *context,
                                          ImageHelper *srcImage,
                                          GLint srcLevel,
                                          GLint srcX,
                                          GLint srcY,
                                          GLint srcZ,
                                          ImageHelper *dstImage,
                                          GLint dstLevel,
                                          GLint dstX,
                                          GLint dstY,
                                          GLint dstZ,
                                          GLsizei srcWidth,
                                          GLsizei srcHeight,
                                          GLsizei srcDepth);

    // Generate mipmap from level 0 into the rest of the levels with blit.
    angle::Result generateMipmapsWithBlit(ContextVk *contextVk,
                                          LevelIndex baseLevel,
                                          LevelIndex maxLevel);

    // Resolve this image into a destination image.  This image should be in the TransferSrc
    // layout.  The destination image is automatically transitioned into TransferDst.
    void resolve(ImageHelper *dst,
                 const VkImageResolve &region,
                 OutsideRenderPassCommandBuffer *commandBuffer);
1831
    // Data staging: updates are recorded against the image and applied later by the
    // flush*StagedUpdates family of functions.
    void removeSingleSubresourceStagedUpdates(ContextVk *contextVk,
                                              gl::LevelIndex levelIndexGL,
                                              uint32_t layerIndex,
                                              uint32_t layerCount);
    void removeSingleStagedClearAfterInvalidate(gl::LevelIndex levelIndexGL,
                                                uint32_t layerIndex,
                                                uint32_t layerCount);
    void removeStagedUpdates(Context *context,
                             gl::LevelIndex levelGLStart,
                             gl::LevelIndex levelGLEnd);

    // Worker for stageSubresourceUpdate with the unpack pitches/skip already computed
    // (cf. CalculateBufferInfo).
    angle::Result stageSubresourceUpdateImpl(ContextVk *contextVk,
                                             const gl::ImageIndex &index,
                                             const gl::Extents &glExtents,
                                             const gl::Offset &offset,
                                             const gl::InternalFormat &formatInfo,
                                             const gl::PixelUnpackState &unpack,
                                             GLenum type,
                                             const uint8_t *pixels,
                                             const Format &vkFormat,
                                             ImageAccess access,
                                             const GLuint inputRowPitch,
                                             const GLuint inputDepthPitch,
                                             const GLuint inputSkipBytes);

    angle::Result stageSubresourceUpdate(ContextVk *contextVk,
                                         const gl::ImageIndex &index,
                                         const gl::Extents &glExtents,
                                         const gl::Offset &offset,
                                         const gl::InternalFormat &formatInfo,
                                         const gl::PixelUnpackState &unpack,
                                         GLenum type,
                                         const uint8_t *pixels,
                                         const Format &vkFormat,
                                         ImageAccess access);

    // Stages an update of |allocationSize| bytes and returns a pointer (|destData|) for the
    // caller to fill in the pixel data.
    angle::Result stageSubresourceUpdateAndGetData(ContextVk *contextVk,
                                                   size_t allocationSize,
                                                   const gl::ImageIndex &imageIndex,
                                                   const gl::Extents &glExtents,
                                                   const gl::Offset &offset,
                                                   uint8_t **destData,
                                                   angle::FormatID formatID);

    angle::Result stageSubresourceUpdateFromFramebuffer(const gl::Context *context,
                                                        const gl::ImageIndex &index,
                                                        const gl::Rectangle &sourceArea,
                                                        const gl::Offset &dstOffset,
                                                        const gl::Extents &dstExtent,
                                                        const gl::InternalFormat &formatInfo,
                                                        ImageAccess access,
                                                        FramebufferVk *framebufferVk);

    void stageSubresourceUpdateFromImage(RefCounted<ImageHelper> *image,
                                         const gl::ImageIndex &index,
                                         LevelIndex srcMipLevel,
                                         const gl::Offset &destOffset,
                                         const gl::Extents &glExtents,
                                         const VkImageType imageType);

    // Takes an image and stages a subresource update for each level of it, including its full
    // extent and all its layers, at the specified GL level.
    void stageSubresourceUpdatesFromAllImageLevels(RefCounted<ImageHelper> *image,
                                                   gl::LevelIndex baseLevel);
1897
    // Stage a clear to an arbitrary value.
    void stageClear(const gl::ImageIndex &index,
                    VkImageAspectFlags aspectFlags,
                    const VkClearValue &clearValue);

    // Stage a clear based on robust resource init.
    angle::Result stageRobustResourceClearWithFormat(ContextVk *contextVk,
                                                     const gl::ImageIndex &index,
                                                     const gl::Extents &glExtents,
                                                     const angle::Format &intendedFormat,
                                                     const angle::Format &actualFormat);
    void stageRobustResourceClear(const gl::ImageIndex &index);

    // Stage the currently allocated image as updates to base level and on, making this !valid().
    // This is used for:
    //
    // - Mipmap generation, where levelCount is 1 so only the base level is retained
    // - Image respecification, where every level (other than those explicitly skipped) is staged
    void stageSelfAsSubresourceUpdates(ContextVk *contextVk,
                                       uint32_t levelCount,
                                       gl::TexLevelMask skipLevelsMask);

    // Flush staged updates for a single subresource.  Can optionally take a parameter to defer
    // clears to a subsequent RenderPass load op.
    angle::Result flushSingleSubresourceStagedUpdates(ContextVk *contextVk,
                                                      gl::LevelIndex levelGL,
                                                      uint32_t layer,
                                                      uint32_t layerCount,
                                                      ClearValuesArray *deferredClears,
                                                      uint32_t deferredClearIndex);

    // Flushes staged updates to a range of levels and layers from start to (but not including)
    // end.  Due to the nature of updates (done wholly to a VkImageSubresourceLayers), some
    // unsolicited layers may also be updated.
    angle::Result flushStagedUpdates(ContextVk *contextVk,
                                     gl::LevelIndex levelGLStart,
                                     gl::LevelIndex levelGLEnd,
                                     uint32_t layerStart,
                                     uint32_t layerEnd,
                                     gl::TexLevelMask skipLevelsMask);

    // Creates a command buffer and flushes all staged updates.  This is used for one-time
    // initialization of resources that we don't expect to accumulate further staged updates, such
    // as with renderbuffers or surface images.
    angle::Result flushAllStagedUpdates(ContextVk *contextVk);

    // Queries over the pending (staged but not flushed) updates.
    bool hasStagedUpdatesForSubresource(gl::LevelIndex levelGL,
                                        uint32_t layer,
                                        uint32_t layerCount) const;
    bool hasStagedUpdatesInAllocatedLevels() const;

    bool removeStagedClearUpdatesAndReturnColor(gl::LevelIndex levelGL,
                                                const VkClearColorValue **color);
1951
    // Records an unconditional layout-transition barrier into an outside-render-pass command
    // buffer, keeping the current queue family.
    void recordWriteBarrier(Context *context,
                            VkImageAspectFlags aspectMask,
                            ImageLayout newLayout,
                            OutsideRenderPassCommandBuffer *commandBuffer)
    {
        barrierImpl(context, aspectMask, newLayout, mCurrentQueueFamilyIndex, commandBuffer);
    }

    // Same as recordWriteBarrier, but records into a primary command buffer and derives the
    // aspect mask from the image's format.
    void recordWriteBarrierOneOff(Context *context,
                                  ImageLayout newLayout,
                                  PrimaryCommandBuffer *commandBuffer)
    {
        barrierImpl(context, getAspectFlags(), newLayout, mCurrentQueueFamilyIndex, commandBuffer);
    }

    // This function can be used to prevent issuing redundant layout transition commands.
    bool isReadBarrierNecessary(ImageLayout newLayout) const;
1969
recordReadBarrier(Context * context,VkImageAspectFlags aspectMask,ImageLayout newLayout,OutsideRenderPassCommandBuffer * commandBuffer)1970 void recordReadBarrier(Context *context,
1971 VkImageAspectFlags aspectMask,
1972 ImageLayout newLayout,
1973 OutsideRenderPassCommandBuffer *commandBuffer)
1974 {
1975 if (!isReadBarrierNecessary(newLayout))
1976 {
1977 return;
1978 }
1979
1980 barrierImpl(context, aspectMask, newLayout, mCurrentQueueFamilyIndex, commandBuffer);
1981 }
1982
    // True when the image is currently owned by a different queue family.
    // NOTE(review): name has a long-standing typo ("Neccesary"); renaming would break callers.
    bool isQueueChangeNeccesary(uint32_t newQueueFamilyIndex) const
    {
        return mCurrentQueueFamilyIndex != newQueueFamilyIndex;
    }

    void changeLayoutAndQueue(Context *context,
                              VkImageAspectFlags aspectMask,
                              ImageLayout newLayout,
                              uint32_t newQueueFamilyIndex,
                              OutsideRenderPassCommandBuffer *commandBuffer);

    // Returns true if barrier has been generated
    bool updateLayoutAndBarrier(Context *context,
                                VkImageAspectFlags aspectMask,
                                ImageLayout newLayout,
                                PipelineBarrier *barrier);

    // Performs an ownership transfer from an external instance or API.
    void acquireFromExternal(ContextVk *contextVk,
                             uint32_t externalQueueFamilyIndex,
                             uint32_t rendererQueueFamilyIndex,
                             ImageLayout currentLayout,
                             OutsideRenderPassCommandBuffer *commandBuffer);

    // Performs an ownership transfer to an external instance or API.
    void releaseToExternal(ContextVk *contextVk,
                           uint32_t rendererQueueFamilyIndex,
                           uint32_t externalQueueFamilyIndex,
                           ImageLayout desiredLayout,
                           OutsideRenderPassCommandBuffer *commandBuffer);

    // Returns true if the image is owned by an external API or instance.
    bool isReleasedToExternal() const;

    gl::LevelIndex getFirstAllocatedLevel() const
    {
        ASSERT(valid());
        return mFirstAllocatedLevel;
    }
    gl::LevelIndex getLastAllocatedLevel() const;
    // Conversions between GL level indices (absolute) and Vk level indices (relative to the
    // first allocated level).
    LevelIndex toVkLevel(gl::LevelIndex levelIndexGL) const;
    gl::LevelIndex toGLLevel(LevelIndex levelIndexVk) const;
2025
    // Copies the given region of the image into |dstBuffer|; |outDataPtr| receives a pointer to
    // the copied data.
    angle::Result copyImageDataToBuffer(ContextVk *contextVk,
                                        gl::LevelIndex sourceLevelGL,
                                        uint32_t layerCount,
                                        uint32_t baseLayer,
                                        const gl::Box &sourceArea,
                                        BufferHelper *dstBuffer,
                                        uint8_t **outDataPtr);

    angle::Result copySurfaceImageToBuffer(DisplayVk *displayVk,
                                           gl::LevelIndex sourceLevelGL,
                                           uint32_t layerCount,
                                           uint32_t baseLayer,
                                           const gl::Box &sourceArea,
                                           vk::BufferHelper *bufferHelperOut);

    angle::Result copyBufferToSurfaceImage(DisplayVk *displayVk,
                                           gl::LevelIndex destLevelGL,
                                           uint32_t layerCount,
                                           uint32_t baseLayer,
                                           const gl::Box &destArea,
                                           vk::BufferHelper *bufferHelper);

    // Derives pack parameters and the skip-bytes offset for a readPixels operation.
    static angle::Result GetReadPixelsParams(ContextVk *contextVk,
                                             const gl::PixelPackState &packState,
                                             gl::Buffer *packBuffer,
                                             GLenum format,
                                             GLenum type,
                                             const gl::Rectangle &area,
                                             const gl::Rectangle &clippedArea,
                                             PackPixelsParams *paramsOut,
                                             GLuint *skipBytesOut);

    angle::Result readPixelsForGetImage(ContextVk *contextVk,
                                        const gl::PixelPackState &packState,
                                        gl::Buffer *packBuffer,
                                        gl::LevelIndex levelGL,
                                        uint32_t layer,
                                        uint32_t layerCount,
                                        GLenum format,
                                        GLenum type,
                                        void *pixels);

    angle::Result readPixelsForCompressedGetImage(ContextVk *contextVk,
                                                  const gl::PixelPackState &packState,
                                                  gl::Buffer *packBuffer,
                                                  gl::LevelIndex levelGL,
                                                  uint32_t layer,
                                                  uint32_t layerCount,
                                                  void *pixels);

    angle::Result readPixels(ContextVk *contextVk,
                             const gl::Rectangle &area,
                             const PackPixelsParams &packPixelsParams,
                             VkImageAspectFlagBits copyAspectFlags,
                             gl::LevelIndex levelGL,
                             uint32_t layer,
                             void *pixels);

    // Computes the unpack row/depth pitches and skip bytes for an upload described by |unpack|.
    angle::Result CalculateBufferInfo(ContextVk *contextVk,
                                      const gl::Extents &glExtents,
                                      const gl::InternalFormat &formatInfo,
                                      const gl::PixelUnpackState &unpack,
                                      GLenum type,
                                      bool is3D,
                                      GLuint *inputRowPitch,
                                      GLuint *inputDepthPitch,
                                      GLuint *inputSkipBytes);
2093
    // Mark a given subresource as written to.  The subresource is identified by [levelStart,
    // levelStart + levelCount) and [layerStart, layerStart + layerCount).
    void onWrite(gl::LevelIndex levelStart,
                 uint32_t levelCount,
                 uint32_t layerStart,
                 uint32_t layerCount,
                 VkImageAspectFlags aspectFlags);
    // An immutable sampler is required exactly when a YCbCr conversion is in use.
    bool hasImmutableSampler() const { return mYcbcrConversionDesc.valid(); }
    // Returns the Android/external format code, or 0 when the conversion uses a regular VkFormat.
    uint64_t getExternalFormat() const
    {
        return mYcbcrConversionDesc.mIsExternalFormat ? mYcbcrConversionDesc.mExternalOrVkFormat
                                                      : 0;
    }
    const YcbcrConversionDesc &getYcbcrConversionDesc() const { return mYcbcrConversionDesc; }
    // Forwards all YCbCr conversion parameters to the cached descriptor.
    void updateYcbcrConversionDesc(RendererVk *rendererVk,
                                   uint64_t externalFormat,
                                   VkSamplerYcbcrModelConversion conversionModel,
                                   VkSamplerYcbcrRange colorRange,
                                   VkChromaLocation xChromaOffset,
                                   VkChromaLocation yChromaOffset,
                                   VkFilter chromaFilter,
                                   VkComponentMapping components,
                                   angle::FormatID intendedFormatID)
    {
        mYcbcrConversionDesc.update(rendererVk, externalFormat, conversionModel, colorRange,
                                    xChromaOffset, yChromaOffset, chromaFilter, components,
                                    intendedFormatID);
    }

    // Used by framebuffer and render pass functions to decide loadOps and invalidate/un-invalidate
    // render target contents.
    bool hasSubresourceDefinedContent(gl::LevelIndex level,
                                      uint32_t layerIndex,
                                      uint32_t layerCount) const;
    bool hasSubresourceDefinedStencilContent(gl::LevelIndex level,
                                             uint32_t layerIndex,
                                             uint32_t layerCount) const;
    void invalidateSubresourceContent(ContextVk *contextVk,
                                      gl::LevelIndex level,
                                      uint32_t layerIndex,
                                      uint32_t layerCount,
                                      bool *preferToKeepContentsDefinedOut);
    void invalidateSubresourceStencilContent(ContextVk *contextVk,
                                             gl::LevelIndex level,
                                             uint32_t layerIndex,
                                             uint32_t layerCount,
                                             bool *preferToKeepContentsDefinedOut);
    void restoreSubresourceContent(gl::LevelIndex level, uint32_t layerIndex, uint32_t layerCount);
    void restoreSubresourceStencilContent(gl::LevelIndex level,
                                          uint32_t layerIndex,
                                          uint32_t layerCount);
    // Rewrites staged buffer updates from |srcFormatID| to |dstFormatID|.
    angle::Result reformatStagedBufferUpdates(ContextVk *contextVk,
                                              angle::FormatID srcFormatID,
                                              angle::FormatID dstFormatID);
    bool hasStagedImageUpdatesWithMismatchedFormat(gl::LevelIndex levelStart,
                                                   gl::LevelIndex levelEnd,
                                                   angle::FormatID formatID) const;

  private:
2153 ANGLE_ENABLE_STRUCT_PADDING_WARNINGS
2154 struct ClearUpdate
2155 {
2156 bool operator==(const ClearUpdate &rhs)
2157 {
2158 return memcmp(this, &rhs, sizeof(ClearUpdate)) == 0;
2159 }
2160 VkImageAspectFlags aspectFlags;
2161 VkClearValue value;
2162 uint32_t levelIndex;
2163 uint32_t layerIndex;
2164 uint32_t layerCount;
2165 // For ClearEmulatedChannelsOnly, mask of which channels to clear.
2166 VkColorComponentFlags colorMaskFlags;
2167 };
2168 ANGLE_DISABLE_STRUCT_PADDING_WARNINGS
    // A staged copy from a buffer into the image.
    struct BufferUpdate
    {
        BufferHelper *bufferHelper;
        VkBufferImageCopy copyRegion;
        angle::FormatID formatID;
    };
    // A staged copy from another image into this image.
    struct ImageUpdate
    {
        VkImageCopy copyRegion;
        angle::FormatID formatID;
    };

    // A single staged update (clear, buffer copy, or image copy); a tagged union keyed by
    // |updateSource|.
    struct SubresourceUpdate : angle::NonCopyable
    {
        SubresourceUpdate();
        ~SubresourceUpdate();
        SubresourceUpdate(RefCounted<BufferHelper> *bufferIn,
                          BufferHelper *bufferHelperIn,
                          const VkBufferImageCopy &copyRegion,
                          angle::FormatID formatID);
        SubresourceUpdate(RefCounted<ImageHelper> *imageIn,
                          const VkImageCopy &copyRegion,
                          angle::FormatID formatID);
        SubresourceUpdate(VkImageAspectFlags aspectFlags,
                          const VkClearValue &clearValue,
                          const gl::ImageIndex &imageIndex);
        SubresourceUpdate(VkImageAspectFlags aspectFlags,
                          const VkClearValue &clearValue,
                          gl::LevelIndex level,
                          uint32_t layerIndex,
                          uint32_t layerCount);
        SubresourceUpdate(VkColorComponentFlags colorMaskFlags,
                          const VkClearColorValue &clearValue,
                          const gl::ImageIndex &imageIndex);
        SubresourceUpdate(SubresourceUpdate &&other);

        SubresourceUpdate &operator=(SubresourceUpdate &&other);

        void release(RendererVk *renderer);

        bool isUpdateToLayers(uint32_t layerIndex, uint32_t layerCount) const;
        void getDestSubresource(uint32_t imageLayerCount,
                                uint32_t *baseLayerOut,
                                uint32_t *layerCountOut) const;
        VkImageAspectFlags getDestAspectFlags() const;

        UpdateSource updateSource;
        // Payload; which member is active is determined by |updateSource|.
        union
        {
            ClearUpdate clear;
            BufferUpdate buffer;
            ImageUpdate image;
        } data;
        // Ref-counted handle to the source resource (presumably keeps it alive while the update
        // is pending — see release()).
        union
        {
            RefCounted<ImageHelper> *image;
            RefCounted<BufferHelper> *buffer;
        } refCounted;
    };
2228
    // Up to 8 layers are tracked per level for whether contents are defined, above which the
    // contents are considered unconditionally defined.  This handles the more likely scenarios of:
    //
    // - Single layer framebuffer attachments,
    // - Cube map framebuffer attachments,
    // - Multi-view rendering.
    //
    // If there arises a need to optimize an application that invalidates layer >= 8, this can
    // easily be raised to 32 to 64 bits.  Beyond that, an additional hash map can be used to track
    // such subresources.
    static constexpr uint32_t kMaxContentDefinedLayerCount = 8;
    using LevelContentDefinedMask = angle::BitSet8<kMaxContentDefinedLayerCount>;

    void deriveExternalImageTiling(const void *createInfoChain);

    // Called from flushStagedUpdates, removes updates that are later superseded by another.  This
    // cannot be done at the time the updates were staged, as the image is not created (and thus
    // the extents are not known).
    void removeSupersededUpdates(ContextVk *contextVk, gl::TexLevelMask skipLevelsMask);

    // Fills in a VkImageMemoryBarrier for transitioning to |newLayout| / |newQueueFamilyIndex|.
    void initImageMemoryBarrierStruct(VkImageAspectFlags aspectMask,
                                      ImageLayout newLayout,
                                      uint32_t newQueueFamilyIndex,
                                      VkImageMemoryBarrier *imageMemoryBarrier) const;

    // Generalized to accept both "primary" and "secondary" command buffers.
    template <typename CommandBufferT>
    void barrierImpl(Context *context,
                     VkImageAspectFlags aspectMask,
                     ImageLayout newLayout,
                     uint32_t newQueueFamilyIndex,
                     CommandBufferT *commandBuffer);

    // If the image has emulated channels, we clear them once so as not to leave garbage on those
    // channels.
    VkColorComponentFlags getEmulatedChannelsMask() const;
    void stageClearIfEmulatedFormat(bool isRobustResourceInitEnabled, bool isExternalImage);
    bool verifyEmulatedClearsAreBeforeOtherUpdates(const std::vector<SubresourceUpdate> &updates);
2267
    // Clear either color or depth/stencil based on image format.
    void clear(VkImageAspectFlags aspectFlags,
               const VkClearValue &value,
               LevelIndex mipLevel,
               uint32_t baseArrayLayer,
               uint32_t layerCount,
               OutsideRenderPassCommandBuffer *commandBuffer);

    void clearColor(const VkClearColorValue &color,
                    LevelIndex baseMipLevelVk,
                    uint32_t levelCount,
                    uint32_t baseArrayLayer,
                    uint32_t layerCount,
                    OutsideRenderPassCommandBuffer *commandBuffer);

    void clearDepthStencil(VkImageAspectFlags clearAspectFlags,
                           const VkClearDepthStencilValue &depthStencil,
                           LevelIndex baseMipLevelVk,
                           uint32_t levelCount,
                           uint32_t baseArrayLayer,
                           uint32_t layerCount,
                           OutsideRenderPassCommandBuffer *commandBuffer);

    // Clears only the channels named in |colorMaskFlags| (cf. getEmulatedChannelsMask).
    angle::Result clearEmulatedChannels(ContextVk *contextVk,
                                        VkColorComponentFlags colorMaskFlags,
                                        const VkClearValue &value,
                                        LevelIndex mipLevel,
                                        uint32_t baseArrayLayer,
                                        uint32_t layerCount);

    // Used for robust resource init: fills the image's memory with non-zero data.
    angle::Result initializeNonZeroMemory(Context *context,
                                          bool hasProtectedContent,
                                          VkDeviceSize size);
2301
    // Accessors for the per-level staged-update lists (null/absent when the level has none).
    std::vector<SubresourceUpdate> *getLevelUpdates(gl::LevelIndex level);
    const std::vector<SubresourceUpdate> *getLevelUpdates(gl::LevelIndex level) const;

    void appendSubresourceUpdate(gl::LevelIndex level, SubresourceUpdate &&update);
    void prependSubresourceUpdate(gl::LevelIndex level, SubresourceUpdate &&update);
    // Whether there are any updates in [start, end).
    bool hasStagedUpdatesInLevels(gl::LevelIndex levelStart, gl::LevelIndex levelEnd) const;

    // Used only for assertions, these functions verify that
    // SubresourceUpdate::refcountedObject::image or buffer references have the correct ref count.
    // This is to prevent accidental leaks.
    bool validateSubresourceUpdateImageRefConsistent(RefCounted<ImageHelper> *image) const;
    bool validateSubresourceUpdateBufferRefConsistent(RefCounted<BufferHelper> *buffer) const;
    bool validateSubresourceUpdateRefCountsConsistent() const;

    void resetCachedProperties();
    void setEntireContentDefined();
    void setEntireContentUndefined();
    void setContentDefined(LevelIndex levelStart,
                           uint32_t levelCount,
                           uint32_t layerStart,
                           uint32_t layerCount,
                           VkImageAspectFlags aspectFlags);
    // Shared implementation behind the public invalidate/restore*Content entry points; |aspect|
    // selects between the color/depth and stencil content-defined masks.
    void invalidateSubresourceContentImpl(ContextVk *contextVk,
                                          gl::LevelIndex level,
                                          uint32_t layerIndex,
                                          uint32_t layerCount,
                                          VkImageAspectFlagBits aspect,
                                          LevelContentDefinedMask *contentDefinedMask,
                                          bool *preferToKeepContentsDefinedOut);
    void restoreSubresourceContentImpl(gl::LevelIndex level,
                                       uint32_t layerIndex,
                                       uint32_t layerCount,
                                       VkImageAspectFlagBits aspect,
                                       LevelContentDefinedMask *contentDefinedMask);

    // Use the following functions to access m*ContentDefined to make sure the correct level index
    // is used (i.e. vk::LevelIndex and not gl::LevelIndex).
    LevelContentDefinedMask &getLevelContentDefined(LevelIndex level);
    LevelContentDefinedMask &getLevelStencilContentDefined(LevelIndex level);
    const LevelContentDefinedMask &getLevelContentDefined(LevelIndex level) const;
    const LevelContentDefinedMask &getLevelStencilContentDefined(LevelIndex level) const;

    // Shared worker for the public initLayerImageView* variants.
    angle::Result initLayerImageViewImpl(Context *context,
                                         gl::TextureType textureType,
                                         VkImageAspectFlags aspectMask,
                                         const gl::SwizzleState &swizzleMap,
                                         ImageView *imageViewOut,
                                         LevelIndex baseMipLevelVk,
                                         uint32_t levelCount,
                                         uint32_t baseArrayLayer,
                                         uint32_t layerCount,
                                         VkFormat imageFormat,
                                         const VkImageViewUsageCreateInfo *imageViewUsageCreateInfo,
                                         const gl::SamplerState *samplerState) const;

    bool canCopyWithTransformForReadPixels(const PackPixelsParams &packPixelsParams,
                                           const angle::Format *readFormat);
    // Vulkan objects.
    Image mImage;
    DeviceMemory mDeviceMemory;

    // Image properties.
    VkImageCreateInfo mVkImageCreateInfo;
    VkImageType mImageType;
    VkImageTiling mTilingMode;
    VkImageCreateFlags mCreateFlags;
    VkImageUsageFlags mUsage;
    // For Android swapchain images, the Vulkan VkImage must be "rotated".  However, most of ANGLE
    // uses non-rotated extents (i.e. the way the application views the extents--see "Introduction
    // to Android rotation and pre-rotation" in "SurfaceVk.cpp").  Thus, mExtents are non-rotated.
    // The rotated extents are also stored along with a bool that indicates if the aspect ratio is
    // different between the rotated and non-rotated extents.
    VkExtent3D mExtents;
    bool mRotatedAspectRatio;
    angle::FormatID mIntendedFormatID;
    angle::FormatID mActualFormatID;
    GLint mSamples;
    ImageSerial mImageSerial;

    // Current state.
    ImageLayout mCurrentLayout;
    uint32_t mCurrentQueueFamilyIndex;
    // For optimizing transition between different shader readonly layouts
    ImageLayout mLastNonShaderReadOnlyLayout;
    VkPipelineStageFlags mCurrentShaderReadStageMask;
    // Track how it is being used by current open renderpass.
    RenderPassUsageFlags mRenderPassUsageFlags;

    // For imported images
    YcbcrConversionDesc mYcbcrConversionDesc;

    // The first level that has been allocated.  For mutable textures, this should be same as
    // mBaseLevel since we always reallocate VkImage based on mBaseLevel change.  But for immutable
    // textures, we always allocate from level 0 regardless of mBaseLevel change.
    gl::LevelIndex mFirstAllocatedLevel;

    // Cached properties.
    uint32_t mLayerCount;
    uint32_t mLevelCount;

    // Staged updates, indexed by level, applied on flush.
    std::vector<std::vector<SubresourceUpdate>> mSubresourceUpdates;

    // Optimization for repeated clear with the same value.  If this pointer is not null, the
    // entire image has been cleared to the specified clear value.  If another clear call is made
    // with the exact same clear value, we will detect and skip the clear call.
    Optional<ClearUpdate> mCurrentSingleClearValue;

    // Track whether each subresource has defined contents.  Up to 8 layers are tracked per level,
    // above which the contents are considered unconditionally defined.
    gl::TexLevelArray<LevelContentDefinedMask> mContentDefined;
    gl::TexLevelArray<LevelContentDefinedMask> mStencilContentDefined;

    // Garbage (image and views) accumulated for deferred destruction.
    std::vector<vk::GarbageObject> mImageAndViewGarbage;
2416 };
2417
// Whether the given image is referenced by the render pass currently being recorded, identified
// by its unique ImageSerial.
ANGLE_INLINE bool RenderPassCommandBufferHelper::usesImage(const ImageHelper &image) const
{
    return mRenderPassUsedImages.contains(image.getImageSerial());
}
2422
2423 // A vector of image views, such as one per level or one per layer.
2424 using ImageViewVector = std::vector<ImageView>;
2425
2426 // A vector of vector of image views. Primary index is layer, secondary index is level.
2427 using LayerLevelImageViewVector = std::vector<ImageViewVector>;
2428
// Address mode for layers: only possible to access either all layers, or up to
// IMPLEMENTATION_ANGLE_MULTIVIEW_MAX_VIEWS layers. This enum uses 0 for all layers and the rest of
// the values conveniently alias the number of layers.  Note: deliberately a plain enum so the
// enumerators implicitly convert to the layer count they alias.
enum LayerMode
{
    All,
    _1,
    _2,
    _3,
    _4,
};
static_assert(gl::IMPLEMENTATION_ANGLE_MULTIVIEW_MAX_VIEWS == 4, "Update LayerMode");

// Picks the LayerMode matching |layerCount| layers of |image| (defined in vk_helpers.cpp).
LayerMode GetLayerMode(const vk::ImageHelper &image, uint32_t layerCount);
2443
// Sampler decode mode indicating if an attachment needs to be decoded in linear colorspace or sRGB
enum class SrgbDecodeMode
{
    // Sample texel values as-is, skipping sRGB-to-linear decode.
    SkipDecode,
    // Apply sRGB-to-linear decode when sampling.
    SrgbDecode
};
2450
2451 class ImageViewHelper final : angle::NonCopyable
2452 {
2453 public:
2454 ImageViewHelper();
2455 ImageViewHelper(ImageViewHelper &&other);
2456 ~ImageViewHelper();
2457
2458 void init(RendererVk *renderer);
2459 void destroy(VkDevice device);
2460
getLinearReadImageView()2461 const ImageView &getLinearReadImageView() const
2462 {
2463 return getValidReadViewImpl(mPerLevelRangeLinearReadImageViews);
2464 }
getSRGBReadImageView()2465 const ImageView &getSRGBReadImageView() const
2466 {
2467 return getValidReadViewImpl(mPerLevelRangeSRGBReadImageViews);
2468 }
getLinearFetchImageView()2469 const ImageView &getLinearFetchImageView() const
2470 {
2471 return getValidReadViewImpl(mPerLevelRangeLinearFetchImageViews);
2472 }
getSRGBFetchImageView()2473 const ImageView &getSRGBFetchImageView() const
2474 {
2475 return getValidReadViewImpl(mPerLevelRangeSRGBFetchImageViews);
2476 }
getLinearCopyImageView()2477 const ImageView &getLinearCopyImageView() const
2478 {
2479 return getValidReadViewImpl(mPerLevelRangeLinearCopyImageViews);
2480 }
getSRGBCopyImageView()2481 const ImageView &getSRGBCopyImageView() const
2482 {
2483 return getValidReadViewImpl(mPerLevelRangeSRGBCopyImageViews);
2484 }
getStencilReadImageView()2485 const ImageView &getStencilReadImageView() const
2486 {
2487 return getValidReadViewImpl(mPerLevelRangeStencilReadImageViews);
2488 }
2489
getReadImageView()2490 const ImageView &getReadImageView() const
2491 {
2492 return mLinearColorspace ? getReadViewImpl(mPerLevelRangeLinearReadImageViews)
2493 : getReadViewImpl(mPerLevelRangeSRGBReadImageViews);
2494 }
2495
getFetchImageView()2496 const ImageView &getFetchImageView() const
2497 {
2498 return mLinearColorspace ? getReadViewImpl(mPerLevelRangeLinearFetchImageViews)
2499 : getReadViewImpl(mPerLevelRangeSRGBFetchImageViews);
2500 }
2501
getCopyImageView()2502 const ImageView &getCopyImageView() const
2503 {
2504 return mLinearColorspace ? getReadViewImpl(mPerLevelRangeLinearCopyImageViews)
2505 : getReadViewImpl(mPerLevelRangeSRGBCopyImageViews);
2506 }
2507
2508 // Used when initialized RenderTargets.
hasStencilReadImageView()2509 bool hasStencilReadImageView() const
2510 {
2511 return mCurrentBaseMaxLevelHash < mPerLevelRangeStencilReadImageViews.size()
2512 ? mPerLevelRangeStencilReadImageViews[mCurrentBaseMaxLevelHash].valid()
2513 : false;
2514 }
2515
hasFetchImageView()2516 bool hasFetchImageView() const
2517 {
2518 if ((mLinearColorspace &&
2519 mCurrentBaseMaxLevelHash < mPerLevelRangeLinearFetchImageViews.size()) ||
2520 (!mLinearColorspace &&
2521 mCurrentBaseMaxLevelHash < mPerLevelRangeSRGBFetchImageViews.size()))
2522 {
2523 return getFetchImageView().valid();
2524 }
2525 else
2526 {
2527 return false;
2528 }
2529 }
2530
hasCopyImageView()2531 bool hasCopyImageView() const
2532 {
2533 if ((mLinearColorspace &&
2534 mCurrentBaseMaxLevelHash < mPerLevelRangeLinearCopyImageViews.size()) ||
2535 (!mLinearColorspace &&
2536 mCurrentBaseMaxLevelHash < mPerLevelRangeSRGBCopyImageViews.size()))
2537 {
2538 return getCopyImageView().valid();
2539 }
2540 else
2541 {
2542 return false;
2543 }
2544 }
2545
2546 // For applications that frequently switch a texture's max level, and make no other changes to
2547 // the texture, change the currently-used max level, and potentially create new "read views"
2548 // for the new max-level
2549 angle::Result initReadViews(ContextVk *contextVk,
2550 gl::TextureType viewType,
2551 const ImageHelper &image,
2552 const angle::Format &format,
2553 const gl::SwizzleState &formatSwizzle,
2554 const gl::SwizzleState &readSwizzle,
2555 LevelIndex baseLevel,
2556 uint32_t levelCount,
2557 uint32_t baseLayer,
2558 uint32_t layerCount,
2559 bool requiresSRGBViews,
2560 VkImageUsageFlags imageUsageFlags,
2561 const gl::SamplerState &samplerState);
2562
2563 // Creates a storage view with all layers of the level.
2564 angle::Result getLevelStorageImageView(Context *context,
2565 gl::TextureType viewType,
2566 const ImageHelper &image,
2567 LevelIndex levelVk,
2568 uint32_t layer,
2569 VkImageUsageFlags imageUsageFlags,
2570 angle::FormatID formatID,
2571 const ImageView **imageViewOut);
2572
2573 // Creates a storage view with a single layer of the level.
2574 angle::Result getLevelLayerStorageImageView(Context *context,
2575 const ImageHelper &image,
2576 LevelIndex levelVk,
2577 uint32_t layer,
2578 VkImageUsageFlags imageUsageFlags,
2579 angle::FormatID formatID,
2580 const ImageView **imageViewOut);
2581
2582 // Creates a draw view with a range of layers of the level.
2583 angle::Result getLevelDrawImageView(Context *context,
2584 const ImageHelper &image,
2585 LevelIndex levelVk,
2586 uint32_t layer,
2587 uint32_t layerCount,
2588 gl::SrgbWriteControlMode mode,
2589 const ImageView **imageViewOut);
2590
2591 // Creates a draw view with a single layer of the level.
2592 angle::Result getLevelLayerDrawImageView(Context *context,
2593 const ImageHelper &image,
2594 LevelIndex levelVk,
2595 uint32_t layer,
2596 gl::SrgbWriteControlMode mode,
2597 const ImageView **imageViewOut);
2598
2599 // Return unique Serial for an imageView.
2600 ImageOrBufferViewSubresourceSerial getSubresourceSerial(
2601 gl::LevelIndex levelGL,
2602 uint32_t levelCount,
2603 uint32_t layer,
2604 LayerMode layerMode,
2605 SrgbDecodeMode srgbDecodeMode,
2606 gl::SrgbOverride srgbOverrideMode) const;
2607
2608 bool isImageViewGarbageEmpty() const;
2609
2610 void release(RendererVk *renderer, std::vector<vk::GarbageObject> &garbage);
2611
2612 private:
getReadImageView()2613 ImageView &getReadImageView()
2614 {
2615 return mLinearColorspace ? getReadViewImpl(mPerLevelRangeLinearReadImageViews)
2616 : getReadViewImpl(mPerLevelRangeSRGBReadImageViews);
2617 }
getFetchImageView()2618 ImageView &getFetchImageView()
2619 {
2620 return mLinearColorspace ? getReadViewImpl(mPerLevelRangeLinearFetchImageViews)
2621 : getReadViewImpl(mPerLevelRangeSRGBFetchImageViews);
2622 }
getCopyImageView()2623 ImageView &getCopyImageView()
2624 {
2625 return mLinearColorspace ? getReadViewImpl(mPerLevelRangeLinearCopyImageViews)
2626 : getReadViewImpl(mPerLevelRangeSRGBCopyImageViews);
2627 }
2628
2629 // Used by public get*ImageView() methods to do proper assert based on vector size and validity
getValidReadViewImpl(const ImageViewVector & imageViewVector)2630 inline const ImageView &getValidReadViewImpl(const ImageViewVector &imageViewVector) const
2631 {
2632 ASSERT(mCurrentBaseMaxLevelHash < imageViewVector.size() &&
2633 imageViewVector[mCurrentBaseMaxLevelHash].valid());
2634 return imageViewVector[mCurrentBaseMaxLevelHash];
2635 }
2636
2637 // Used by public get*ImageView() methods to do proper assert based on vector size
getReadViewImpl(const ImageViewVector & imageViewVector)2638 inline const ImageView &getReadViewImpl(const ImageViewVector &imageViewVector) const
2639 {
2640 ASSERT(mCurrentBaseMaxLevelHash < imageViewVector.size());
2641 return imageViewVector[mCurrentBaseMaxLevelHash];
2642 }
2643
2644 // Used by private get*ImageView() methods to do proper assert based on vector size
getReadViewImpl(ImageViewVector & imageViewVector)2645 inline ImageView &getReadViewImpl(ImageViewVector &imageViewVector)
2646 {
2647 ASSERT(mCurrentBaseMaxLevelHash < imageViewVector.size());
2648 return imageViewVector[mCurrentBaseMaxLevelHash];
2649 }
2650
2651 // Creates views with multiple layers and levels.
2652 angle::Result initReadViewsImpl(ContextVk *contextVk,
2653 gl::TextureType viewType,
2654 const ImageHelper &image,
2655 const angle::Format &format,
2656 const gl::SwizzleState &formatSwizzle,
2657 const gl::SwizzleState &readSwizzle,
2658 LevelIndex baseLevel,
2659 uint32_t levelCount,
2660 uint32_t baseLayer,
2661 uint32_t layerCount,
2662 const gl::SamplerState &samplerState);
2663
2664 // Create SRGB-reinterpreted read views
2665 angle::Result initSRGBReadViewsImpl(ContextVk *contextVk,
2666 gl::TextureType viewType,
2667 const ImageHelper &image,
2668 const angle::Format &format,
2669 const gl::SwizzleState &formatSwizzle,
2670 const gl::SwizzleState &readSwizzle,
2671 LevelIndex baseLevel,
2672 uint32_t levelCount,
2673 uint32_t baseLayer,
2674 uint32_t layerCount,
2675 VkImageUsageFlags imageUsageFlags);
2676
2677 // For applications that frequently switch a texture's base/max level, and make no other changes
2678 // to the texture, keep track of the currently-used base and max levels, and keep one "read
2679 // view" per each combination. The value stored here is base<<4|max, used to look up the view
2680 // in a vector.
2681 static_assert(gl::IMPLEMENTATION_MAX_TEXTURE_LEVELS <= 16,
2682 "Not enough bits in mCurrentBaseMaxLevelHash");
2683 uint8_t mCurrentBaseMaxLevelHash;
2684
2685 bool mLinearColorspace;
2686
2687 // Read views (one per [base, max] level range)
2688 ImageViewVector mPerLevelRangeLinearReadImageViews;
2689 ImageViewVector mPerLevelRangeSRGBReadImageViews;
2690 ImageViewVector mPerLevelRangeLinearFetchImageViews;
2691 ImageViewVector mPerLevelRangeSRGBFetchImageViews;
2692 ImageViewVector mPerLevelRangeLinearCopyImageViews;
2693 ImageViewVector mPerLevelRangeSRGBCopyImageViews;
2694 ImageViewVector mPerLevelRangeStencilReadImageViews;
2695
2696 // Draw views
2697 LayerLevelImageViewVector mLayerLevelDrawImageViews;
2698 LayerLevelImageViewVector mLayerLevelDrawImageViewsLinear;
2699 angle::HashMap<ImageSubresourceRange, std::unique_ptr<ImageView>> mSubresourceDrawImageViews;
2700
2701 // Storage views
2702 ImageViewVector mLevelStorageImageViews;
2703 LayerLevelImageViewVector mLayerLevelStorageImageViews;
2704
2705 // Serial for the image view set. getSubresourceSerial combines it with subresource info.
2706 ImageOrBufferViewSerial mImageViewSerial;
2707 };
2708
// Builds the ImageSubresourceRange describing a "read" view over the given level range and layer
// selection, including the sRGB decode/override state.
ImageSubresourceRange MakeImageSubresourceReadRange(gl::LevelIndex level,
                                                    uint32_t levelCount,
                                                    uint32_t layer,
                                                    LayerMode layerMode,
                                                    SrgbDecodeMode srgbDecodeMode,
                                                    gl::SrgbOverride srgbOverrideMode);
// Builds the ImageSubresourceRange describing a single-level "draw" view; used among others as
// the key of ImageViewHelper::mSubresourceDrawImageViews.
ImageSubresourceRange MakeImageSubresourceDrawRange(gl::LevelIndex level,
                                                    uint32_t layer,
                                                    LayerMode layerMode,
                                                    gl::SrgbWriteControlMode srgbWriteControlMode);
2719
// Helper for texture buffers: owns the VkBufferViews created over a range of a buffer, including
// format-reinterpreted views.
class BufferViewHelper final : public Resource
{
  public:
    BufferViewHelper();
    BufferViewHelper(BufferViewHelper &&other);
    ~BufferViewHelper() override;

    // Records the buffer range [offset, offset + size) that the views will cover.
    void init(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size);
    void release(ContextVk *contextVk);
    void destroy(VkDevice device);

    // Returns (creating on demand) a view of |buffer| with |format|; views are cached per
    // VkFormat in mViews.
    angle::Result getView(Context *context,
                          const BufferHelper &buffer,
                          VkDeviceSize bufferOffset,
                          const Format &format,
                          const BufferView **viewOut);

    // Return unique Serial for a bufferView.
    ImageOrBufferViewSubresourceSerial getSerial() const;

  private:
    // To support format reinterpretation, additional views for formats other than the one specified
    // to glTexBuffer may need to be created. On draw/dispatch, the format layout qualifier of the
    // imageBuffer is used (if provided) to create a potentially different view of the buffer.
    angle::HashMap<VkFormat, BufferView> mViews;

    // View properties:
    //
    // Offset and size specified to glTexBufferRange
    VkDeviceSize mOffset;
    VkDeviceSize mSize;

    // Serial for the buffer view. An ImageOrBufferViewSerial is used for texture buffers so that
    // they fit together with the other texture types.
    ImageOrBufferViewSerial mViewSerial;
};
2756
2757 class FramebufferHelper : public Resource
2758 {
2759 public:
2760 FramebufferHelper();
2761 ~FramebufferHelper() override;
2762
2763 FramebufferHelper(FramebufferHelper &&other);
2764 FramebufferHelper &operator=(FramebufferHelper &&other);
2765
2766 angle::Result init(ContextVk *contextVk, const VkFramebufferCreateInfo &createInfo);
2767 void release(ContextVk *contextVk);
2768
valid()2769 bool valid() { return mFramebuffer.valid(); }
2770
getFramebuffer()2771 const Framebuffer &getFramebuffer() const
2772 {
2773 ASSERT(mFramebuffer.valid());
2774 return mFramebuffer;
2775 }
2776
getFramebuffer()2777 Framebuffer &getFramebuffer()
2778 {
2779 ASSERT(mFramebuffer.valid());
2780 return mFramebuffer;
2781 }
2782
2783 private:
2784 // Vulkan object.
2785 Framebuffer mFramebuffer;
2786 };
2787
// Aggregates the shader modules of a program and the pipelines created from them (a cache of
// graphics pipelines, plus a single compute pipeline).
class ShaderProgramHelper : angle::NonCopyable
{
  public:
    ShaderProgramHelper();
    ~ShaderProgramHelper();

    // Whether a shader has been set for |shaderType| (implementation in vk_helpers.cpp).
    bool valid(const gl::ShaderType shaderType) const;
    void destroy(RendererVk *rendererVk);
    void release(ContextVk *contextVk);

    ShaderAndSerial &getShader(gl::ShaderType shaderType) { return mShaders[shaderType].get(); }

    void setShader(gl::ShaderType shaderType, RefCounted<ShaderAndSerial> *shader);
    void setSpecializationConstant(sh::vk::SpecializationConstantId id, uint32_t value);

    // Gets a graphics Pipeline from the pipeline cache, first resolving a compatible render pass
    // for the given pipeline description.
    ANGLE_INLINE angle::Result getGraphicsPipeline(
        ContextVk *contextVk,
        RenderPassCache *renderPassCache,
        const PipelineCache &pipelineCache,
        const PipelineLayout &pipelineLayout,
        const GraphicsPipelineDesc &pipelineDesc,
        const gl::AttributesMask &activeAttribLocationsMask,
        const gl::ComponentTypeMask &programAttribsTypeMask,
        const gl::DrawBufferMask &missingOutputsMask,
        const GraphicsPipelineDesc **descPtrOut,
        PipelineHelper **pipelineOut)
    {
        // Pull in a compatible RenderPass.
        RenderPass *compatibleRenderPass = nullptr;
        ANGLE_TRY(renderPassCache->getCompatibleRenderPass(
            contextVk, pipelineDesc.getRenderPassDesc(), &compatibleRenderPass));

        return mGraphicsPipelines.getPipeline(
            contextVk, pipelineCache, *compatibleRenderPass, pipelineLayout,
            activeAttribLocationsMask, programAttribsTypeMask, missingOutputsMask, mShaders,
            mSpecializationConstants, pipelineDesc, descPtrOut, pipelineOut);
    }

    angle::Result getComputePipeline(Context *context,
                                     const PipelineLayout &pipelineLayout,
                                     PipelineHelper **pipelineOut);

  private:
    // One shader module (with serial) per shader stage.
    ShaderAndSerialMap mShaders;
    GraphicsPipelineCache mGraphicsPipelines;

    // We should probably use PipelineHelper here so we can remove PipelineAndSerial.
    PipelineHelper mComputePipeline;

    // Specialization constants, currently only used by the graphics queue.
    SpecializationConstants mSpecializationConstants;
};
2841
2842 // Tracks current handle allocation counts in the back-end. Useful for debugging and profiling.
2843 // Note: not all handle types are currently implemented.
2844 class ActiveHandleCounter final : angle::NonCopyable
2845 {
2846 public:
2847 ActiveHandleCounter();
2848 ~ActiveHandleCounter();
2849
onAllocate(HandleType handleType)2850 void onAllocate(HandleType handleType)
2851 {
2852 mActiveCounts[handleType]++;
2853 mAllocatedCounts[handleType]++;
2854 }
2855
onDeallocate(HandleType handleType)2856 void onDeallocate(HandleType handleType) { mActiveCounts[handleType]--; }
2857
getActive(HandleType handleType)2858 uint32_t getActive(HandleType handleType) const { return mActiveCounts[handleType]; }
getAllocated(HandleType handleType)2859 uint32_t getAllocated(HandleType handleType) const { return mAllocatedCounts[handleType]; }
2860
2861 private:
2862 angle::PackedEnumMap<HandleType, uint32_t> mActiveCounts;
2863 angle::PackedEnumMap<HandleType, uint32_t> mAllocatedCounts;
2864 };
2865
// Sometimes ANGLE issues a command internally, such as copies, draws and dispatches that do not
// directly correspond to the application draw/dispatch call. Before the command is recorded in the
// command buffer, the render pass may need to be broken and/or appropriate barriers may need to be
// inserted. The following structs aggregate all resources that such internal commands need.
struct CommandBufferBufferAccess
{
    // The buffer being accessed.
    BufferHelper *buffer;
    // How the buffer is accessed (e.g. VK_ACCESS_TRANSFER_READ_BIT).
    VkAccessFlags accessType;
    // The pipeline stage performing the access.
    PipelineStage stage;
};
struct CommandBufferImageAccess
{
    // The image being accessed.
    ImageHelper *image;
    // Which aspects (color/depth/stencil) are accessed.
    VkImageAspectFlags aspectFlags;
    // The layout the image is used in for this access.
    ImageLayout imageLayout;
};
struct CommandBufferImageWrite
{
    CommandBufferImageAccess access;
    // Range of mip levels and array layers being written.
    gl::LevelIndex levelStart;
    uint32_t levelCount;
    uint32_t layerStart;
    uint32_t layerCount;
};
// Accumulates the buffer/image accesses an internal command will perform, so that the caller can
// break render passes and insert barriers before recording the command.  The on*() helpers are
// thin wrappers that fill in the access flags / stage / layout appropriate for each operation.
class CommandBufferAccess : angle::NonCopyable
{
  public:
    CommandBufferAccess();
    ~CommandBufferAccess();

    // Buffer read by a transfer (copy) operation.
    void onBufferTransferRead(BufferHelper *buffer)
    {
        onBufferRead(VK_ACCESS_TRANSFER_READ_BIT, PipelineStage::Transfer, buffer);
    }
    // Buffer written by a transfer (copy) operation.
    void onBufferTransferWrite(BufferHelper *buffer)
    {
        onBufferWrite(VK_ACCESS_TRANSFER_WRITE_BIT, PipelineStage::Transfer, buffer);
    }
    // Buffer copied onto itself: recorded as a write with both read and write transfer access.
    void onBufferSelfCopy(BufferHelper *buffer)
    {
        onBufferWrite(VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
                      PipelineStage::Transfer, buffer);
    }
    // Buffer read by a compute shader.
    void onBufferComputeShaderRead(BufferHelper *buffer)
    {
        onBufferRead(VK_ACCESS_SHADER_READ_BIT, PipelineStage::ComputeShader, buffer);
    }
    // Buffer written by a compute shader.
    void onBufferComputeShaderWrite(BufferHelper *buffer)
    {
        onBufferWrite(VK_ACCESS_SHADER_WRITE_BIT, PipelineStage::ComputeShader, buffer);
    }

    // Image read as the source of a transfer (copy) operation.
    void onImageTransferRead(VkImageAspectFlags aspectFlags, ImageHelper *image)
    {
        onImageRead(aspectFlags, ImageLayout::TransferSrc, image);
    }
    // Image subresource range written as the destination of a transfer (copy) operation.
    void onImageTransferWrite(gl::LevelIndex levelStart,
                              uint32_t levelCount,
                              uint32_t layerStart,
                              uint32_t layerCount,
                              VkImageAspectFlags aspectFlags,
                              ImageHelper *image)
    {
        onImageWrite(levelStart, levelCount, layerStart, layerCount, aspectFlags,
                     ImageLayout::TransferDst, image);
    }
    // Image read by a compute shader.
    void onImageComputeShaderRead(VkImageAspectFlags aspectFlags, ImageHelper *image)
    {
        onImageRead(aspectFlags, ImageLayout::ComputeShaderReadOnly, image);
    }
    // Image subresource range written by a compute shader.
    void onImageComputeShaderWrite(gl::LevelIndex levelStart,
                                   uint32_t levelCount,
                                   uint32_t layerStart,
                                   uint32_t layerCount,
                                   VkImageAspectFlags aspectFlags,
                                   ImageHelper *image)
    {
        onImageWrite(levelStart, levelCount, layerStart, layerCount, aspectFlags,
                     ImageLayout::ComputeShaderWrite, image);
    }

    // The limits reflect the current maximum concurrent usage of each resource type. ASSERTs will
    // fire if this limit is exceeded in the future.
    using ReadBuffers  = angle::FixedVector<CommandBufferBufferAccess, 2>;
    using WriteBuffers = angle::FixedVector<CommandBufferBufferAccess, 2>;
    using ReadImages   = angle::FixedVector<CommandBufferImageAccess, 2>;
    using WriteImages  = angle::FixedVector<CommandBufferImageWrite, 1>;

    const ReadBuffers &getReadBuffers() const { return mReadBuffers; }
    const WriteBuffers &getWriteBuffers() const { return mWriteBuffers; }
    const ReadImages &getReadImages() const { return mReadImages; }
    const WriteImages &getWriteImages() const { return mWriteImages; }

  private:
    void onBufferRead(VkAccessFlags readAccessType, PipelineStage readStage, BufferHelper *buffer);
    void onBufferWrite(VkAccessFlags writeAccessType,
                       PipelineStage writeStage,
                       BufferHelper *buffer);

    void onImageRead(VkImageAspectFlags aspectFlags, ImageLayout imageLayout, ImageHelper *image);
    void onImageWrite(gl::LevelIndex levelStart,
                      uint32_t levelCount,
                      uint32_t layerStart,
                      uint32_t layerCount,
                      VkImageAspectFlags aspectFlags,
                      ImageLayout imageLayout,
                      ImageHelper *image);

    ReadBuffers mReadBuffers;
    WriteBuffers mWriteBuffers;
    ReadImages mReadImages;
    WriteImages mWriteImages;
};
2979
// This class' responsibility is to create index buffers needed to support line loops in Vulkan.
// In the setup phase of drawing, the createIndexBuffer method should be called with the
// current draw call parameters. If an element array buffer is bound for an indexed draw, use
// createIndexBufferFromElementArrayBuffer.
//
// If the user wants to draw a loop between [v1, v2, v3], we will create an indexed buffer with
// these indexes: [0, 1, 2, 3, 0] to emulate the loop.
class LineLoopHelper final : angle::NonCopyable
{
  public:
    LineLoopHelper(RendererVk *renderer);
    ~LineLoopHelper();

    // Index buffer emulating a line loop for a non-indexed (drawArrays) draw.
    angle::Result getIndexBufferForDrawArrays(ContextVk *contextVk,
                                              uint32_t clampedVertexCount,
                                              GLint firstVertex,
                                              BufferHelper **bufferOut);

    // Index buffer emulating a line loop for an indexed draw whose indices come from a bound
    // element array buffer.
    angle::Result getIndexBufferForElementArrayBuffer(ContextVk *contextVk,
                                                      BufferVk *elementArrayBufferVk,
                                                      gl::DrawElementsType glIndexType,
                                                      int indexCount,
                                                      intptr_t elementArrayOffset,
                                                      BufferHelper **bufferOut,
                                                      uint32_t *indexCountOut);

    // Index buffer emulating a line loop for client-memory indices pointed to by |srcPtr|.
    angle::Result streamIndices(ContextVk *contextVk,
                                gl::DrawElementsType glIndexType,
                                GLsizei indexCount,
                                const uint8_t *srcPtr,
                                BufferHelper **bufferOut,
                                uint32_t *indexCountOut);

    // Indirect-draw variants: produce both the emulated index buffer and a rewritten indirect
    // parameter buffer.
    angle::Result streamIndicesIndirect(ContextVk *contextVk,
                                        gl::DrawElementsType glIndexType,
                                        BufferHelper *indexBuffer,
                                        BufferHelper *indirectBuffer,
                                        VkDeviceSize indirectBufferOffset,
                                        BufferHelper **indexBufferOut,
                                        BufferHelper **indirectBufferOut);

    angle::Result streamArrayIndirect(ContextVk *contextVk,
                                      size_t vertexCount,
                                      BufferHelper *arrayIndirectBuffer,
                                      VkDeviceSize arrayIndirectBufferOffset,
                                      BufferHelper **indexBufferOut,
                                      BufferHelper **indexIndirectBufferOut);

    void release(ContextVk *contextVk);
    void destroy(RendererVk *renderer);

    // Records the indexed draw call for the emulated loop into |commandBuffer|.
    static void Draw(uint32_t count, uint32_t baseVertex, RenderPassCommandBuffer *commandBuffer);

  private:
    // Scratch buffers reused across draws for the emulated indices / indirect parameters.
    BufferHelper mDynamicIndexBuffer;
    BufferHelper mDynamicIndirectBuffer;
};
3037 } // namespace vk
3038 } // namespace rx
3039
3040 #endif // LIBANGLE_RENDERER_VULKAN_VK_HELPERS_H_
3041