1 //
2 // Copyright 2018 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // vk_helpers:
7 // Helper utility classes that manage Vulkan resources.
8
9 #ifndef LIBANGLE_RENDERER_VULKAN_VK_HELPERS_H_
10 #define LIBANGLE_RENDERER_VULKAN_VK_HELPERS_H_
11
12 #include "common/MemoryBuffer.h"
13 #include "libANGLE/renderer/vulkan/vk_cache_utils.h"
14 #include "libANGLE/renderer/vulkan/vk_format_utils.h"
15
16 #include <functional>
17
18 namespace gl
19 {
20 class ImageIndex;
21 } // namespace gl
22
23 namespace rx
24 {
25 namespace vk
26 {
// Usage flags for the dynamic buffers that stream client vertex/index/indirect data. The
// STORAGE_BUFFER bit is additionally set on each — presumably so utility (compute) passes can
// also read/write these buffers; TODO confirm against the users of these flags.
constexpr VkBufferUsageFlags kVertexBufferUsageFlags =
    VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr VkBufferUsageFlags kIndexBufferUsageFlags =
    VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr VkBufferUsageFlags kIndirectBufferUsageFlags =
    VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
// Alignments (in bytes) used when suballocating out of the corresponding dynamic buffers.
constexpr size_t kVertexBufferAlignment = 4;
constexpr size_t kIndexBufferAlignment = 4;
constexpr size_t kIndirectBufferAlignment = 4;

// Staging buffers serve as the source/destination of CPU<->GPU transfers.
constexpr VkBufferUsageFlags kStagingBufferFlags =
    VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
constexpr size_t kStagingBufferSize = 1024 * 16;

// Named zero for readability at VkImageCreateInfo::flags call sites.
constexpr VkImageCreateFlags kVkImageCreateFlagsNone = 0;

// A pair of offsets into a staging buffer; presumably one per image aspect (e.g.
// depth/stencil) — TODO confirm with the users of this alias.
using StagingBufferOffsetArray = std::array<VkDeviceSize, 2>;
44
// The state bound to one texture unit: the texture, the sampler used to sample it, and the
// sRGB decode override. Pointers are non-owning — lifetime is presumably managed by the
// context's texture/sampler bindings; TODO confirm.
struct TextureUnit final
{
    TextureVk *texture;
    const SamplerHelper *sampler;
    GLenum srgbDecode;
};
51
52 // A dynamic buffer is conceptually an infinitely long buffer. Each time you write to the buffer,
53 // you will always write to a previously unused portion. After a series of writes, you must flush
54 // the buffer data to the device. Buffer lifetime currently assumes that each new allocation will
55 // last as long or longer than each prior allocation.
56 //
57 // Dynamic buffers are used to implement a variety of data streaming operations in Vulkan, such
58 // as for immediate vertex array and element array data, uniform updates, and other dynamic data.
59 //
60 // Internally dynamic buffers keep a collection of VkBuffers. When we write past the end of a
61 // currently active VkBuffer we keep it until it is no longer in use. We then mark it available
62 // for future allocations in a free list.
class BufferHelper;
// Owning collection of buffers; used by DynamicBuffer for its in-flight and free lists.
using BufferHelperPointerVector = std::vector<std::unique_ptr<BufferHelper>>;
65
// Policy controlling whether a DynamicBuffer frees or recycles its backing buffers.
enum class DynamicBufferPolicy
{
    // Used where future allocations from the dynamic buffer are unlikely, so it's best to free the
    // memory when the allocated buffers are no longer in use.
    OneShotUse,
    // Used where multiple small allocations are made every frame, so it's worth keeping the free
    // buffers around to avoid release/reallocation.
    FrequentSmallAllocations,
    // Used where bursts of allocation happen occasionally, but the steady state may make
    // allocations every now and then. In that case, a limited number of buffers are retained.
    SporadicTextureUpload,
};
78
class DynamicBuffer : angle::NonCopyable
{
  public:
    DynamicBuffer();
    DynamicBuffer(DynamicBuffer &&other);
    ~DynamicBuffer();

    // Init is called after the buffer creation so that the alignment can be specified later.
    void init(RendererVk *renderer,
              VkBufferUsageFlags usage,
              size_t alignment,
              size_t initialSize,
              bool hostVisible,
              DynamicBufferPolicy policy);

    // Init that gives the ability to pass in specified memory property flags for the buffer.
    void initWithFlags(RendererVk *renderer,
                       VkBufferUsageFlags usage,
                       size_t alignment,
                       size_t initialSize,
                       VkMemoryPropertyFlags memoryProperty,
                       DynamicBufferPolicy policy);

    // This call will allocate a new region at the end of the current buffer. If it can't find
    // enough space in the current buffer, it returns false. This gives caller a chance to deal with
    // buffer switch that may occur with allocate call.
    bool allocateFromCurrentBuffer(size_t sizeInBytes, uint8_t **ptrOut, VkDeviceSize *offsetOut);

    // This call will allocate a new region at the end of the buffer. It internally may trigger
    // a new buffer to be created (which is returned in the optional parameter
    // `newBufferAllocatedOut`). The new region will be in the returned buffer at given offset. If
    // a memory pointer is given, the buffer will be automatically map()ed.
    angle::Result allocateWithAlignment(ContextVk *contextVk,
                                        size_t sizeInBytes,
                                        size_t alignment,
                                        uint8_t **ptrOut,
                                        VkBuffer *bufferOut,
                                        VkDeviceSize *offsetOut,
                                        bool *newBufferAllocatedOut);

    // Allocate with the default alignment (mAlignment, as set by init/requireAlignment).
    angle::Result allocate(ContextVk *contextVk,
                           size_t sizeInBytes,
                           uint8_t **ptrOut,
                           VkBuffer *bufferOut,
                           VkDeviceSize *offsetOut,
                           bool *newBufferAllocatedOut)
    {
        return allocateWithAlignment(contextVk, sizeInBytes, mAlignment, ptrOut, bufferOut,
                                     offsetOut, newBufferAllocatedOut);
    }

    // After a sequence of writes, call flush to ensure the data is visible to the device.
    angle::Result flush(ContextVk *contextVk);

    // After a sequence of writes, call invalidate to ensure the data is visible to the host.
    angle::Result invalidate(ContextVk *contextVk);

    // This releases resources when they might currently be in use.
    void release(RendererVk *renderer);

    // This releases all the buffers that have been allocated since this was last called.
    void releaseInFlightBuffers(ContextVk *contextVk);

    // This adds inflight buffers to the context's mResourceUseList and then releases them
    void releaseInFlightBuffersToResourceUseList(ContextVk *contextVk);

    // This frees resources immediately.
    void destroy(RendererVk *renderer);

    BufferHelper *getCurrentBuffer() const { return mBuffer.get(); }

    // **Accumulate** an alignment requirement. A dynamic buffer is used as the staging buffer for
    // image uploads, which can contain updates to unrelated mips, possibly with different formats.
    // The staging buffer should have an alignment that can satisfy all those formats, i.e. it's the
    // lcm of all alignments set in its lifetime.
    void requireAlignment(RendererVk *renderer, size_t alignment);
    size_t getAlignment() const { return mAlignment; }

    // For testing only!
    void setMinimumSizeForTesting(size_t minSize);

    bool isCoherent() const
    {
        return (mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
    }

    bool valid() const { return mSize != 0; }

  private:
    void reset();
    angle::Result allocateNewBuffer(ContextVk *contextVk);

    VkBufferUsageFlags mUsage;
    bool mHostVisible;
    DynamicBufferPolicy mPolicy;
    size_t mInitialSize;
    // Currently active buffer that regions are suballocated from.
    std::unique_ptr<BufferHelper> mBuffer;
    uint32_t mNextAllocationOffset;
    uint32_t mLastFlushOrInvalidateOffset;
    // Size of the currently active buffer; 0 means this DynamicBuffer is not valid().
    size_t mSize;
    size_t mAlignment;
    VkMemoryPropertyFlags mMemoryPropertyFlags;

    // Buffers handed out since the last releaseInFlightBuffers* call, and buffers kept around
    // for reuse, respectively.
    BufferHelperPointerVector mInFlightBuffers;
    BufferHelperPointerVector mBufferFreeList;
};
186
187 // Based off of the DynamicBuffer class, DynamicShadowBuffer provides
188 // a similar conceptually infinitely long buffer that will only be written
189 // to and read by the CPU. This can be used to provide CPU cached copies of
190 // GPU-read only buffers. The value add here is that when an app requests
// CPU access to a buffer we can fulfill such a request in O(1) time since
192 // we don't need to wait for GPU to be done with in-flight commands.
193 //
194 // The hidden cost here is that any operation that updates a buffer, either
195 // through a buffer sub data update or a buffer-to-buffer copy will have an
196 // additional overhead of having to update its CPU only buffer
197 class DynamicShadowBuffer : public angle::NonCopyable
198 {
199 public:
200 DynamicShadowBuffer();
201 DynamicShadowBuffer(DynamicShadowBuffer &&other);
202 ~DynamicShadowBuffer();
203
204 // Initialize the DynamicShadowBuffer.
205 void init(size_t initialSize);
206
207 // Returns whether this DynamicShadowBuffer is active
valid()208 ANGLE_INLINE bool valid() { return (mSize != 0); }
209
210 // This call will actually allocate a new CPU only memory from the heap.
211 // The size can be different than the one specified during `init`.
212 angle::Result allocate(size_t sizeInBytes);
213
updateData(const uint8_t * data,size_t size,size_t offset)214 ANGLE_INLINE void updateData(const uint8_t *data, size_t size, size_t offset)
215 {
216 ASSERT(!mBuffer.empty());
217 // Memcopy data into the buffer
218 memcpy((mBuffer.data() + offset), data, size);
219 }
220
221 // Map the CPU only buffer and return the pointer. We map the entire buffer for now.
map(size_t offset,uint8_t ** mapPtr)222 ANGLE_INLINE void map(size_t offset, uint8_t **mapPtr)
223 {
224 ASSERT(mapPtr);
225 ASSERT(!mBuffer.empty());
226 *mapPtr = mBuffer.data() + offset;
227 }
228
229 // Unmap the CPU only buffer, NOOP for now
unmap()230 ANGLE_INLINE void unmap() {}
231
232 // This releases resources when they might currently be in use.
233 void release();
234
235 // This frees resources immediately.
236 void destroy(VkDevice device);
237
getCurrentBuffer()238 ANGLE_INLINE uint8_t *getCurrentBuffer()
239 {
240 ASSERT(!mBuffer.empty());
241 return mBuffer.data();
242 }
243
getCurrentBuffer()244 ANGLE_INLINE const uint8_t *getCurrentBuffer() const
245 {
246 ASSERT(!mBuffer.empty());
247 return mBuffer.data();
248 }
249
250 private:
251 void reset();
252
253 size_t mInitialSize;
254 size_t mSize;
255 angle::MemoryBuffer mBuffer;
256 };
257
258 // Uses DescriptorPool to allocate descriptor sets as needed. If a descriptor pool becomes full, we
259 // allocate new pools internally as needed. RendererVk takes care of the lifetime of the discarded
// pools. Note that we use a fixed layout for descriptor pools in ANGLE.
261
262 // Shared handle to a descriptor pool. Each helper is allocated from the dynamic descriptor pool.
263 // Can be used to share descriptor pools between multiple ProgramVks and the ContextVk.
264 class DescriptorPoolHelper : public Resource
265 {
266 public:
267 DescriptorPoolHelper();
268 ~DescriptorPoolHelper() override;
269
valid()270 bool valid() { return mDescriptorPool.valid(); }
271
272 bool hasCapacity(uint32_t descriptorSetCount) const;
273 angle::Result init(ContextVk *contextVk,
274 const std::vector<VkDescriptorPoolSize> &poolSizesIn,
275 uint32_t maxSets);
276 void destroy(VkDevice device);
277 void release(ContextVk *contextVk);
278
279 angle::Result allocateSets(ContextVk *contextVk,
280 const VkDescriptorSetLayout *descriptorSetLayout,
281 uint32_t descriptorSetCount,
282 VkDescriptorSet *descriptorSetsOut);
283
284 private:
285 uint32_t mFreeDescriptorSets;
286 DescriptorPool mDescriptorPool;
287 };
288
// Ref-counted wrappers so descriptor pools can be shared between program and context objects.
using RefCountedDescriptorPoolHelper = RefCounted<DescriptorPoolHelper>;
using RefCountedDescriptorPoolBinding = BindingPointer<DescriptorPoolHelper>;
291
class DynamicDescriptorPool final : angle::NonCopyable
{
  public:
    DynamicDescriptorPool();
    ~DynamicDescriptorPool();

    // The DynamicDescriptorPool only handles one pool size at this time.
    // Note that setSizes[i].descriptorCount is expected to be the number of descriptors in
    // an individual set. The pool size will be calculated accordingly.
    angle::Result init(ContextVk *contextVk,
                       const VkDescriptorPoolSize *setSizes,
                       size_t setSizeCount,
                       VkDescriptorSetLayout descriptorSetLayout);
    void destroy(VkDevice device);
    void release(ContextVk *contextVk);

    // We use the descriptor type to help count the number of free sets.
    // By convention, sets are indexed according to the constants in vk_cache_utils.h.
    // Convenience overload of allocateSetsAndGetInfo that discards whether a new pool was
    // allocated.
    ANGLE_INLINE angle::Result allocateSets(ContextVk *contextVk,
                                            const VkDescriptorSetLayout *descriptorSetLayout,
                                            uint32_t descriptorSetCount,
                                            RefCountedDescriptorPoolBinding *bindingOut,
                                            VkDescriptorSet *descriptorSetsOut)
    {
        bool ignoreNewPoolAllocated;
        return allocateSetsAndGetInfo(contextVk, descriptorSetLayout, descriptorSetCount,
                                      bindingOut, descriptorSetsOut, &ignoreNewPoolAllocated);
    }

    // We use the descriptor type to help count the number of free sets.
    // By convention, sets are indexed according to the constants in vk_cache_utils.h.
    angle::Result allocateSetsAndGetInfo(ContextVk *contextVk,
                                         const VkDescriptorSetLayout *descriptorSetLayout,
                                         uint32_t descriptorSetCount,
                                         RefCountedDescriptorPoolBinding *bindingOut,
                                         VkDescriptorSet *descriptorSetsOut,
                                         bool *newPoolAllocatedOut);

    // For testing only!
    static uint32_t GetMaxSetsPerPoolForTesting();
    static void SetMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool);
    static uint32_t GetMaxSetsPerPoolMultiplierForTesting();
    static void SetMaxSetsPerPoolMultiplierForTesting(uint32_t maxSetsPerPool);

  private:
    angle::Result allocateNewPool(ContextVk *contextVk);

    static constexpr uint32_t kMaxSetsPerPoolMax = 512;
    static uint32_t mMaxSetsPerPool;
    static uint32_t mMaxSetsPerPoolMultiplier;
    // Index into mDescriptorPools of the pool currently being allocated from.
    size_t mCurrentPoolIndex;
    std::vector<RefCountedDescriptorPoolHelper *> mDescriptorPools;
    std::vector<VkDescriptorPoolSize> mPoolSizes;
    // This cached handle is used for verifying the layout being used to allocate descriptor sets
    // from the pool matches the layout that the pool was created for, to ensure that the free
    // descriptor count is accurate and new pools are created appropriately.
    VkDescriptorSetLayout mCachedDescriptorSetLayout;
};
350
351 template <typename Pool>
352 class DynamicallyGrowingPool : angle::NonCopyable
353 {
354 public:
355 DynamicallyGrowingPool();
356 virtual ~DynamicallyGrowingPool();
357
isValid()358 bool isValid() { return mPoolSize > 0; }
359
360 protected:
361 angle::Result initEntryPool(Context *contextVk, uint32_t poolSize);
362
363 virtual void destroyPoolImpl(VkDevice device, Pool &poolToDestroy) = 0;
364 void destroyEntryPool(VkDevice device);
365
366 // Checks to see if any pool is already free, in which case it sets it as current pool and
367 // returns true.
368 bool findFreeEntryPool(ContextVk *contextVk);
369
370 // Allocates a new entry and initializes it with the given pool.
371 angle::Result allocateNewEntryPool(ContextVk *contextVk, Pool &&pool);
372
373 // Called by the implementation whenever an entry is freed.
374 void onEntryFreed(ContextVk *contextVk, size_t poolIndex);
375
getPool(size_t index)376 const Pool &getPool(size_t index) const
377 {
378 return const_cast<DynamicallyGrowingPool *>(this)->getPool(index);
379 }
380
getPool(size_t index)381 Pool &getPool(size_t index)
382 {
383 ASSERT(index < mPools.size());
384 return mPools[index].pool;
385 }
386
getPoolSize()387 uint32_t getPoolSize() const { return mPoolSize; }
388
389 virtual angle::Result allocatePoolImpl(ContextVk *contextVk,
390 Pool &poolToAllocate,
391 uint32_t entriesToAllocate) = 0;
392 angle::Result allocatePoolEntries(ContextVk *contextVk,
393 uint32_t entryCount,
394 uint32_t *poolIndexOut,
395 uint32_t *currentEntryOut);
396
397 private:
398 // The pool size, to know when a pool is completely freed.
399 uint32_t mPoolSize;
400
401 struct PoolResource : public Resource
402 {
403 PoolResource(Pool &&poolIn, uint32_t freedCountIn);
404 PoolResource(PoolResource &&other);
405
406 Pool pool;
407
408 // A count corresponding to each pool indicating how many of its allocated entries
409 // have been freed. Once that value reaches mPoolSize for each pool, that pool is considered
410 // free and reusable. While keeping a bitset would allow allocation of each index, the
411 // slight runtime overhead of finding free indices is not worth the slight memory overhead
412 // of creating new pools when unnecessary.
413 uint32_t freedCount;
414 };
415 std::vector<PoolResource> mPools;
416
417 // Index into mPools indicating pool we are currently allocating from.
418 size_t mCurrentPool;
419 // Index inside mPools[mCurrentPool] indicating which index can be allocated next.
420 uint32_t mCurrentFreeEntry;
421 };
422
// DynamicQueryPool allocates indices out of QueryPool as needed. Once a QueryPool is exhausted,
// another is created. The query pools live permanently, but are recycled as indices get freed.

// These are arbitrary default sizes for query pools.
constexpr uint32_t kDefaultOcclusionQueryPoolSize = 64;
constexpr uint32_t kDefaultTimestampQueryPoolSize = 64;
constexpr uint32_t kDefaultTransformFeedbackQueryPoolSize = 128;
constexpr uint32_t kDefaultPrimitivesGeneratedQueryPoolSize = 128;

// Defined below; describes an allocation made from a DynamicQueryPool.
class QueryHelper;
433
class DynamicQueryPool final : public DynamicallyGrowingPool<QueryPool>
{
  public:
    DynamicQueryPool();
    ~DynamicQueryPool() override;

    angle::Result init(ContextVk *contextVk, VkQueryType type, uint32_t poolSize);
    void destroy(VkDevice device);

    // Allocates |queryCount| query indices; the allocation is described by |queryOut|. Freed
    // with freeQuery.
    angle::Result allocateQuery(ContextVk *contextVk, QueryHelper *queryOut, uint32_t queryCount);
    void freeQuery(ContextVk *contextVk, QueryHelper *query);

    const QueryPool &getQueryPool(size_t index) const { return getPool(index); }

  private:
    angle::Result allocatePoolImpl(ContextVk *contextVk,
                                   QueryPool &poolToAllocate,
                                   uint32_t entriesToAllocate) override;
    void destroyPoolImpl(VkDevice device, QueryPool &poolToDestroy) override;

    // Information required to create new query pools
    VkQueryType mQueryType;
};
457
458 // Stores the result of a Vulkan query call. XFB queries in particular store two result values.
459 class QueryResult final
460 {
461 public:
QueryResult(uint32_t intsPerResult)462 QueryResult(uint32_t intsPerResult) : mIntsPerResult(intsPerResult), mResults{} {}
463
464 void operator+=(const QueryResult &rhs)
465 {
466 mResults[0] += rhs.mResults[0];
467 mResults[1] += rhs.mResults[1];
468 }
469
getDataSize()470 size_t getDataSize() const { return mIntsPerResult * sizeof(uint64_t); }
471 void setResults(uint64_t *results, uint32_t queryCount);
getResult(size_t index)472 uint64_t getResult(size_t index) const
473 {
474 ASSERT(index < mIntsPerResult);
475 return mResults[index];
476 }
477
478 static constexpr size_t kDefaultResultIndex = 0;
479 static constexpr size_t kTransformFeedbackPrimitivesWrittenIndex = 0;
480 static constexpr size_t kPrimitivesGeneratedIndex = 1;
481
482 private:
483 uint32_t mIntsPerResult;
484 std::array<uint64_t, 2> mResults;
485 };
486
487 // Queries in Vulkan are identified by the query pool and an index for a query within that pool.
488 // Unlike other pools, such as descriptor pools where an allocation returns an independent object
489 // from the pool, the query allocations are not done through a Vulkan function and are only an
490 // integer index.
491 //
492 // Furthermore, to support arbitrarily large number of queries, DynamicQueryPool creates query pools
493 // of a fixed size as needed and allocates indices within those pools.
494 //
495 // The QueryHelper class below keeps the pool and index pair together. For multiview, multiple
496 // consecutive query indices are implicitly written to by the driver, so the query count is
497 // additionally kept.
class QueryHelper final : public Resource
{
  public:
    QueryHelper();
    ~QueryHelper() override;
    QueryHelper(QueryHelper &&rhs);
    QueryHelper &operator=(QueryHelper &&rhs);
    // Associates this helper with |queryCount| queries starting at index |query| of the pool at
    // |queryPoolIndex| within |dynamicQueryPool|.
    void init(const DynamicQueryPool *dynamicQueryPool,
              const size_t queryPoolIndex,
              uint32_t query,
              uint32_t queryCount);
    void deinit();

    bool valid() const { return mDynamicQueryPool != nullptr; }

    // Begin/end queries. These functions break the render pass.
    angle::Result beginQuery(ContextVk *contextVk);
    angle::Result endQuery(ContextVk *contextVk);
    // Begin/end queries within a started render pass.
    angle::Result beginRenderPassQuery(ContextVk *contextVk);
    void endRenderPassQuery(ContextVk *contextVk);

    angle::Result flushAndWriteTimestamp(ContextVk *contextVk);
    // When syncing gpu/cpu time, main thread accesses primary directly
    void writeTimestampToPrimary(ContextVk *contextVk, PrimaryCommandBuffer *primary);
    // All other timestamp accesses should be made on outsideRenderPassCommandBuffer
    void writeTimestamp(ContextVk *contextVk, CommandBuffer *outsideRenderPassCommandBuffer);

    // Whether this query helper has generated and submitted any commands.
    bool hasSubmittedCommands() const;

    angle::Result getUint64ResultNonBlocking(ContextVk *contextVk,
                                             QueryResult *resultOut,
                                             bool *availableOut);
    angle::Result getUint64Result(ContextVk *contextVk, QueryResult *resultOut);

  private:
    friend class DynamicQueryPool;
    const QueryPool &getQueryPool() const
    {
        ASSERT(valid());
        return mDynamicQueryPool->getQueryPool(mQueryPoolIndex);
    }

    // Reset needs to always be done outside a render pass, which may be different from the
    // passed-in command buffer (which could be the render pass').
    void beginQueryImpl(ContextVk *contextVk,
                        CommandBuffer *resetCommandBuffer,
                        CommandBuffer *commandBuffer);
    void endQueryImpl(ContextVk *contextVk, CommandBuffer *commandBuffer);
    template <typename CommandBufferT>
    void resetQueryPoolImpl(ContextVk *contextVk,
                            const QueryPool &queryPool,
                            CommandBufferT *commandBuffer);
    VkResult getResultImpl(ContextVk *contextVk,
                           const VkQueryResultFlags flags,
                           QueryResult *resultOut);

    // The pool this allocation came from, and the location of the queries within it.
    const DynamicQueryPool *mDynamicQueryPool;
    size_t mQueryPoolIndex;
    uint32_t mQuery;
    uint32_t mQueryCount;

    // Lifecycle of the query; tracked so begin/end pairing can be validated and handled.
    enum class QueryStatus
    {
        Inactive,
        Active,
        Ended
    };
    QueryStatus mStatus;
};
569
570 // DynamicSemaphorePool allocates semaphores as needed. It uses a std::vector
571 // as a pool to allocate many semaphores at once. The pools live permanently,
572 // but are recycled as semaphores get freed.
573
// These are arbitrary default sizes for semaphore pools.
constexpr uint32_t kDefaultSemaphorePoolSize = 64;

// Defined below; wraps a semaphore allocated out of DynamicSemaphorePool.
class SemaphoreHelper;
578
class DynamicSemaphorePool final : public DynamicallyGrowingPool<std::vector<Semaphore>>
{
  public:
    DynamicSemaphorePool();
    ~DynamicSemaphorePool() override;

    angle::Result init(ContextVk *contextVk, uint32_t poolSize);
    void destroy(VkDevice device);

    // Allocates a semaphore out of the pool, described by |semaphoreOut|. Allocated semaphores
    // are returned to the pool with freeSemaphore.
    // NOTE(review): a previous comment referenced an |autoFree| parameter that does not exist
    // in this signature.
    angle::Result allocateSemaphore(ContextVk *contextVk, SemaphoreHelper *semaphoreOut);
    void freeSemaphore(ContextVk *contextVk, SemaphoreHelper *semaphore);

  private:
    angle::Result allocatePoolImpl(ContextVk *contextVk,
                                   std::vector<Semaphore> &poolToAllocate,
                                   uint32_t entriesToAllocate) override;
    void destroyPoolImpl(VkDevice device, std::vector<Semaphore> &poolToDestroy) override;
};
599
600 // Semaphores that are allocated from the semaphore pool are encapsulated in a helper object,
601 // keeping track of where in the pool they are allocated from.
class SemaphoreHelper final : angle::NonCopyable
{
  public:
    SemaphoreHelper();
    ~SemaphoreHelper();

    SemaphoreHelper(SemaphoreHelper &&other);
    SemaphoreHelper &operator=(SemaphoreHelper &&other);

    // Points this helper at a pool-owned semaphore and records where in the pool it came from.
    void init(const size_t semaphorePoolIndex, const Semaphore *semaphore);
    void deinit();

    const Semaphore *getSemaphore() const { return mSemaphore; }

    // Used only by DynamicSemaphorePool.
    size_t getSemaphorePoolIndex() const { return mSemaphorePoolIndex; }

  private:
    size_t mSemaphorePoolIndex;
    // Non-owning; the semaphore lives in the DynamicSemaphorePool's storage.
    const Semaphore *mSemaphore;
};
623
624 // This class' responsibility is to create index buffers needed to support line loops in Vulkan.
625 // In the setup phase of drawing, the createIndexBuffer method should be called with the
626 // current draw call parameters. If an element array buffer is bound for an indexed draw, use
627 // createIndexBufferFromElementArrayBuffer.
628 //
629 // If the user wants to draw a loop between [v1, v2, v3], we will create an indexed buffer with
630 // these indexes: [0, 1, 2, 3, 0] to emulate the loop.
class LineLoopHelper final : angle::NonCopyable
{
  public:
    LineLoopHelper(RendererVk *renderer);
    ~LineLoopHelper();

    // Builds a line-loop index buffer for a non-indexed draw covering |clampedVertexCount|
    // vertices starting at |firstVertex| (first index repeated at the end to close the loop).
    angle::Result getIndexBufferForDrawArrays(ContextVk *contextVk,
                                              uint32_t clampedVertexCount,
                                              GLint firstVertex,
                                              BufferHelper **bufferOut,
                                              VkDeviceSize *offsetOut);

    // Builds a line-loop index buffer from the contents of a bound element array buffer.
    angle::Result getIndexBufferForElementArrayBuffer(ContextVk *contextVk,
                                                      BufferVk *elementArrayBufferVk,
                                                      gl::DrawElementsType glIndexType,
                                                      int indexCount,
                                                      intptr_t elementArrayOffset,
                                                      BufferHelper **bufferOut,
                                                      VkDeviceSize *bufferOffsetOut,
                                                      uint32_t *indexCountOut);

    // Builds a line-loop index buffer from client-memory index data at |srcPtr|.
    angle::Result streamIndices(ContextVk *contextVk,
                                gl::DrawElementsType glIndexType,
                                GLsizei indexCount,
                                const uint8_t *srcPtr,
                                BufferHelper **bufferOut,
                                VkDeviceSize *bufferOffsetOut,
                                uint32_t *indexCountOut);

    // Indirect-draw variant: produces line-loop-compatible index and indirect buffers from the
    // given indexed indirect parameters.
    angle::Result streamIndicesIndirect(ContextVk *contextVk,
                                        gl::DrawElementsType glIndexType,
                                        BufferHelper *indexBuffer,
                                        VkDeviceSize indexBufferOffset,
                                        BufferHelper *indirectBuffer,
                                        VkDeviceSize indirectBufferOffset,
                                        BufferHelper **indexBufferOut,
                                        VkDeviceSize *indexBufferOffsetOut,
                                        BufferHelper **indirectBufferOut,
                                        VkDeviceSize *indirectBufferOffsetOut);

    // As above, but for non-indexed (array) indirect draws.
    angle::Result streamArrayIndirect(ContextVk *contextVk,
                                      size_t vertexCount,
                                      BufferHelper *arrayIndirectBuffer,
                                      VkDeviceSize arrayIndirectBufferOffset,
                                      BufferHelper **indexBufferOut,
                                      VkDeviceSize *indexBufferOffsetOut,
                                      BufferHelper **indexIndirectBufferOut,
                                      VkDeviceSize *indexIndirectBufferOffsetOut);

    void release(ContextVk *contextVk);
    void destroy(RendererVk *renderer);

    static void Draw(uint32_t count, uint32_t baseVertex, CommandBuffer *commandBuffer);

  private:
    // Streaming storage for the generated index and indirect data.
    DynamicBuffer mDynamicIndexBuffer;
    DynamicBuffer mDynamicIndirectBuffer;
};
689
// This defines enum for VkPipelineStageFlagBits so that we can use it to compare and index into
// array.
enum class PipelineStage : uint16_t
{
    // Below are ordered based on Graphics Pipeline Stages
    TopOfPipe = 0,
    DrawIndirect = 1,
    VertexInput = 2,
    VertexShader = 3,
    GeometryShader = 4,
    TransformFeedback = 5,
    EarlyFragmentTest = 6,
    FragmentShader = 7,
    LateFragmentTest = 8,
    ColorAttachmentOutput = 9,

    // Compute specific pipeline Stage
    ComputeShader = 10,

    // Transfer specific pipeline Stage
    Transfer = 11,
    BottomOfPipe = 12,

    // Host specific pipeline stage
    Host = 13,

    InvalidEnum = 14,
    EnumCount = InvalidEnum,
};
using PipelineStagesMask = angle::PackedEnumBitSet<PipelineStage, uint16_t>;

// Maps a shader type to its corresponding pipeline stage.
PipelineStage GetPipelineStage(gl::ShaderType stage);
722
723 // This wraps data and API for vkCmdPipelineBarrier call
724 class PipelineBarrier : angle::NonCopyable
725 {
726 public:
PipelineBarrier()727 PipelineBarrier()
728 : mSrcStageMask(0),
729 mDstStageMask(0),
730 mMemoryBarrierSrcAccess(0),
731 mMemoryBarrierDstAccess(0),
732 mImageMemoryBarriers()
733 {}
734 ~PipelineBarrier() = default;
735
isEmpty()736 bool isEmpty() const { return mImageMemoryBarriers.empty() && mMemoryBarrierDstAccess == 0; }
737
execute(PrimaryCommandBuffer * primary)738 void execute(PrimaryCommandBuffer *primary)
739 {
740 if (isEmpty())
741 {
742 return;
743 }
744
745 // Issue vkCmdPipelineBarrier call
746 VkMemoryBarrier memoryBarrier = {};
747 uint32_t memoryBarrierCount = 0;
748 if (mMemoryBarrierDstAccess != 0)
749 {
750 memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
751 memoryBarrier.srcAccessMask = mMemoryBarrierSrcAccess;
752 memoryBarrier.dstAccessMask = mMemoryBarrierDstAccess;
753 memoryBarrierCount++;
754 }
755 primary->pipelineBarrier(
756 mSrcStageMask, mDstStageMask, 0, memoryBarrierCount, &memoryBarrier, 0, nullptr,
757 static_cast<uint32_t>(mImageMemoryBarriers.size()), mImageMemoryBarriers.data());
758
759 reset();
760 }
761
executeIndividually(PrimaryCommandBuffer * primary)762 void executeIndividually(PrimaryCommandBuffer *primary)
763 {
764 if (isEmpty())
765 {
766 return;
767 }
768
769 // Issue vkCmdPipelineBarrier call
770 VkMemoryBarrier memoryBarrier = {};
771 uint32_t memoryBarrierCount = 0;
772 if (mMemoryBarrierDstAccess != 0)
773 {
774 memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
775 memoryBarrier.srcAccessMask = mMemoryBarrierSrcAccess;
776 memoryBarrier.dstAccessMask = mMemoryBarrierDstAccess;
777 memoryBarrierCount++;
778 }
779
780 for (const VkImageMemoryBarrier &imageBarrier : mImageMemoryBarriers)
781 {
782 primary->pipelineBarrier(mSrcStageMask, mDstStageMask, 0, memoryBarrierCount,
783 &memoryBarrier, 0, nullptr, 1, &imageBarrier);
784 }
785
786 reset();
787 }
788
789 // merge two barriers into one
merge(PipelineBarrier * other)790 void merge(PipelineBarrier *other)
791 {
792 mSrcStageMask |= other->mSrcStageMask;
793 mDstStageMask |= other->mDstStageMask;
794 mMemoryBarrierSrcAccess |= other->mMemoryBarrierSrcAccess;
795 mMemoryBarrierDstAccess |= other->mMemoryBarrierDstAccess;
796 mImageMemoryBarriers.insert(mImageMemoryBarriers.end(), other->mImageMemoryBarriers.begin(),
797 other->mImageMemoryBarriers.end());
798 other->reset();
799 }
800
mergeMemoryBarrier(VkPipelineStageFlags srcStageMask,VkPipelineStageFlags dstStageMask,VkAccessFlags srcAccess,VkAccessFlags dstAccess)801 void mergeMemoryBarrier(VkPipelineStageFlags srcStageMask,
802 VkPipelineStageFlags dstStageMask,
803 VkAccessFlags srcAccess,
804 VkAccessFlags dstAccess)
805 {
806 mSrcStageMask |= srcStageMask;
807 mDstStageMask |= dstStageMask;
808 mMemoryBarrierSrcAccess |= srcAccess;
809 mMemoryBarrierDstAccess |= dstAccess;
810 }
811
mergeImageBarrier(VkPipelineStageFlags srcStageMask,VkPipelineStageFlags dstStageMask,const VkImageMemoryBarrier & imageMemoryBarrier)812 void mergeImageBarrier(VkPipelineStageFlags srcStageMask,
813 VkPipelineStageFlags dstStageMask,
814 const VkImageMemoryBarrier &imageMemoryBarrier)
815 {
816 ASSERT(imageMemoryBarrier.pNext == nullptr);
817 mSrcStageMask |= srcStageMask;
818 mDstStageMask |= dstStageMask;
819 mImageMemoryBarriers.push_back(imageMemoryBarrier);
820 }
821
reset()822 void reset()
823 {
824 mSrcStageMask = 0;
825 mDstStageMask = 0;
826 mMemoryBarrierSrcAccess = 0;
827 mMemoryBarrierDstAccess = 0;
828 mImageMemoryBarriers.clear();
829 }
830
831 void addDiagnosticsString(std::ostringstream &out) const;
832
833 private:
834 VkPipelineStageFlags mSrcStageMask;
835 VkPipelineStageFlags mDstStageMask;
836 VkAccessFlags mMemoryBarrierSrcAccess;
837 VkAccessFlags mMemoryBarrierDstAccess;
838 std::vector<VkImageMemoryBarrier> mImageMemoryBarriers;
839 };
840 using PipelineBarrierArray = angle::PackedEnumMap<PipelineStage, PipelineBarrier>;
841
// Forward declaration.
class FramebufferHelper;
843
class BufferHelper final : public ReadWriteResource
{
  public:
    BufferHelper();
    ~BufferHelper() override;

    // Creates the VkBuffer and binds memory with the requested property flags.
    angle::Result init(ContextVk *contextVk,
                       const VkBufferCreateInfo &createInfo,
                       VkMemoryPropertyFlags memoryPropertyFlags);
    // Initializes the buffer around memory imported from an external client buffer
    // (EGL client-buffer interop).
    angle::Result initExternal(ContextVk *contextVk,
                               VkMemoryPropertyFlags memoryProperties,
                               const VkBufferCreateInfo &requestedCreateInfo,
                               GLeglClientBufferEXT clientBuffer);
    void destroy(RendererVk *renderer);

    void release(RendererVk *renderer);

    BufferSerial getBufferSerial() const { return mSerial; }
    bool valid() const { return mBuffer.valid(); }
    const Buffer &getBuffer() const { return mBuffer; }
    VkDeviceSize getSize() const { return mSize; }
    // Returns the mapped pointer; only valid to call while isMapped() is true.
    uint8_t *getMappedMemory() const
    {
        ASSERT(isMapped());
        return mMemory.getMappedMemory();
    }
    // Whether the backing memory was allocated with VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT.
    bool isHostVisible() const
    {
        return (mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    }
    // Whether the backing memory is host-coherent (no explicit flush/invalidate needed).
    bool isCoherent() const
    {
        return (mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
    }

    bool isMapped() const { return mMemory.getMappedMemory() != nullptr; }
    bool isExternalBuffer() const { return mMemory.isExternalBuffer(); }

    // Also implicitly sets up the correct barriers.
    angle::Result copyFromBuffer(ContextVk *contextVk,
                                 BufferHelper *srcBuffer,
                                 uint32_t regionCount,
                                 const VkBufferCopy *copyRegions);

    // Maps the entire buffer and returns a pointer to its start.
    angle::Result map(ContextVk *contextVk, uint8_t **ptrOut)
    {
        return mMemory.map(contextVk, mSize, ptrOut);
    }

    // Maps the entire buffer and returns a pointer advanced by |offset| bytes.
    angle::Result mapWithOffset(ContextVk *contextVk, uint8_t **ptrOut, size_t offset)
    {
        uint8_t *mapBufPointer;
        ANGLE_TRY(mMemory.map(contextVk, mSize, &mapBufPointer));
        *ptrOut = mapBufPointer + offset;
        return angle::Result::Continue;
    }

    void unmap(RendererVk *renderer);

    // After a sequence of writes, call flush to ensure the data is visible to the device.
    angle::Result flush(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size);

    // After a sequence of writes, call invalidate to ensure the data is visible to the host.
    angle::Result invalidate(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size);

    // Records a queue-family ownership transfer barrier into |commandBuffer|.
    void changeQueue(uint32_t newQueueFamilyIndex, CommandBuffer *commandBuffer);

    // Performs an ownership transfer from an external instance or API.
    void acquireFromExternal(ContextVk *contextVk,
                             uint32_t externalQueueFamilyIndex,
                             uint32_t rendererQueueFamilyIndex,
                             CommandBuffer *commandBuffer);

    // Performs an ownership transfer to an external instance or API.
    void releaseToExternal(ContextVk *contextVk,
                           uint32_t rendererQueueFamilyIndex,
                           uint32_t externalQueueFamilyIndex,
                           CommandBuffer *commandBuffer);

    // Returns true if the buffer is owned by an external API or instance.
    bool isReleasedToExternal() const;

    // Record a read/write barrier into |barrier|. The bool return presumably indicates
    // whether a barrier was actually needed — TODO confirm against the .cpp implementation.
    bool recordReadBarrier(VkAccessFlags readAccessType,
                           VkPipelineStageFlags readStage,
                           PipelineBarrier *barrier);

    bool recordWriteBarrier(VkAccessFlags writeAccessType,
                            VkPipelineStageFlags writeStage,
                            PipelineBarrier *barrier);

  private:
    angle::Result initializeNonZeroMemory(Context *context, VkDeviceSize size);

    // Vulkan objects.
    Buffer mBuffer;
    BufferMemory mMemory;

    // Cached properties.
    VkMemoryPropertyFlags mMemoryPropertyFlags;
    VkDeviceSize mSize;
    uint32_t mCurrentQueueFamilyIndex;

    // For memory barriers.
    VkFlags mCurrentWriteAccess;
    VkFlags mCurrentReadAccess;
    VkPipelineStageFlags mCurrentWriteStages;
    VkPipelineStageFlags mCurrentReadStages;

    BufferSerial mSerial;
};
954
// How a buffer is accessed by recorded commands; used to allow simultaneous reads while
// serializing writes.
enum class BufferAccess
{
    Read,
    Write,
};
960
// Whether a resource write may alias a prior use within the same command buffer.
enum class AliasingMode
{
    Allowed,
    Disallowed,
};
966
// Stores clear values indexed by packed attachment index.
class PackedClearValuesArray final
{
  public:
    PackedClearValuesArray();
    ~PackedClearValuesArray();

    PackedClearValuesArray(const PackedClearValuesArray &other);
    PackedClearValuesArray &operator=(const PackedClearValuesArray &rhs);
    // Stores |clearValue| at |index| (defined in the .cpp).
    void store(PackedAttachmentIndex index,
               VkImageAspectFlags aspectFlags,
               const VkClearValue &clearValue);
    void storeNoDepthStencil(PackedAttachmentIndex index, const VkClearValue &clearValue);
    // Unchecked access by packed attachment index.
    const VkClearValue &operator[](PackedAttachmentIndex index) const
    {
        return mValues[index.get()];
    }
    // Raw pointer to the contiguous clear-value storage, e.g. for VkRenderPassBeginInfo.
    const VkClearValue *data() const { return mValues.data(); }

  private:
    gl::AttachmentArray<VkClearValue> mValues;
};
989
// Stores ImageHelper pointers indexed by packed attachment index.
class PackedImageAttachmentArray final
{
  public:
    PackedImageAttachmentArray() : mImages{} {}
    ~PackedImageAttachmentArray() = default;
    // Unchecked access by packed attachment index; returns a mutable slot.
    ImageHelper *&operator[](PackedAttachmentIndex index) { return mImages[index.get()]; }
    // Clears all attachment slots back to nullptr.
    void reset() { mImages.fill(nullptr); }

  private:
    gl::AttachmentArray<ImageHelper *> mImages;
};
1002
1003 // The following are used to help track the state of an invalidated attachment.
1004
1005 // This value indicates an "infinite" CmdCount that is not valid for comparing
1006 constexpr uint32_t kInfiniteCmdCount = 0xFFFFFFFF;
1007
1008 // CommandBufferHelper (CBH) class wraps ANGLE's custom command buffer
1009 // class, SecondaryCommandBuffer. This provides a way to temporarily
1010 // store Vulkan commands that be can submitted in-line to a primary
1011 // command buffer at a later time.
1012 // The current plan is for the main ANGLE thread to record commands
1013 // into the CBH and then pass the CBH off to a worker thread that will
1014 // process the commands into a primary command buffer and then submit
1015 // those commands to the queue.
class CommandBufferHelper : angle::NonCopyable
{
  public:
    CommandBufferHelper();
    ~CommandBufferHelper();

    // General Functions (non-renderPass specific)
    angle::Result initialize(Context *context,
                             bool isRenderPassCommandBuffer,
                             CommandPool *commandPool);

    // Track buffer accesses so the appropriate pipeline barriers can be recorded.
    void bufferRead(ContextVk *contextVk,
                    VkAccessFlags readAccessType,
                    PipelineStage readStage,
                    BufferHelper *buffer);
    void bufferWrite(ContextVk *contextVk,
                     VkAccessFlags writeAccessType,
                     PipelineStage writeStage,
                     AliasingMode aliasingMode,
                     BufferHelper *buffer);

    // Track image accesses so layout transitions and barriers can be recorded.
    void imageRead(ContextVk *contextVk,
                   VkImageAspectFlags aspectFlags,
                   ImageLayout imageLayout,
                   ImageHelper *image);
    void imageWrite(ContextVk *contextVk,
                    gl::LevelIndex level,
                    uint32_t layerStart,
                    uint32_t layerCount,
                    VkImageAspectFlags aspectFlags,
                    ImageLayout imageLayout,
                    AliasingMode aliasingMode,
                    ImageHelper *image);

    // Register render-target attachments used by a started render pass.
    void colorImagesDraw(ResourceUseList *resourceUseList,
                         ImageHelper *image,
                         ImageHelper *resolveImage,
                         PackedAttachmentIndex packedAttachmentIndex);
    void depthStencilImagesDraw(ResourceUseList *resourceUseList,
                                gl::LevelIndex level,
                                uint32_t layerStart,
                                uint32_t layerCount,
                                ImageHelper *image,
                                ImageHelper *resolveImage);

    CommandBuffer &getCommandBuffer() { return mCommandBuffer; }
    CommandPool *getCommandPool() { return mCommandPool; }

    // Replays the recorded commands into |primary|.
    angle::Result flushToPrimary(Context *context,
                                 PrimaryCommandBuffer *primary,
                                 const RenderPass *renderPass);

    // Records the accumulated pipeline barriers into |primary|.
    void executeBarriers(const angle::FeaturesVk &features, PrimaryCommandBuffer *primary);

    void setHasRenderPass(bool hasRenderPass) { mIsRenderPassCommandBuffer = hasRenderPass; }

    // The markOpen and markClosed functions are to aid in proper use of the
    // CommandBufferHelper. We saw invalid use due to threading issues that can be easily
    // caught by marking when it's safe (open) to write to the commandbuffer.
#if defined(ANGLE_ENABLE_ASSERTS)
    void markOpen() { mCommandBuffer.open(); }
    void markClosed() { mCommandBuffer.close(); }
#else
    void markOpen() {}
    void markClosed() {}
#endif

    angle::Result reset(Context *context);

    // Returns true if we have no work to execute. For renderpass command buffer, even if the
    // underlying command buffer is empty, we may still have a renderpass with an empty command
    // buffer just to do the clear.
    bool empty() const
    {
        return mIsRenderPassCommandBuffer ? !mRenderPassStarted : mCommandBuffer.empty();
    }
    // RenderPass related functions. This is equivalent to !empty(), but only when you know this
    // is a RenderPass command buffer
    bool started() const
    {
        ASSERT(mIsRenderPassCommandBuffer);
        return mRenderPassStarted;
    }

    // Finalize the layout if image has any deferred layout transition.
    void finalizeImageLayout(Context *context, const ImageHelper *image);

    angle::Result beginRenderPass(ContextVk *contextVk,
                                  const Framebuffer &framebuffer,
                                  const gl::Rectangle &renderArea,
                                  const RenderPassDesc &renderPassDesc,
                                  const AttachmentOpsArray &renderPassAttachmentOps,
                                  const vk::PackedAttachmentCount colorAttachmentCount,
                                  const PackedAttachmentIndex depthStencilAttachmentIndex,
                                  const PackedClearValuesArray &clearValues,
                                  CommandBuffer **commandBufferOut);

    angle::Result endRenderPass(ContextVk *contextVk);

    void updateStartedRenderPassWithDepthMode(bool readOnlyDepthStencilMode);

    void beginTransformFeedback(size_t validBufferCount,
                                const VkBuffer *counterBuffers,
                                bool rebindBuffers);

    void endTransformFeedback();

    // Mark attachments invalidated (e.g. by glInvalidateFramebuffer) so store ops can be
    // optimized to DONT_CARE.
    void invalidateRenderPassColorAttachment(PackedAttachmentIndex attachmentIndex);
    void invalidateRenderPassDepthAttachment(const gl::DepthStencilState &dsState,
                                             const gl::Rectangle &invalidateArea);
    void invalidateRenderPassStencilAttachment(const gl::DepthStencilState &dsState,
                                               const gl::Rectangle &invalidateArea);

    // True if a write command was recorded after the attachment was invalidated (so the
    // invalidation no longer holds).
    bool hasWriteAfterInvalidate(uint32_t cmdCountInvalidated, uint32_t cmdCountDisabled)
    {
        ASSERT(mIsRenderPassCommandBuffer);
        return (cmdCountInvalidated != kInfiniteCmdCount &&
                std::min(cmdCountDisabled, mCommandBuffer.getRenderPassWriteCommandCount()) !=
                    cmdCountInvalidated);
    }

    // True if the attachment was invalidated and no write has occurred since.
    bool isInvalidated(uint32_t cmdCountInvalidated, uint32_t cmdCountDisabled)
    {
        ASSERT(mIsRenderPassCommandBuffer);
        return cmdCountInvalidated != kInfiniteCmdCount &&
               std::min(cmdCountDisabled, mCommandBuffer.getRenderPassWriteCommandCount()) ==
                   cmdCountInvalidated;
    }

    void updateRenderPassColorClear(PackedAttachmentIndex colorIndex,
                                    const VkClearValue &colorClearValue);
    void updateRenderPassDepthStencilClear(VkImageAspectFlags aspectFlags,
                                           const VkClearValue &clearValue);

    const gl::Rectangle &getRenderArea() const
    {
        ASSERT(mIsRenderPassCommandBuffer);
        return mRenderArea;
    }

    // If render pass is started with a small render area due to a small scissor, and if a new
    // larger scissor is specified, grow the render area to accommodate it.
    void growRenderArea(ContextVk *contextVk, const gl::Rectangle &newRenderArea);

    void resumeTransformFeedback();
    void pauseTransformFeedback();
    bool isTransformFeedbackStarted() const { return mValidTransformFeedbackBufferCount > 0; }
    bool isTransformFeedbackActiveUnpaused() const { return mIsTransformFeedbackActiveUnpaused; }

    // Returns the current counter value and resets it to zero.
    uint32_t getAndResetCounter()
    {
        ASSERT(mIsRenderPassCommandBuffer);
        uint32_t count = mCounter;
        mCounter       = 0;
        return count;
    }

    VkFramebuffer getFramebufferHandle() const
    {
        ASSERT(mIsRenderPassCommandBuffer);
        return mFramebuffer.getHandle();
    }

    // Resource-usage queries over the tracked buffer/image sets.
    bool usesBuffer(const BufferHelper &buffer) const;
    bool usesBufferForWrite(const BufferHelper &buffer) const;
    bool usesImageInRenderPass(const ImageHelper &image) const;
    size_t getUsedBuffersCount() const { return mUsedBuffers.size(); }

    // Dumping the command stream is disabled by default.
    static constexpr bool kEnableCommandStreamDiagnostics = false;

    void onDepthAccess(ResourceAccess access);
    void onStencilAccess(ResourceAccess access);

    void updateRenderPassForResolve(ContextVk *contextVk,
                                    Framebuffer *newFramebuffer,
                                    const RenderPassDesc &renderPassDesc);

    // True if depth or stencil is written, or either aspect's load op is CLEAR.
    bool hasDepthStencilWriteOrClear() const
    {
        return mDepthAccess == ResourceAccess::Write || mStencilAccess == ResourceAccess::Write ||
               mAttachmentOps[mDepthStencilAttachmentIndex].loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR ||
               mAttachmentOps[mDepthStencilAttachmentIndex].stencilLoadOp ==
                   VK_ATTACHMENT_LOAD_OP_CLEAR;
    }

    void addCommandDiagnostics(ContextVk *contextVk);

    const RenderPassDesc &getRenderPassDesc() const { return mRenderPassDesc; }
    const AttachmentOpsArray &getAttachmentOps() const { return mAttachmentOps; }

    bool hasRenderPass() const { return mIsRenderPassCommandBuffer; }

    void setHasShaderStorageOutput() { mHasShaderStorageOutput = true; }
    bool hasShaderStorageOutput() const { return mHasShaderStorageOutput; }

    // Records that glMemoryBarrier was issued; only meaningful if commands exist.
    void setGLMemoryBarrierIssued()
    {
        if (!empty())
        {
            mHasGLMemoryBarrierIssued = true;
        }
    }
    bool hasGLMemoryBarrierIssued() const { return mHasGLMemoryBarrierIssued; }
    void setImageOptimizeForPresent(ImageHelper *image) { mImageOptimizeForPresent = image; }

  private:
    angle::Result initializeCommandBuffer(Context *context);

    bool onDepthStencilAccess(ResourceAccess access,
                              uint32_t *cmdCountInvalidated,
                              uint32_t *cmdCountDisabled);
    void restoreDepthContent();
    void restoreStencilContent();

    // We can't determine the image layout at the renderpass start time since their full usage
    // isn't known until later time. We finalize the layout when either ImageHelper object is
    // released or when renderpass ends.
    void finalizeColorImageLayout(Context *context,
                                  ImageHelper *image,
                                  PackedAttachmentIndex packedAttachmentIndex,
                                  bool isResolveImage);
    void finalizeDepthStencilImageLayout(Context *context);
    void finalizeDepthStencilResolveImageLayout(Context *context);
    void finalizeDepthStencilLoadStore(Context *context);
    void finalizeDepthStencilLoadStoreOps(Context *context,
                                          ResourceAccess access,
                                          RenderPassLoadOp *loadOp,
                                          RenderPassStoreOp *storeOp);
    void finalizeDepthStencilImageLayoutAndLoadStore(Context *context);

    void updateImageLayoutAndBarrier(Context *context,
                                     ImageHelper *image,
                                     VkImageAspectFlags aspectFlags,
                                     ImageLayout imageLayout);

    // Allocator used by this class. Using a pool allocator per CBH to avoid threading issues
    // that occur w/ shared allocator between multiple CBHs.
    angle::PoolAllocator mAllocator;

    // General state (non-renderPass related)
    PipelineBarrierArray mPipelineBarriers;
    PipelineStagesMask mPipelineBarrierMask;
    CommandBuffer mCommandBuffer;
    // The command pool mCommandBuffer is allocated from. Only used with Vulkan secondary command
    // buffers (as opposed to ANGLE's SecondaryCommandBuffer).
    CommandPool *mCommandPool;

    // RenderPass state
    uint32_t mCounter;
    RenderPassDesc mRenderPassDesc;
    AttachmentOpsArray mAttachmentOps;
    Framebuffer mFramebuffer;
    gl::Rectangle mRenderArea;
    PackedClearValuesArray mClearValues;
    bool mRenderPassStarted;

    // Transform feedback state
    gl::TransformFeedbackBuffersArray<VkBuffer> mTransformFeedbackCounterBuffers;
    uint32_t mValidTransformFeedbackBufferCount;
    bool mRebindTransformFeedbackBuffers;
    bool mIsTransformFeedbackActiveUnpaused;

    bool mIsRenderPassCommandBuffer;

    // Whether the command buffers contains any draw/dispatch calls that possibly output data
    // through storage buffers and images. This is used to determine whether glMemoryBarrier*
    // should flush the command buffer.
    bool mHasShaderStorageOutput;
    // Whether glMemoryBarrier has been called while commands are recorded in this command buffer.
    // This is used to know when to check and potentially flush the command buffer if storage
    // buffers and images are used in it.
    bool mHasGLMemoryBarrierIssued;

    // State tracking for the maximum (Write being the highest) depth access during the entire
    // renderpass. Note that this does not include VK_ATTACHMENT_LOAD_OP_CLEAR which is tracked
    // separately. This is done this way to allow clear op to being optimized out when we find out
    // that the depth buffer is not being used during the entire renderpass and store op is
    // VK_ATTACHMENT_STORE_OP_DONTCARE.
    ResourceAccess mDepthAccess;
    // Similar tracking to mDepthAccess but for the stencil aspect.
    ResourceAccess mStencilAccess;

    // State tracking for whether to optimize the storeOp to DONT_CARE
    uint32_t mDepthCmdCountInvalidated;
    uint32_t mDepthCmdCountDisabled;
    uint32_t mStencilCmdCountInvalidated;
    uint32_t mStencilCmdCountDisabled;
    gl::Rectangle mDepthInvalidateArea;
    gl::Rectangle mStencilInvalidateArea;

    // Keep track of the depth/stencil attachment index
    PackedAttachmentIndex mDepthStencilAttachmentIndex;

    // Tracks resources used in the command buffer.
    // For Buffers, we track the read/write access type so we can enable simultaneous reads.
    // Images have unique layouts unlike buffers therefore we don't support multi-read.
    angle::FastIntegerMap<BufferAccess> mUsedBuffers;
    angle::FastIntegerSet mRenderPassUsedImages;

    ImageHelper *mDepthStencilImage;
    ImageHelper *mDepthStencilResolveImage;
    gl::LevelIndex mDepthStencilLevelIndex;
    uint32_t mDepthStencilLayerIndex;
    uint32_t mDepthStencilLayerCount;

    // Array size of mColorImages
    PackedAttachmentCount mColorImagesCount;
    // Attached render target images. Color and depth resolve images always come last.
    PackedImageAttachmentArray mColorImages;
    PackedImageAttachmentArray mColorResolveImages;
    // This is the last renderpass before present, and its image will be presented. We can use
    // the final layout of the renderpass to transition it to the presentable layout.
    ImageHelper *mImageOptimizeForPresent;
};
1331
1332 // The following class helps support both Vulkan and ANGLE secondary command buffers by
1333 // encapsulating their differences.
class CommandBufferRecycler
{
  public:
    CommandBufferRecycler();
    ~CommandBufferRecycler();

    void onDestroy();

    // Returns a CommandBufferHelper from the free list, or creates a new one.
    angle::Result getCommandBufferHelper(Context *context,
                                         bool hasRenderPass,
                                         CommandPool *commandPool,
                                         CommandBufferHelper **commandBufferHelperOut);

    // Returns |commandBuffer| to the free list and nulls out the caller's pointer.
    void recycleCommandBufferHelper(VkDevice device, CommandBufferHelper **commandBuffer);

    void resetCommandBufferHelper(CommandBuffer &&commandBuffer);

    // Transfers ownership of the secondary command buffers pending reset to the caller.
    SecondaryCommandBufferList &&getCommandBuffersToReset()
    {
        return std::move(mSecondaryCommandBuffersToReset);
    }

  private:
    void recycleImpl(VkDevice device, CommandBufferHelper **commandBuffer);

    std::vector<vk::CommandBufferHelper *> mCommandBufferHelperFreeList;
    SecondaryCommandBufferList mSecondaryCommandBuffersToReset;
};
1362
1363 // Imagine an image going through a few layout transitions:
1364 //
1365 // srcStage 1 dstStage 2 srcStage 2 dstStage 3
1366 // Layout 1 ------Transition 1-----> Layout 2 ------Transition 2------> Layout 3
1367 // srcAccess 1 dstAccess 2 srcAccess 2 dstAccess 3
1368 // \_________________ ___________________/
1369 // \/
1370 // A transition
1371 //
1372 // Every transition requires 6 pieces of information: from/to layouts, src/dst stage masks and
1373 // src/dst access masks. At the moment we decide to transition the image to Layout 2 (i.e.
1374 // Transition 1), we need to have Layout 1, srcStage 1 and srcAccess 1 stored as history of the
1375 // image. To perform the transition, we need to know Layout 2, dstStage 2 and dstAccess 2.
1376 // Additionally, we need to know srcStage 2 and srcAccess 2 to retain them for the next transition.
1377 //
1378 // That is, with the history kept, on every new transition we need 5 pieces of new information:
1379 // layout/dstStage/dstAccess to transition into the layout, and srcStage/srcAccess for the future
1380 // transition out from it. Given the small number of possible combinations of these values, an
// enum is used where each value encapsulates these 5 pieces of information:
1382 //
1383 // +--------------------------------+
1384 // srcStage 1 | dstStage 2 srcStage 2 | dstStage 3
1385 // Layout 1 ------Transition 1-----> Layout 2 ------Transition 2------> Layout 3
1386 // srcAccess 1 |dstAccess 2 srcAccess 2| dstAccess 3
1387 // +--------------- ---------------+
1388 // \/
1389 // One enum value
1390 //
1391 // Note that, while generally dstStage for the to-transition and srcStage for the from-transition
1392 // are the same, they may occasionally be BOTTOM_OF_PIPE and TOP_OF_PIPE respectively.
enum class ImageLayout
{
    Undefined = 0,
    // Framebuffer attachment layouts are placed first, so they can fit in fewer bits in
    // PackedAttachmentOpsDesc.
    ColorAttachment,
    ColorAttachmentAndFragmentShaderRead,
    ColorAttachmentAndAllShadersRead,
    // "DS" in the following entries abbreviates depth/stencil.
    DSAttachmentWriteAndFragmentShaderRead,
    DSAttachmentWriteAndAllShadersRead,
    DSAttachmentReadAndFragmentShaderRead,
    DSAttachmentReadAndAllShadersRead,
    DepthStencilAttachmentReadOnly,
    DepthStencilAttachment,
    DepthStencilResolveAttachment,
    Present,
    SharedPresent,
    // The rest of the layouts.
    ExternalPreInitialized,
    ExternalShadersReadOnly,
    ExternalShadersWrite,
    TransferSrc,
    TransferDst,
    VertexShaderReadOnly,
    VertexShaderWrite,
    // PreFragment == Vertex, Tessellation and Geometry stages
    PreFragmentShadersReadOnly,
    PreFragmentShadersWrite,
    FragmentShaderReadOnly,
    FragmentShaderWrite,
    ComputeShaderReadOnly,
    ComputeShaderWrite,
    AllGraphicsShadersReadOnly,
    AllGraphicsShadersWrite,

    InvalidEnum,
    EnumCount = InvalidEnum,
};
1431
1432 VkImageCreateFlags GetImageCreateFlags(gl::TextureType textureType);
1433
1434 ImageLayout GetImageLayoutFromGLImageLayout(GLenum layout);
1435
1436 GLenum ConvertImageLayoutToGLImageLayout(ImageLayout imageLayout);
1437
1438 VkImageLayout ConvertImageLayoutToVkImageLayout(ImageLayout imageLayout);
1439
1440 // How the ImageHelper object is being used by the renderpass
enum class RenderPassUsage
{
    // Attached to the render target of the current renderpass commands. It could be read/write
    // or read only access.
    RenderTargetAttachment,
    // This is a special case of RenderTargetAttachment where the render target access is read
    // only. Right now it is only tracked for the depth/stencil attachment.
    ReadOnlyAttachment,
    // Attached to the texture sampler of the current renderpass commands
    TextureSampler,

    InvalidEnum,
    EnumCount = InvalidEnum,
};
1455 using RenderPassUsageFlags = angle::PackedEnumBitSet<RenderPassUsage, uint16_t>;
1456
1457 bool FormatHasNecessaryFeature(RendererVk *renderer,
1458 angle::FormatID formatID,
1459 VkImageTiling tilingMode,
1460 VkFormatFeatureFlags featureBits);
1461
1462 bool CanCopyWithTransfer(RendererVk *renderer,
1463 angle::FormatID srcFormatID,
1464 VkImageTiling srcTilingMode,
1465 angle::FormatID dstFormatID,
1466 VkImageTiling dstTilingMode);
1467
1468 class ImageHelper final : public Resource, public angle::Subject
1469 {
1470 public:
1471 ImageHelper();
1472 ImageHelper(ImageHelper &&other);
1473 ~ImageHelper() override;
1474
1475 void initStagingBuffer(RendererVk *renderer,
1476 size_t imageCopyBufferAlignment,
1477 VkBufferUsageFlags usageFlags,
1478 size_t initialSize);
1479
1480 angle::Result init(Context *context,
1481 gl::TextureType textureType,
1482 const VkExtent3D &extents,
1483 const Format &format,
1484 GLint samples,
1485 VkImageUsageFlags usage,
1486 gl::LevelIndex firstLevel,
1487 uint32_t mipLevels,
1488 uint32_t layerCount,
1489 bool isRobustResourceInitEnabled,
1490 bool hasProtectedContent);
1491 angle::Result initMSAASwapchain(Context *context,
1492 gl::TextureType textureType,
1493 const VkExtent3D &extents,
1494 bool rotatedAspectRatio,
1495 const Format &format,
1496 GLint samples,
1497 VkImageUsageFlags usage,
1498 gl::LevelIndex firstLevel,
1499 uint32_t mipLevels,
1500 uint32_t layerCount,
1501 bool isRobustResourceInitEnabled,
1502 bool hasProtectedContent);
1503 angle::Result initExternal(Context *context,
1504 gl::TextureType textureType,
1505 const VkExtent3D &extents,
1506 angle::FormatID intendedFormatID,
1507 angle::FormatID actualFormatID,
1508 GLint samples,
1509 VkImageUsageFlags usage,
1510 VkImageCreateFlags additionalCreateFlags,
1511 ImageLayout initialLayout,
1512 const void *externalImageCreateInfo,
1513 gl::LevelIndex firstLevel,
1514 uint32_t mipLevels,
1515 uint32_t layerCount,
1516 bool isRobustResourceInitEnabled,
1517 bool hasProtectedContent);
1518 angle::Result initMemory(Context *context,
1519 bool hasProtectedContent,
1520 const MemoryProperties &memoryProperties,
1521 VkMemoryPropertyFlags flags);
1522 angle::Result initExternalMemory(
1523 Context *context,
1524 const MemoryProperties &memoryProperties,
1525 const VkMemoryRequirements &memoryRequirements,
1526 const VkSamplerYcbcrConversionCreateInfo *samplerYcbcrConversionCreateInfo,
1527 uint32_t extraAllocationInfoCount,
1528 const void **extraAllocationInfo,
1529 uint32_t currentQueueFamilyIndex,
1530 VkMemoryPropertyFlags flags);
1531 angle::Result initLayerImageView(Context *context,
1532 gl::TextureType textureType,
1533 VkImageAspectFlags aspectMask,
1534 const gl::SwizzleState &swizzleMap,
1535 ImageView *imageViewOut,
1536 LevelIndex baseMipLevelVk,
1537 uint32_t levelCount,
1538 uint32_t baseArrayLayer,
1539 uint32_t layerCount,
1540 gl::SrgbWriteControlMode mode) const;
1541 angle::Result initLayerImageViewWithFormat(Context *context,
1542 gl::TextureType textureType,
1543 VkFormat imageFormat,
1544 VkImageAspectFlags aspectMask,
1545 const gl::SwizzleState &swizzleMap,
1546 ImageView *imageViewOut,
1547 LevelIndex baseMipLevelVk,
1548 uint32_t levelCount,
1549 uint32_t baseArrayLayer,
1550 uint32_t layerCount) const;
1551 angle::Result initReinterpretedLayerImageView(Context *context,
1552 gl::TextureType textureType,
1553 VkImageAspectFlags aspectMask,
1554 const gl::SwizzleState &swizzleMap,
1555 ImageView *imageViewOut,
1556 LevelIndex baseMipLevelVk,
1557 uint32_t levelCount,
1558 uint32_t baseArrayLayer,
1559 uint32_t layerCount,
1560 VkImageUsageFlags imageUsageFlags,
1561 angle::FormatID imageViewFormat) const;
1562 angle::Result initImageView(Context *context,
1563 gl::TextureType textureType,
1564 VkImageAspectFlags aspectMask,
1565 const gl::SwizzleState &swizzleMap,
1566 ImageView *imageViewOut,
1567 LevelIndex baseMipLevelVk,
1568 uint32_t levelCount);
1569 // Create a 2D[Array] for staging purposes. Used by:
1570 //
1571 // - TextureVk::copySubImageImplWithDraw
1572 // - FramebufferVk::readPixelsImpl
1573 //
1574 angle::Result init2DStaging(Context *context,
1575 bool hasProtectedContent,
1576 const MemoryProperties &memoryProperties,
1577 const gl::Extents &glExtents,
1578 angle::FormatID intendedFormatID,
1579 angle::FormatID actualFormatID,
1580 VkImageUsageFlags usage,
1581 uint32_t layerCount);
1582 // Create an image for staging purposes. Used by:
1583 //
1584 // - TextureVk::copyAndStageImageData
1585 //
1586 angle::Result initStaging(Context *context,
1587 bool hasProtectedContent,
1588 const MemoryProperties &memoryProperties,
1589 VkImageType imageType,
1590 const VkExtent3D &extents,
1591 angle::FormatID intendedFormatID,
1592 angle::FormatID actualFormatID,
1593 GLint samples,
1594 VkImageUsageFlags usage,
1595 uint32_t mipLevels,
1596 uint32_t layerCount);
1597 // Create a multisampled image for use as the implicit image in multisampled render to texture
1598 // rendering. If LAZILY_ALLOCATED memory is available, it will prefer that.
1599 angle::Result initImplicitMultisampledRenderToTexture(Context *context,
1600 bool hasProtectedContent,
1601 const MemoryProperties &memoryProperties,
1602 gl::TextureType textureType,
1603 GLint samples,
1604 const ImageHelper &resolveImage,
1605 bool isRobustResourceInitEnabled);
1606
1607 // Helper for initExternal and users to automatically derive the appropriate VkImageCreateInfo
1608 // pNext chain based on the given parameters, and adjust create flags. In some cases, these
1609 // shouldn't be automatically derived, for example when importing images through
1610 // EXT_external_objects and ANGLE_external_objects_flags.
1611 static constexpr uint32_t kImageListFormatCount = 2;
1612 using ImageListFormats = std::array<VkFormat, kImageListFormatCount>;
1613 static const void *DeriveCreateInfoPNext(
1614 Context *context,
1615 angle::FormatID actualFormatID,
1616 const void *pNext,
1617 VkImageFormatListCreateInfoKHR *imageFormatListInfoStorage,
1618 ImageListFormats *imageListFormatsStorage,
1619 VkImageCreateFlags *createFlagsOut);
1620
1621 // Release the underlining VkImage object for garbage collection.
1622 void releaseImage(RendererVk *renderer);
1623 // Similar to releaseImage, but also notify all contexts in the same share group to stop
1624 // accessing to it.
1625 void releaseImageFromShareContexts(RendererVk *renderer, ContextVk *contextVk);
1626 void releaseStagingBuffer(RendererVk *renderer);
1627
    // Whether a VkImage is currently held by this helper.
    bool valid() const { return mImage.valid(); }

    VkImageAspectFlags getAspectFlags() const;
    // True if image contains both depth & stencil aspects
    bool isCombinedDepthStencilFormat() const;
    void destroy(RendererVk *renderer);
    void release(RendererVk *renderer) { destroy(renderer); }

    // Wraps an externally-created 2D VkImage handle without taking ownership of it.
    void init2DWeakReference(Context *context,
                             VkImage handle,
                             const gl::Extents &glExtents,
                             bool rotatedAspectRatio,
                             angle::FormatID intendedFormatID,
                             angle::FormatID actualFormatID,
                             GLint samples,
                             bool isRobustResourceInitEnabled);
    void resetImageWeakReference();

    const Image &getImage() const { return mImage; }
    const DeviceMemory &getDeviceMemory() const { return mDeviceMemory; }

    const VkImageCreateInfo &getVkImageCreateInfo() const { return mVkImageCreateInfo; }
    void setTilingMode(VkImageTiling tilingMode) { mTilingMode = tilingMode; }
    VkImageTiling getTilingMode() const { return mTilingMode; }
    VkImageCreateFlags getCreateFlags() const { return mCreateFlags; }
    VkImageUsageFlags getUsage() const { return mUsage; }
    VkImageType getType() const { return mImageType; }
    const VkExtent3D &getExtents() const { return mExtents; }
    const VkExtent3D getRotatedExtents() const;
    // The following accessors return cached properties and require a valid underlying image.
    uint32_t getLayerCount() const
    {
        ASSERT(valid());
        return mLayerCount;
    }
    uint32_t getLevelCount() const
    {
        ASSERT(valid());
        return mLevelCount;
    }
    // The format the application asked for (may be emulated by a different actual format).
    angle::FormatID getIntendedFormatID() const
    {
        ASSERT(valid());
        return mIntendedFormatID;
    }
    const angle::Format &getIntendedFormat() const
    {
        ASSERT(valid());
        return angle::Format::Get(mIntendedFormatID);
    }
    // The format the VkImage is actually using.
    angle::FormatID getActualFormatID() const
    {
        ASSERT(valid());
        return mActualFormatID;
    }
    VkFormat getActualVkFormat() const
    {
        ASSERT(valid());
        return GetVkFormatFromFormatID(mActualFormatID);
    }
    const angle::Format &getActualFormat() const
    {
        ASSERT(valid());
        return angle::Format::Get(mActualFormatID);
    }
    bool hasEmulatedImageChannels() const;
    // Whether the actual format differs from the intended (application-requested) format.
    bool hasEmulatedImageFormat() const { return mActualFormatID != mIntendedFormatID; }
    GLint getSamples() const { return mSamples; }

    ImageSerial getImageSerial() const
    {
        ASSERT(valid() && mImageSerial.valid());
        return mImageSerial;
    }
1701
setCurrentImageLayout(ImageLayout newLayout)1702 void setCurrentImageLayout(ImageLayout newLayout)
1703 {
1704 // Once you transition to ImageLayout::SharedPresent, you never transition out of it.
1705 if (mCurrentLayout == ImageLayout::SharedPresent)
1706 {
1707 return;
1708 }
1709 mCurrentLayout = newLayout;
1710 }
    ImageLayout getCurrentImageLayout() const { return mCurrentLayout; }
    VkImageLayout getCurrentLayout() const;

    // Returns the extents of the given mip level.
    gl::Extents getLevelExtents(LevelIndex levelVk) const;
    // Helper function to calculate the extents of a render target created for a certain mip of the
    // image.
    gl::Extents getLevelExtents2D(LevelIndex levelVk) const;
    gl::Extents getRotatedLevelExtents2D(LevelIndex levelVk) const;

    bool isDepthOrStencil() const;

    // Track how the image is being used by the currently open render pass.
    void setRenderPassUsageFlag(RenderPassUsage flag);
    void clearRenderPassUsageFlag(RenderPassUsage flag);
    void resetRenderPassUsageFlags();
    bool hasRenderPassUsageFlag(RenderPassUsage flag) const;
    bool usedByCurrentRenderPassAsAttachmentAndSampler() const;
1727
    // Copies a region between the given subresources of two images using the given command
    // buffer.
    static void Copy(ImageHelper *srcImage,
                     ImageHelper *dstImage,
                     const gl::Offset &srcOffset,
                     const gl::Offset &dstOffset,
                     const gl::Extents &copySize,
                     const VkImageSubresourceLayers &srcSubresources,
                     const VkImageSubresourceLayers &dstSubresources,
                     CommandBuffer *commandBuffer);

    // Copies a region between two images; parameters mirror glCopyImageSubData.
    static angle::Result CopyImageSubData(const gl::Context *context,
                                          ImageHelper *srcImage,
                                          GLint srcLevel,
                                          GLint srcX,
                                          GLint srcY,
                                          GLint srcZ,
                                          ImageHelper *dstImage,
                                          GLint dstLevel,
                                          GLint dstX,
                                          GLint dstY,
                                          GLint dstZ,
                                          GLsizei srcWidth,
                                          GLsizei srcHeight,
                                          GLsizei srcDepth);

    // Generate mipmap from level 0 into the rest of the levels with blit.
    angle::Result generateMipmapsWithBlit(ContextVk *contextVk,
                                          LevelIndex baseLevel,
                                          LevelIndex maxLevel);

    // Resolve this image into a destination image. This image should be in the TransferSrc layout.
    // The destination image is automatically transitioned into TransferDst.
    void resolve(ImageHelper *dst, const VkImageResolve &region, CommandBuffer *commandBuffer);
1760
    // Data staging: updates are recorded against a level/layer range and applied later by the
    // flush*StagedUpdates family of functions.
    void removeSingleSubresourceStagedUpdates(ContextVk *contextVk,
                                              gl::LevelIndex levelIndexGL,
                                              uint32_t layerIndex,
                                              uint32_t layerCount);
    void removeStagedUpdates(Context *context,
                             gl::LevelIndex levelGLStart,
                             gl::LevelIndex levelGLEnd);

    // Stages a copy of application pixel data.  This variant takes precomputed unpack
    // parameters (row pitch, depth pitch, skip bytes); see CalculateBufferInfo.
    angle::Result stageSubresourceUpdateImpl(ContextVk *contextVk,
                                             const gl::ImageIndex &index,
                                             const gl::Extents &glExtents,
                                             const gl::Offset &offset,
                                             const gl::InternalFormat &formatInfo,
                                             const gl::PixelUnpackState &unpack,
                                             DynamicBuffer *stagingBufferOverride,
                                             GLenum type,
                                             const uint8_t *pixels,
                                             const Format &vkFormat,
                                             ImageAccess access,
                                             const GLuint inputRowPitch,
                                             const GLuint inputDepthPitch,
                                             const GLuint inputSkipBytes);

    // Stages a copy of application pixel data, deriving the unpack parameters internally.
    angle::Result stageSubresourceUpdate(ContextVk *contextVk,
                                         const gl::ImageIndex &index,
                                         const gl::Extents &glExtents,
                                         const gl::Offset &offset,
                                         const gl::InternalFormat &formatInfo,
                                         const gl::PixelUnpackState &unpack,
                                         DynamicBuffer *stagingBufferOverride,
                                         GLenum type,
                                         const uint8_t *pixels,
                                         const Format &vkFormat,
                                         ImageAccess access);

    // Stages an update and returns a pointer to the staging memory so the caller can fill it
    // in directly.
    angle::Result stageSubresourceUpdateAndGetData(ContextVk *contextVk,
                                                   size_t allocationSize,
                                                   const gl::ImageIndex &imageIndex,
                                                   const gl::Extents &glExtents,
                                                   const gl::Offset &offset,
                                                   uint8_t **destData,
                                                   DynamicBuffer *stagingBufferOverride,
                                                   angle::FormatID formatID);

    // Stages an update whose source is the contents of a framebuffer region.
    angle::Result stageSubresourceUpdateFromFramebuffer(const gl::Context *context,
                                                        const gl::ImageIndex &index,
                                                        const gl::Rectangle &sourceArea,
                                                        const gl::Offset &dstOffset,
                                                        const gl::Extents &dstExtent,
                                                        const gl::InternalFormat &formatInfo,
                                                        ImageAccess access,
                                                        FramebufferVk *framebufferVk,
                                                        DynamicBuffer *stagingBufferOverride);

    // Stages an image-to-image copy from the given (ref-counted) source image.
    void stageSubresourceUpdateFromImage(RefCounted<ImageHelper> *image,
                                         const gl::ImageIndex &index,
                                         LevelIndex srcMipLevel,
                                         const gl::Offset &destOffset,
                                         const gl::Extents &glExtents,
                                         const VkImageType imageType);

    // Takes an image and stages a subresource update for each level of it, including its full
    // extent and all its layers, at the specified GL level.
    void stageSubresourceUpdatesFromAllImageLevels(RefCounted<ImageHelper> *image,
                                                   gl::LevelIndex baseLevel);

    // Stage a clear to an arbitrary value.
    void stageClear(const gl::ImageIndex &index,
                    VkImageAspectFlags aspectFlags,
                    const VkClearValue &clearValue);

    // Stage a clear based on robust resource init.
    angle::Result stageRobustResourceClearWithFormat(ContextVk *contextVk,
                                                     const gl::ImageIndex &index,
                                                     const gl::Extents &glExtents,
                                                     const angle::Format &intendedFormat,
                                                     const angle::Format &actualFormat);
    void stageRobustResourceClear(const gl::ImageIndex &index);

    // Stage the currently allocated image as updates to base level and on, making this !valid().
    // This is used for:
    //
    // - Mipmap generation, where levelCount is 1 so only the base level is retained
    // - Image respecification, where every level (other than those explicitly skipped) is staged
    void stageSelfAsSubresourceUpdates(ContextVk *contextVk,
                                       uint32_t levelCount,
                                       gl::TexLevelMask skipLevelsMask);
1849
    // Flush staged updates for a single subresource. Can optionally take a parameter to defer
    // clears to a subsequent RenderPass load op.
    angle::Result flushSingleSubresourceStagedUpdates(ContextVk *contextVk,
                                                      gl::LevelIndex levelGL,
                                                      uint32_t layer,
                                                      uint32_t layerCount,
                                                      ClearValuesArray *deferredClears,
                                                      uint32_t deferredClearIndex);

    // Flushes staged updates to a range of levels and layers from start to (but not including) end.
    // Due to the nature of updates (done wholly to a VkImageSubresourceLayers), some unsolicited
    // layers may also be updated.
    angle::Result flushStagedUpdates(ContextVk *contextVk,
                                     gl::LevelIndex levelGLStart,
                                     gl::LevelIndex levelGLEnd,
                                     uint32_t layerStart,
                                     uint32_t layerEnd,
                                     gl::TexLevelMask skipLevelsMask);

    // Creates a command buffer and flushes all staged updates. This is used for one-time
    // initialization of resources that we don't expect to accumulate further staged updates, such
    // as with renderbuffers or surface images.
    angle::Result flushAllStagedUpdates(ContextVk *contextVk);

    // Queries for pending (unflushed) updates on the given subresource range.
    bool hasStagedUpdatesForSubresource(gl::LevelIndex levelGL,
                                        uint32_t layer,
                                        uint32_t layerCount) const;
    bool hasStagedUpdatesInAllocatedLevels() const;
1878
    // Record a layout-transition barrier ahead of a write.  Unlike recordReadBarrier below,
    // this is unconditional.
    void recordWriteBarrier(Context *context,
                            VkImageAspectFlags aspectMask,
                            ImageLayout newLayout,
                            CommandBuffer *commandBuffer)
    {
        barrierImpl(context, aspectMask, newLayout, mCurrentQueueFamilyIndex, commandBuffer);
    }

    // Same as recordWriteBarrier, but records into a primary command buffer and derives the
    // aspect mask from the image format.
    void recordWriteBarrierOneOff(Context *context,
                                  ImageLayout newLayout,
                                  PrimaryCommandBuffer *commandBuffer)
    {
        barrierImpl(context, getAspectFlags(), newLayout, mCurrentQueueFamilyIndex, commandBuffer);
    }

    // This function can be used to prevent issuing redundant layout transition commands.
    bool isReadBarrierNecessary(ImageLayout newLayout) const;
1896
recordReadBarrier(Context * context,VkImageAspectFlags aspectMask,ImageLayout newLayout,CommandBuffer * commandBuffer)1897 void recordReadBarrier(Context *context,
1898 VkImageAspectFlags aspectMask,
1899 ImageLayout newLayout,
1900 CommandBuffer *commandBuffer)
1901 {
1902 if (!isReadBarrierNecessary(newLayout))
1903 {
1904 return;
1905 }
1906
1907 barrierImpl(context, aspectMask, newLayout, mCurrentQueueFamilyIndex, commandBuffer);
1908 }
1909
    // Whether a queue-family ownership transfer is needed to use the image on the given queue.
    // NOTE: "Neccesary" is a long-standing typo kept for source compatibility with callers.
    bool isQueueChangeNeccesary(uint32_t newQueueFamilyIndex) const
    {
        return mCurrentQueueFamilyIndex != newQueueFamilyIndex;
    }
1914
    // Transitions the layout and transfers queue-family ownership in one barrier.
    void changeLayoutAndQueue(Context *context,
                              VkImageAspectFlags aspectMask,
                              ImageLayout newLayout,
                              uint32_t newQueueFamilyIndex,
                              CommandBuffer *commandBuffer);

    // Returns true if barrier has been generated
    bool updateLayoutAndBarrier(Context *context,
                                VkImageAspectFlags aspectMask,
                                ImageLayout newLayout,
                                PipelineBarrier *barrier);

    // Performs an ownership transfer from an external instance or API.
    void acquireFromExternal(ContextVk *contextVk,
                             uint32_t externalQueueFamilyIndex,
                             uint32_t rendererQueueFamilyIndex,
                             ImageLayout currentLayout,
                             CommandBuffer *commandBuffer);

    // Performs an ownership transfer to an external instance or API.
    void releaseToExternal(ContextVk *contextVk,
                           uint32_t rendererQueueFamilyIndex,
                           uint32_t externalQueueFamilyIndex,
                           ImageLayout desiredLayout,
                           CommandBuffer *commandBuffer);

    // Returns true if the image is owned by an external API or instance.
    bool isReleasedToExternal() const;
1943
    gl::LevelIndex getFirstAllocatedLevel() const
    {
        ASSERT(valid());
        return mFirstAllocatedLevel;
    }
    gl::LevelIndex getLastAllocatedLevel() const;
    // Conversions between GL level indices and Vulkan level indices of this image.
    LevelIndex toVkLevel(gl::LevelIndex levelIndexGL) const;
    gl::LevelIndex toGLLevel(LevelIndex levelIndexVk) const;

    // Copies the given region of the image into a staging buffer; returns the buffer, its
    // size, per-aspect offsets, and a pointer to the mapped data.
    angle::Result copyImageDataToBuffer(ContextVk *contextVk,
                                        gl::LevelIndex sourceLevelGL,
                                        uint32_t layerCount,
                                        uint32_t baseLayer,
                                        const gl::Box &sourceArea,
                                        BufferHelper **bufferOut,
                                        size_t *bufferSize,
                                        StagingBufferOffsetArray *bufferOffsetsOut,
                                        uint8_t **outDataPtr);
1962
    // Computes pack-pixels parameters and skip bytes for a readPixels-style operation.
    static angle::Result GetReadPixelsParams(ContextVk *contextVk,
                                             const gl::PixelPackState &packState,
                                             gl::Buffer *packBuffer,
                                             GLenum format,
                                             GLenum type,
                                             const gl::Rectangle &area,
                                             const gl::Rectangle &clippedArea,
                                             PackPixelsParams *paramsOut,
                                             GLuint *skipBytesOut);

    // Reads back a level/layer range of the image into application memory (glGetTexImage path).
    angle::Result readPixelsForGetImage(ContextVk *contextVk,
                                        const gl::PixelPackState &packState,
                                        gl::Buffer *packBuffer,
                                        gl::LevelIndex levelGL,
                                        uint32_t layer,
                                        uint32_t layerCount,
                                        GLenum format,
                                        GLenum type,
                                        void *pixels);

    // Reads back a single-aspect region of one layer of the image.
    angle::Result readPixels(ContextVk *contextVk,
                             const gl::Rectangle &area,
                             const PackPixelsParams &packPixelsParams,
                             VkImageAspectFlagBits copyAspectFlags,
                             gl::LevelIndex levelGL,
                             uint32_t layer,
                             void *pixels,
                             DynamicBuffer *stagingBuffer);

    // Computes row pitch, depth pitch and skip bytes for application pixel data with the given
    // unpack state.
    angle::Result CalculateBufferInfo(ContextVk *contextVk,
                                      const gl::Extents &glExtents,
                                      const gl::InternalFormat &formatInfo,
                                      const gl::PixelUnpackState &unpack,
                                      GLenum type,
                                      bool is3D,
                                      GLuint *inputRowPitch,
                                      GLuint *inputDepthPitch,
                                      GLuint *inputSkipBytes);

    // Mark a given subresource as written to. The subresource is identified by [levelStart,
    // levelStart + levelCount) and [layerStart, layerStart + layerCount).
    void onWrite(gl::LevelIndex levelStart,
                 uint32_t levelCount,
                 uint32_t layerStart,
                 uint32_t layerCount,
                 VkImageAspectFlags aspectFlags);
hasImmutableSampler()2009 bool hasImmutableSampler() const { return mYcbcrConversionDesc.valid(); }
getExternalFormat()2010 uint64_t getExternalFormat() const
2011 {
2012 return mYcbcrConversionDesc.mIsExternalFormat ? mYcbcrConversionDesc.mExternalOrVkFormat
2013 : 0;
2014 }
getYcbcrConversionDesc()2015 const YcbcrConversionDesc *getYcbcrConversionDesc() const { return &mYcbcrConversionDesc; }
2016
    // Used by framebuffer and render pass functions to decide loadOps and invalidate/un-invalidate
    // render target contents.
    bool hasSubresourceDefinedContent(gl::LevelIndex level,
                                      uint32_t layerIndex,
                                      uint32_t layerCount) const;
    bool hasSubresourceDefinedStencilContent(gl::LevelIndex level,
                                             uint32_t layerIndex,
                                             uint32_t layerCount) const;
    void invalidateSubresourceContent(ContextVk *contextVk,
                                      gl::LevelIndex level,
                                      uint32_t layerIndex,
                                      uint32_t layerCount);
    void invalidateSubresourceStencilContent(ContextVk *contextVk,
                                             gl::LevelIndex level,
                                             uint32_t layerIndex,
                                             uint32_t layerCount);
    void restoreSubresourceContent(gl::LevelIndex level, uint32_t layerIndex, uint32_t layerCount);
    void restoreSubresourceStencilContent(gl::LevelIndex level,
                                          uint32_t layerIndex,
                                          uint32_t layerCount);
    // Converts staged buffer updates from one format to another.
    angle::Result reformatStagedBufferUpdates(ContextVk *contextVk,
                                              angle::FormatID srcFormatID,
                                              angle::FormatID dstFormatID);
    // Whether any staged image update in [levelStart, levelEnd) has a format other than formatID.
    bool hasStagedImageUpdatesWithMismatchedFormat(gl::LevelIndex levelStart,
                                                   gl::LevelIndex levelEnd,
                                                   angle::FormatID formatID) const;
2043
  private:
    // Discriminates the union in SubresourceUpdate below.
    enum class UpdateSource
    {
        // Clear an image subresource.
        Clear,
        // Clear only the emulated channels of the subresource. This operation is more expensive
        // than Clear, and so is only used for emulated color formats and only for external images.
        // Color only because depth or stencil clear is already per channel, so Clear works for
        // them. External only because they may contain data that needs to be preserved.
        // Additionally, this is a one-time only clear. Once the emulated channels are cleared,
        // ANGLE ensures that they remain untouched.
        ClearEmulatedChannelsOnly,
        // The source of the copy is a buffer.
        Buffer,
        // The source of the copy is an image.
        Image,
    };
2061 ANGLE_ENABLE_STRUCT_PADDING_WARNINGS
2062 struct ClearUpdate
2063 {
2064 bool operator==(const ClearUpdate &rhs)
2065 {
2066 return memcmp(this, &rhs, sizeof(ClearUpdate)) == 0;
2067 }
2068 VkImageAspectFlags aspectFlags;
2069 VkClearValue value;
2070 uint32_t levelIndex;
2071 uint32_t layerIndex;
2072 uint32_t layerCount;
2073 // For ClearEmulatedChannelsOnly, mask of which channels to clear.
2074 VkColorComponentFlags colorMaskFlags;
2075 };
2076 ANGLE_DISABLE_STRUCT_PADDING_WARNINGS
    // A staged copy whose source is a buffer.
    struct BufferUpdate
    {
        BufferHelper *bufferHelper;
        VkBufferImageCopy copyRegion;
        angle::FormatID formatID;
    };
    // A staged copy whose source is another image.
    struct ImageUpdate
    {
        VkImageCopy copyRegion;
        angle::FormatID formatID;
    };
2088
    // A single staged update (clear, buffer copy, or image copy); move-only since it may hold
    // a reference on a source image.
    struct SubresourceUpdate : angle::NonCopyable
    {
        SubresourceUpdate();
        ~SubresourceUpdate();
        // Buffer-sourced update.
        SubresourceUpdate(BufferHelper *bufferHelperIn,
                          const VkBufferImageCopy &copyRegion,
                          angle::FormatID formatID);
        // Image-sourced update.
        SubresourceUpdate(RefCounted<ImageHelper> *imageIn,
                          const VkImageCopy &copyRegion,
                          angle::FormatID formatID);
        // Clear update.
        SubresourceUpdate(VkImageAspectFlags aspectFlags,
                          const VkClearValue &clearValue,
                          const gl::ImageIndex &imageIndex);
        // Emulated-channels-only clear update.
        SubresourceUpdate(VkColorComponentFlags colorMaskFlags,
                          const VkClearColorValue &clearValue,
                          const gl::ImageIndex &imageIndex);
        SubresourceUpdate(SubresourceUpdate &&other);

        SubresourceUpdate &operator=(SubresourceUpdate &&other);

        void release(RendererVk *renderer);

        bool isUpdateToLayers(uint32_t layerIndex, uint32_t layerCount) const;
        void getDestSubresource(uint32_t imageLayerCount,
                                uint32_t *baseLayerOut,
                                uint32_t *layerCountOut) const;
        VkImageAspectFlags getDestAspectFlags() const;

        UpdateSource updateSource;
        // Payload; which member is active is determined by updateSource.
        union
        {
            ClearUpdate clear;
            BufferUpdate buffer;
            ImageUpdate image;
        } data;
        // Non-null for image-sourced updates; holds a reference on the source image.
        RefCounted<ImageHelper> *image;
    };
2126
    // Derives mTilingMode from an externally-provided VkImageCreateInfo pNext chain.
    void deriveExternalImageTiling(const void *createInfoChain);

    // Called from flushStagedUpdates, removes updates that are later superseded by another. This
    // cannot be done at the time the updates were staged, as the image is not created (and thus the
    // extents are not known).
    void removeSupersededUpdates(ContextVk *contextVk, gl::TexLevelMask skipLevelsMask);

    // Fills out a VkImageMemoryBarrier for a transition from the current layout/queue to the
    // given layout and queue family.
    void initImageMemoryBarrierStruct(VkImageAspectFlags aspectMask,
                                      ImageLayout newLayout,
                                      uint32_t newQueueFamilyIndex,
                                      VkImageMemoryBarrier *imageMemoryBarrier) const;

    // Generalized to accept both "primary" and "secondary" command buffers.
    template <typename CommandBufferT>
    void barrierImpl(Context *context,
                     VkImageAspectFlags aspectMask,
                     ImageLayout newLayout,
                     uint32_t newQueueFamilyIndex,
                     CommandBufferT *commandBuffer);

    // If the image has emulated channels, we clear them once so as not to leave garbage on those
    // channels.
    VkColorComponentFlags getEmulatedChannelsMask() const;
    void stageClearIfEmulatedFormat(bool isRobustResourceInitEnabled, bool isExternalImage);
    // Assertion helper: emulated-channel clears must precede all other staged updates.
    bool verifyEmulatedClearsAreBeforeOtherUpdates(const std::vector<SubresourceUpdate> &updates);
2152
    // Clear either color or depth/stencil based on image format.
    void clear(VkImageAspectFlags aspectFlags,
               const VkClearValue &value,
               LevelIndex mipLevel,
               uint32_t baseArrayLayer,
               uint32_t layerCount,
               CommandBuffer *commandBuffer);

    void clearColor(const VkClearColorValue &color,
                    LevelIndex baseMipLevelVk,
                    uint32_t levelCount,
                    uint32_t baseArrayLayer,
                    uint32_t layerCount,
                    CommandBuffer *commandBuffer);

    void clearDepthStencil(VkImageAspectFlags clearAspectFlags,
                           const VkClearDepthStencilValue &depthStencil,
                           LevelIndex baseMipLevelVk,
                           uint32_t levelCount,
                           uint32_t baseArrayLayer,
                           uint32_t layerCount,
                           CommandBuffer *commandBuffer);

    // Clears only the channels given by colorMaskFlags (used for emulated channels).
    angle::Result clearEmulatedChannels(ContextVk *contextVk,
                                        VkColorComponentFlags colorMaskFlags,
                                        const VkClearValue &value,
                                        LevelIndex mipLevel,
                                        uint32_t baseArrayLayer,
                                        uint32_t layerCount);

    // Fills freshly allocated memory with non-zero data (robust resource init support).
    angle::Result initializeNonZeroMemory(Context *context,
                                          bool hasProtectedContent,
                                          VkDeviceSize size);
2186
    // Access the staged-update list for a GL level; returns nullptr when out of range.
    std::vector<SubresourceUpdate> *getLevelUpdates(gl::LevelIndex level);
    const std::vector<SubresourceUpdate> *getLevelUpdates(gl::LevelIndex level) const;

    void appendSubresourceUpdate(gl::LevelIndex level, SubresourceUpdate &&update);
    void prependSubresourceUpdate(gl::LevelIndex level, SubresourceUpdate &&update);
    // Whether there are any updates in [start, end).
    bool hasStagedUpdatesInLevels(gl::LevelIndex levelStart, gl::LevelIndex levelEnd) const;

    // Used only for assertions, these functions verify that SubresourceUpdate::image references
    // have the correct ref count. This is to prevent accidental leaks.
    bool validateSubresourceUpdateImageRefConsistent(RefCounted<ImageHelper> *image) const;
    bool validateSubresourceUpdateImageRefsConsistent() const;

    void resetCachedProperties();
    void setEntireContentDefined();
    void setEntireContentUndefined();
    void setContentDefined(LevelIndex levelStart,
                           uint32_t levelCount,
                           uint32_t layerStart,
                           uint32_t layerCount,
                           VkImageAspectFlags aspectFlags);

    // Up to 8 layers are tracked per level for whether contents are defined, above which the
    // contents are considered unconditionally defined. This handles the more likely scenarios of:
    //
    // - Single layer framebuffer attachments,
    // - Cube map framebuffer attachments,
    // - Multi-view rendering.
    //
    // If there arises a need to optimize an application that invalidates layer >= 8, an additional
    // hash map can be used to track such subresources.
    static constexpr uint32_t kMaxContentDefinedLayerCount = 8;
    using LevelContentDefinedMask = angle::BitSet8<kMaxContentDefinedLayerCount>;

    // Use the following functions to access m*ContentDefined to make sure the correct level index
    // is used (i.e. vk::LevelIndex and not gl::LevelIndex).
    LevelContentDefinedMask &getLevelContentDefined(LevelIndex level);
    LevelContentDefinedMask &getLevelStencilContentDefined(LevelIndex level);
    const LevelContentDefinedMask &getLevelContentDefined(LevelIndex level) const;
    const LevelContentDefinedMask &getLevelStencilContentDefined(LevelIndex level) const;

    // Creates an image view over the given level/layer range of this image.
    angle::Result initLayerImageViewImpl(
        Context *context,
        gl::TextureType textureType,
        VkImageAspectFlags aspectMask,
        const gl::SwizzleState &swizzleMap,
        ImageView *imageViewOut,
        LevelIndex baseMipLevelVk,
        uint32_t levelCount,
        uint32_t baseArrayLayer,
        uint32_t layerCount,
        VkFormat imageFormat,
        const VkImageViewUsageCreateInfo *imageViewUsageCreateInfo) const;

    // Whether a readPixels with the given parameters can be done with a transforming copy.
    bool canCopyWithTransformForReadPixels(const PackPixelsParams &packPixelsParams,
                                           const angle::Format *readFormat);
    // Vulkan objects.
    Image mImage;
    DeviceMemory mDeviceMemory;

    // Image properties.
    VkImageCreateInfo mVkImageCreateInfo;
    VkImageType mImageType;
    VkImageTiling mTilingMode;
    VkImageCreateFlags mCreateFlags;
    VkImageUsageFlags mUsage;
    // For Android swapchain images, the Vulkan VkImage must be "rotated". However, most of ANGLE
    // uses non-rotated extents (i.e. the way the application views the extents--see "Introduction
    // to Android rotation and pre-rotation" in "SurfaceVk.cpp"). Thus, mExtents are non-rotated.
    // The rotated extents are also stored along with a bool that indicates if the aspect ratio is
    // different between the rotated and non-rotated extents.
    VkExtent3D mExtents;
    bool mRotatedAspectRatio;
    angle::FormatID mIntendedFormatID;
    angle::FormatID mActualFormatID;
    GLint mSamples;
    ImageSerial mImageSerial;

    // Current state.
    ImageLayout mCurrentLayout;
    uint32_t mCurrentQueueFamilyIndex;
    // For optimizing transition between different shader readonly layouts
    ImageLayout mLastNonShaderReadOnlyLayout;
    VkPipelineStageFlags mCurrentShaderReadStageMask;
    // Track how it is being used by current open renderpass.
    RenderPassUsageFlags mRenderPassUsageFlags;

    // For imported images
    YcbcrConversionDesc mYcbcrConversionDesc;
    BindingPointer<SamplerYcbcrConversion> mYuvConversionSampler;

    // The first level that has been allocated. For mutable textures, this should be same as
    // mBaseLevel since we always reallocate VkImage based on mBaseLevel change. But for immutable
    // textures, we always allocate from level 0 regardless of mBaseLevel change.
    gl::LevelIndex mFirstAllocatedLevel;

    // Cached properties.
    uint32_t mLayerCount;
    uint32_t mLevelCount;

    // Staging buffer
    DynamicBuffer mStagingBuffer;
    std::vector<std::vector<SubresourceUpdate>> mSubresourceUpdates;

    // Optimization for repeated clear with the same value. If this pointer is not null, the entire
    // image has been cleared to the specified clear value. If another clear call is made with
    // the exact same clear value, we will detect and skip the clear call.
    Optional<ClearUpdate> mCurrentSingleClearValue;

    // Track whether each subresource has defined contents. Up to 8 layers are tracked per level,
    // above which the contents are considered unconditionally defined.
    gl::TexLevelArray<LevelContentDefinedMask> mContentDefined;
    gl::TexLevelArray<LevelContentDefinedMask> mStencilContentDefined;
};
2301
// A vector of image views, such as one per level or one per layer.
using ImageViewVector = std::vector<ImageView>;

// A vector of vector of image views. Primary index is layer, secondary index is level.
using LayerLevelImageViewVector = std::vector<ImageViewVector>;

// Address mode for layers: only possible to access either all layers, or up to
// IMPLEMENTATION_ANGLE_MULTIVIEW_MAX_VIEWS layers. This enum uses 0 for all layers and the rest of
// the values conveniently alias the number of layers.
enum LayerMode
{
    All,
    _1,
    _2,
    _3,
    _4,
};
static_assert(gl::IMPLEMENTATION_ANGLE_MULTIVIEW_MAX_VIEWS == 4, "Update LayerMode");

// Maps a layer count (for the given image) to the corresponding LayerMode value.
LayerMode GetLayerMode(const vk::ImageHelper &image, uint32_t layerCount);

// Sampler decode mode indicating if an attachment needs to be decoded in linear colorspace or sRGB
enum class SrgbDecodeMode
{
    SkipDecode,
    SrgbDecode
};
2329
// Manages collections of VkImageViews for an image: read/fetch/copy views per level, in both
// linear and sRGB colorspaces, plus on-demand storage and draw views.
class ImageViewHelper final : public Resource
{
  public:
    ImageViewHelper();
    ImageViewHelper(ImageViewHelper &&other);
    ~ImageViewHelper() override;

    void init(RendererVk *renderer);
    void release(RendererVk *renderer);
    void destroy(VkDevice device);
2340
    // Explicit-colorspace read views for the currently selected max level.  These assert that
    // the view exists and is valid.
    const ImageView &getLinearReadImageView() const
    {
        return getValidReadViewImpl(mPerLevelLinearReadImageViews);
    }
    const ImageView &getSRGBReadImageView() const
    {
        return getValidReadViewImpl(mPerLevelSRGBReadImageViews);
    }
    const ImageView &getLinearFetchImageView() const
    {
        return getValidReadViewImpl(mPerLevelLinearFetchImageViews);
    }
    const ImageView &getSRGBFetchImageView() const
    {
        return getValidReadViewImpl(mPerLevelSRGBFetchImageViews);
    }
    const ImageView &getLinearCopyImageView() const
    {
        return getValidReadViewImpl(mPerLevelLinearCopyImageViews);
    }
    const ImageView &getSRGBCopyImageView() const
    {
        return getValidReadViewImpl(mPerLevelSRGBCopyImageViews);
    }
    const ImageView &getStencilReadImageView() const
    {
        return getValidReadViewImpl(mPerLevelStencilReadImageViews);
    }
2369
getReadImageView()2370 const ImageView &getReadImageView() const
2371 {
2372 return mLinearColorspace ? getReadViewImpl(mPerLevelLinearReadImageViews)
2373 : getReadViewImpl(mPerLevelSRGBReadImageViews);
2374 }
2375
getFetchImageView()2376 const ImageView &getFetchImageView() const
2377 {
2378 return mLinearColorspace ? getReadViewImpl(mPerLevelLinearFetchImageViews)
2379 : getReadViewImpl(mPerLevelSRGBFetchImageViews);
2380 }
2381
getCopyImageView()2382 const ImageView &getCopyImageView() const
2383 {
2384 return mLinearColorspace ? getReadViewImpl(mPerLevelLinearCopyImageViews)
2385 : getReadViewImpl(mPerLevelSRGBCopyImageViews);
2386 }
2387
2388 // Used when initialized RenderTargets.
hasStencilReadImageView()2389 bool hasStencilReadImageView() const
2390 {
2391 return mCurrentMaxLevel.get() < mPerLevelStencilReadImageViews.size()
2392 ? mPerLevelStencilReadImageViews[mCurrentMaxLevel.get()].valid()
2393 : false;
2394 }
2395
hasFetchImageView()2396 bool hasFetchImageView() const
2397 {
2398 if ((mLinearColorspace && mCurrentMaxLevel.get() < mPerLevelLinearFetchImageViews.size()) ||
2399 (!mLinearColorspace && mCurrentMaxLevel.get() < mPerLevelSRGBFetchImageViews.size()))
2400 {
2401 return getFetchImageView().valid();
2402 }
2403 else
2404 {
2405 return false;
2406 }
2407 }
2408
hasCopyImageView()2409 bool hasCopyImageView() const
2410 {
2411 if ((mLinearColorspace && mCurrentMaxLevel.get() < mPerLevelLinearCopyImageViews.size()) ||
2412 (!mLinearColorspace && mCurrentMaxLevel.get() < mPerLevelSRGBCopyImageViews.size()))
2413 {
2414 return getCopyImageView().valid();
2415 }
2416 else
2417 {
2418 return false;
2419 }
2420 }
2421
    // For applications that frequently switch a texture's max level, and make no other changes to
    // the texture, change the currently-used max level, and potentially create new "read views"
    // for the new max-level
    angle::Result initReadViews(ContextVk *contextVk,
                                gl::TextureType viewType,
                                const ImageHelper &image,
                                const angle::Format &format,
                                const gl::SwizzleState &formatSwizzle,
                                const gl::SwizzleState &readSwizzle,
                                LevelIndex baseLevel,
                                uint32_t levelCount,
                                uint32_t baseLayer,
                                uint32_t layerCount,
                                bool requiresSRGBViews,
                                VkImageUsageFlags imageUsageFlags);

    // Creates a storage view with all layers of the level.
    angle::Result getLevelStorageImageView(ContextVk *contextVk,
                                           gl::TextureType viewType,
                                           const ImageHelper &image,
                                           LevelIndex levelVk,
                                           uint32_t layer,
                                           VkImageUsageFlags imageUsageFlags,
                                           angle::FormatID formatID,
                                           const ImageView **imageViewOut);

    // Creates a storage view with a single layer of the level.
    angle::Result getLevelLayerStorageImageView(ContextVk *contextVk,
                                                const ImageHelper &image,
                                                LevelIndex levelVk,
                                                uint32_t layer,
                                                VkImageUsageFlags imageUsageFlags,
                                                angle::FormatID formatID,
                                                const ImageView **imageViewOut);

    // Creates a draw view with a range of layers of the level.
    angle::Result getLevelDrawImageView(ContextVk *contextVk,
                                        const ImageHelper &image,
                                        LevelIndex levelVk,
                                        uint32_t layer,
                                        uint32_t layerCount,
                                        gl::SrgbWriteControlMode mode,
                                        const ImageView **imageViewOut);

    // Creates a draw view with a single layer of the level.
    angle::Result getLevelLayerDrawImageView(ContextVk *contextVk,
                                             const ImageHelper &image,
                                             LevelIndex levelVk,
                                             uint32_t layer,
                                             gl::SrgbWriteControlMode mode,
                                             const ImageView **imageViewOut);

    // Return unique Serial for an imageView.
    ImageOrBufferViewSubresourceSerial getSubresourceSerial(
        gl::LevelIndex levelGL,
        uint32_t levelCount,
        uint32_t layer,
        LayerMode layerMode,
        SrgbDecodeMode srgbDecodeMode,
        gl::SrgbOverride srgbOverrideMode) const;
2482
  private:
    // Mutable variants of the colorspace-selecting getters, used internally when (re)creating
    // views.
    ImageView &getReadImageView()
    {
        return mLinearColorspace ? getReadViewImpl(mPerLevelLinearReadImageViews)
                                 : getReadViewImpl(mPerLevelSRGBReadImageViews);
    }
    ImageView &getFetchImageView()
    {
        return mLinearColorspace ? getReadViewImpl(mPerLevelLinearFetchImageViews)
                                 : getReadViewImpl(mPerLevelSRGBFetchImageViews);
    }
    ImageView &getCopyImageView()
    {
        return mLinearColorspace ? getReadViewImpl(mPerLevelLinearCopyImageViews)
                                 : getReadViewImpl(mPerLevelSRGBCopyImageViews);
    }
2499
2500 // Used by public get*ImageView() methods to do proper assert based on vector size and validity
getValidReadViewImpl(const ImageViewVector & imageViewVector)2501 inline const ImageView &getValidReadViewImpl(const ImageViewVector &imageViewVector) const
2502 {
2503 ASSERT(mCurrentMaxLevel.get() < imageViewVector.size() &&
2504 imageViewVector[mCurrentMaxLevel.get()].valid());
2505 return imageViewVector[mCurrentMaxLevel.get()];
2506 }
2507
2508 // Used by public get*ImageView() methods to do proper assert based on vector size
getReadViewImpl(const ImageViewVector & imageViewVector)2509 inline const ImageView &getReadViewImpl(const ImageViewVector &imageViewVector) const
2510 {
2511 ASSERT(mCurrentMaxLevel.get() < imageViewVector.size());
2512 return imageViewVector[mCurrentMaxLevel.get()];
2513 }
2514
2515 // Used by private get*ImageView() methods to do proper assert based on vector size
getReadViewImpl(ImageViewVector & imageViewVector)2516 inline ImageView &getReadViewImpl(ImageViewVector &imageViewVector)
2517 {
2518 ASSERT(mCurrentMaxLevel.get() < imageViewVector.size());
2519 return imageViewVector[mCurrentMaxLevel.get()];
2520 }
2521
// Creates the read/fetch/copy views covering |levelCount| levels and |layerCount| layers.
// |formatSwizzle| is the format's inherent component mapping; |readSwizzle| additionally
// folds in the application's texture swizzle state.
angle::Result initReadViewsImpl(ContextVk *contextVk,
                                gl::TextureType viewType,
                                const ImageHelper &image,
                                const angle::Format &format,
                                const gl::SwizzleState &formatSwizzle,
                                const gl::SwizzleState &readSwizzle,
                                LevelIndex baseLevel,
                                uint32_t levelCount,
                                uint32_t baseLayer,
                                uint32_t layerCount);
2533
// Creates the SRGB-reinterpreted counterparts of the read views, so sampling can toggle
// between linear and sRGB decode without recreating the image.
angle::Result initSRGBReadViewsImpl(ContextVk *contextVk,
                                    gl::TextureType viewType,
                                    const ImageHelper &image,
                                    const angle::Format &format,
                                    const gl::SwizzleState &formatSwizzle,
                                    const gl::SwizzleState &readSwizzle,
                                    LevelIndex baseLevel,
                                    uint32_t levelCount,
                                    uint32_t baseLayer,
                                    uint32_t layerCount,
                                    VkImageUsageFlags imageUsageFlags);
2546
// For applications that frequently switch a texture's max level, and make no other changes to
// the texture, keep track of the currently-used max level, and keep one "read view" per
// max-level
LevelIndex mCurrentMaxLevel;

// Read views (one per max-level). Linear and SRGB variants are kept side by side so the
// colorspace can be switched without recreating views; see get*ImageView() above.
ImageViewVector mPerLevelLinearReadImageViews;
ImageViewVector mPerLevelSRGBReadImageViews;
ImageViewVector mPerLevelLinearFetchImageViews;
ImageViewVector mPerLevelSRGBFetchImageViews;
ImageViewVector mPerLevelLinearCopyImageViews;
ImageViewVector mPerLevelSRGBCopyImageViews;
ImageViewVector mPerLevelStencilReadImageViews;

// Whether the image's native colorspace is linear (selects between the vectors above).
bool mLinearColorspace;

// Draw views, indexed by layer and level; the map holds views for arbitrary subresource
// ranges that don't fit the layer/level vectors.
LayerLevelImageViewVector mLayerLevelDrawImageViews;
LayerLevelImageViewVector mLayerLevelDrawImageViewsLinear;
angle::HashMap<ImageSubresourceRange, std::unique_ptr<ImageView>> mSubresourceDrawImageViews;

// Storage views (for image load/store).
ImageViewVector mLevelStorageImageViews;
LayerLevelImageViewVector mLayerLevelStorageImageViews;

// Serial for the image view set. getSubresourceSerial combines it with subresource info.
ImageOrBufferViewSerial mImageViewSerial;
};
2575
// Packs the given level/layer/mode parameters into an ImageSubresourceRange key for read
// (sampled/fetch) views.
ImageSubresourceRange MakeImageSubresourceReadRange(gl::LevelIndex level,
                                                    uint32_t levelCount,
                                                    uint32_t layer,
                                                    LayerMode layerMode,
                                                    SrgbDecodeMode srgbDecodeMode,
                                                    gl::SrgbOverride srgbOverrideMode);
// Packs the given level/layer/mode parameters into an ImageSubresourceRange key for draw
// (attachment) views.  Draw ranges always cover a single level.
ImageSubresourceRange MakeImageSubresourceDrawRange(gl::LevelIndex level,
                                                    uint32_t layer,
                                                    LayerMode layerMode,
                                                    gl::SrgbWriteControlMode srgbWriteControlMode);
2586
// Manages the VkBufferView objects for a texture buffer (glTexBuffer[Range]).  One view is
// created per VkFormat encountered, to support format reinterpretation on draw/dispatch.
class BufferViewHelper final : public Resource
{
  public:
    BufferViewHelper();
    BufferViewHelper(BufferViewHelper &&other);
    ~BufferViewHelper() override;

    // Records the buffer range the views will cover and allocates the view serial.
    void init(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size);
    // Defers destruction of the views until the GPU is done with them.
    void release(ContextVk *contextVk);
    // Immediately destroys the views (device must be idle / views unused).
    void destroy(VkDevice device);

    // Returns (creating on first use) the view of |buffer| in the given format.  The
    // returned view is owned by this helper and stays valid until release()/destroy().
    angle::Result getView(ContextVk *contextVk,
                          const BufferHelper &buffer,
                          VkDeviceSize bufferOffset,
                          const Format &format,
                          const BufferView **viewOut);

    // Return unique Serial for a bufferView.
    ImageOrBufferViewSubresourceSerial getSerial() const;

  private:
    // To support format reinterpretation, additional views for formats other than the one specified
    // to glTexBuffer may need to be created. On draw/dispatch, the format layout qualifier of the
    // imageBuffer is used (if provided) to create a potentially different view of the buffer.
    angle::HashMap<VkFormat, BufferView> mViews;

    // View properties:
    //
    // Offset and size specified to glTexBufferRange
    VkDeviceSize mOffset;
    VkDeviceSize mSize;

    // Serial for the buffer view. An ImageOrBufferViewSerial is used for texture buffers so that
    // they fit together with the other texture types.
    ImageOrBufferViewSerial mViewSerial;
};
2623
2624 class FramebufferHelper : public Resource
2625 {
2626 public:
2627 FramebufferHelper();
2628 ~FramebufferHelper() override;
2629
2630 FramebufferHelper(FramebufferHelper &&other);
2631 FramebufferHelper &operator=(FramebufferHelper &&other);
2632
2633 angle::Result init(ContextVk *contextVk, const VkFramebufferCreateInfo &createInfo);
2634 void release(ContextVk *contextVk);
2635
valid()2636 bool valid() { return mFramebuffer.valid(); }
2637
getFramebuffer()2638 const Framebuffer &getFramebuffer() const
2639 {
2640 ASSERT(mFramebuffer.valid());
2641 return mFramebuffer;
2642 }
2643
getFramebuffer()2644 Framebuffer &getFramebuffer()
2645 {
2646 ASSERT(mFramebuffer.valid());
2647 return mFramebuffer;
2648 }
2649
2650 private:
2651 // Vulkan object.
2652 Framebuffer mFramebuffer;
2653 };
2654
// Holds the set of shader modules for a program and caches the pipelines built from them
// (graphics pipelines keyed by GraphicsPipelineDesc, plus a single compute pipeline).
class ShaderProgramHelper : angle::NonCopyable
{
  public:
    ShaderProgramHelper();
    ~ShaderProgramHelper();

    // Whether a shader module has been set for the given stage.
    bool valid(const gl::ShaderType shaderType) const;
    void destroy(RendererVk *rendererVk);
    void release(ContextVk *contextVk);

    ShaderAndSerial &getShader(gl::ShaderType shaderType) { return mShaders[shaderType].get(); }

    void setShader(gl::ShaderType shaderType, RefCounted<ShaderAndSerial> *shader);
    void setSpecializationConstant(sh::vk::SpecializationConstantId id, uint32_t value);

    // Gets a graphics Pipeline from the pipeline cache, creating it if not present.
    ANGLE_INLINE angle::Result getGraphicsPipeline(
        ContextVk *contextVk,
        RenderPassCache *renderPassCache,
        const PipelineCache &pipelineCache,
        const PipelineLayout &pipelineLayout,
        const GraphicsPipelineDesc &pipelineDesc,
        const gl::AttributesMask &activeAttribLocationsMask,
        const gl::ComponentTypeMask &programAttribsTypeMask,
        const gl::DrawBufferMask &missingOutputsMask,
        const GraphicsPipelineDesc **descPtrOut,
        PipelineHelper **pipelineOut)
    {
        // Pull in a compatible RenderPass.
        RenderPass *compatibleRenderPass = nullptr;
        ANGLE_TRY(renderPassCache->getCompatibleRenderPass(
            contextVk, pipelineDesc.getRenderPassDesc(), &compatibleRenderPass));

        return mGraphicsPipelines.getPipeline(
            contextVk, pipelineCache, *compatibleRenderPass, pipelineLayout,
            activeAttribLocationsMask, programAttribsTypeMask, missingOutputsMask, mShaders,
            mSpecializationConstants, pipelineDesc, descPtrOut, pipelineOut);
    }

    angle::Result getComputePipeline(Context *context,
                                     const PipelineLayout &pipelineLayout,
                                     PipelineHelper **pipelineOut);

  private:
    ShaderAndSerialMap mShaders;
    GraphicsPipelineCache mGraphicsPipelines;

    // We should probably use PipelineHelper here so we can remove PipelineAndSerial.
    PipelineHelper mComputePipeline;

    // Specialization constants, currently only used by the graphics queue.
    SpecializationConstants mSpecializationConstants;
};
2708
2709 // Tracks current handle allocation counts in the back-end. Useful for debugging and profiling.
2710 // Note: not all handle types are currently implemented.
2711 class ActiveHandleCounter final : angle::NonCopyable
2712 {
2713 public:
2714 ActiveHandleCounter();
2715 ~ActiveHandleCounter();
2716
onAllocate(HandleType handleType)2717 void onAllocate(HandleType handleType)
2718 {
2719 mActiveCounts[handleType]++;
2720 mAllocatedCounts[handleType]++;
2721 }
2722
onDeallocate(HandleType handleType)2723 void onDeallocate(HandleType handleType) { mActiveCounts[handleType]--; }
2724
getActive(HandleType handleType)2725 uint32_t getActive(HandleType handleType) const { return mActiveCounts[handleType]; }
getAllocated(HandleType handleType)2726 uint32_t getAllocated(HandleType handleType) const { return mAllocatedCounts[handleType]; }
2727
2728 private:
2729 angle::PackedEnumMap<HandleType, uint32_t> mActiveCounts;
2730 angle::PackedEnumMap<HandleType, uint32_t> mAllocatedCounts;
2731 };
2732
usesImageInRenderPass(const ImageHelper & image)2733 ANGLE_INLINE bool CommandBufferHelper::usesImageInRenderPass(const ImageHelper &image) const
2734 {
2735 ASSERT(mIsRenderPassCommandBuffer);
2736 return mRenderPassUsedImages.contains(image.getImageSerial().getValue());
2737 }
2738
2739 // Sometimes ANGLE issues a command internally, such as copies, draws and dispatches that do not
2740 // directly correspond to the application draw/dispatch call. Before the command is recorded in the
2741 // command buffer, the render pass may need to be broken and/or appropriate barriers may need to be
2742 // inserted. The following struct aggregates all resources that such internal commands need.
// Describes one buffer access an internal command will perform: which buffer, with what
// access flags, at which pipeline stage. Used to place barriers before the command.
struct CommandBufferBufferAccess
{
    BufferHelper *buffer;
    VkAccessFlags accessType;
    PipelineStage stage;
};
// Describes one image access an internal command will perform: which image, which aspects,
// and the layout the image must be in for the command.
struct CommandBufferImageAccess
{
    ImageHelper *image;
    VkImageAspectFlags aspectFlags;
    ImageLayout imageLayout;
};
// An image write access, extended with the level/layer range being written so the image's
// staged-update and content-defined tracking can be updated precisely.
struct CommandBufferImageWrite
{
    CommandBufferImageAccess access;
    gl::LevelIndex levelStart;
    uint32_t levelCount;
    uint32_t layerStart;
    uint32_t layerCount;
};
// Aggregates the buffer/image accesses an internal ANGLE command (copy, draw, dispatch)
// will perform, so the caller can break the render pass and insert barriers in one place
// before recording. The on*() helpers map each operation to the appropriate access
// flags / pipeline stage / image layout.
class CommandBufferAccess : angle::NonCopyable
{
  public:
    CommandBufferAccess();
    ~CommandBufferAccess();

    // Buffer read as a transfer (copy) source.
    void onBufferTransferRead(BufferHelper *buffer)
    {
        onBufferRead(VK_ACCESS_TRANSFER_READ_BIT, PipelineStage::Transfer, buffer);
    }
    // Buffer written as a transfer (copy) destination.
    void onBufferTransferWrite(BufferHelper *buffer)
    {
        onBufferWrite(VK_ACCESS_TRANSFER_WRITE_BIT, PipelineStage::Transfer, buffer);
    }
    // Buffer copied onto itself: recorded as a write carrying both read and write access.
    void onBufferSelfCopy(BufferHelper *buffer)
    {
        onBufferWrite(VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
                      PipelineStage::Transfer, buffer);
    }
    // Buffer read from a compute shader.
    void onBufferComputeShaderRead(BufferHelper *buffer)
    {
        onBufferRead(VK_ACCESS_SHADER_READ_BIT, PipelineStage::ComputeShader, buffer);
    }
    // Buffer written from a compute shader.
    void onBufferComputeShaderWrite(BufferHelper *buffer)
    {
        onBufferWrite(VK_ACCESS_SHADER_WRITE_BIT, PipelineStage::ComputeShader, buffer);
    }

    // Image read as a transfer (copy) source.
    void onImageTransferRead(VkImageAspectFlags aspectFlags, ImageHelper *image)
    {
        onImageRead(aspectFlags, ImageLayout::TransferSrc, image);
    }
    // Image written as a transfer (copy) destination; the level/layer range is tracked.
    void onImageTransferWrite(gl::LevelIndex levelStart,
                              uint32_t levelCount,
                              uint32_t layerStart,
                              uint32_t layerCount,
                              VkImageAspectFlags aspectFlags,
                              ImageHelper *image)
    {
        onImageWrite(levelStart, levelCount, layerStart, layerCount, aspectFlags,
                     ImageLayout::TransferDst, image);
    }
    // Image read from a compute shader.
    void onImageComputeShaderRead(VkImageAspectFlags aspectFlags, ImageHelper *image)
    {
        onImageRead(aspectFlags, ImageLayout::ComputeShaderReadOnly, image);
    }
    // Image written from a compute shader; the level/layer range is tracked.
    void onImageComputeShaderWrite(gl::LevelIndex levelStart,
                                   uint32_t levelCount,
                                   uint32_t layerStart,
                                   uint32_t layerCount,
                                   VkImageAspectFlags aspectFlags,
                                   ImageHelper *image)
    {
        onImageWrite(levelStart, levelCount, layerStart, layerCount, aspectFlags,
                     ImageLayout::ComputeShaderWrite, image);
    }

    // The limits reflect the current maximum concurrent usage of each resource type. ASSERTs will
    // fire if this limit is exceeded in the future.
    using ReadBuffers = angle::FixedVector<CommandBufferBufferAccess, 2>;
    using WriteBuffers = angle::FixedVector<CommandBufferBufferAccess, 2>;
    using ReadImages = angle::FixedVector<CommandBufferImageAccess, 2>;
    using WriteImages = angle::FixedVector<CommandBufferImageWrite, 1>;

    const ReadBuffers &getReadBuffers() const { return mReadBuffers; }
    const WriteBuffers &getWriteBuffers() const { return mWriteBuffers; }
    const ReadImages &getReadImages() const { return mReadImages; }
    const WriteImages &getWriteImages() const { return mWriteImages; }

  private:
    void onBufferRead(VkAccessFlags readAccessType, PipelineStage readStage, BufferHelper *buffer);
    void onBufferWrite(VkAccessFlags writeAccessType,
                       PipelineStage writeStage,
                       BufferHelper *buffer);

    void onImageRead(VkImageAspectFlags aspectFlags, ImageLayout imageLayout, ImageHelper *image);
    void onImageWrite(gl::LevelIndex levelStart,
                      uint32_t levelCount,
                      uint32_t layerStart,
                      uint32_t layerCount,
                      VkImageAspectFlags aspectFlags,
                      ImageLayout imageLayout,
                      ImageHelper *image);

    ReadBuffers mReadBuffers;
    WriteBuffers mWriteBuffers;
    ReadImages mReadImages;
    WriteImages mWriteImages;
};
2852 } // namespace vk
2853 } // namespace rx
2854
2855 #endif // LIBANGLE_RENDERER_VULKAN_VK_HELPERS_H_
2856