//
// Copyright 2018 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// vk_helpers:
//   Helper utility classes that manage Vulkan resources.

#ifndef LIBANGLE_RENDERER_VULKAN_VK_HELPERS_H_
#define LIBANGLE_RENDERER_VULKAN_VK_HELPERS_H_

#include "common/MemoryBuffer.h"
#include "libANGLE/renderer/vulkan/ResourceVk.h"
#include "libANGLE/renderer/vulkan/vk_cache_utils.h"

namespace gl
{
class ImageIndex;
}  // namespace gl

namespace rx
{
namespace vk
{
// Usage flags for the dynamic buffers below. Each also carries the storage-buffer usage bit in
// addition to its primary usage.
constexpr VkBufferUsageFlags kVertexBufferUsageFlags =
    VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr VkBufferUsageFlags kIndexBufferUsageFlags =
    VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr VkBufferUsageFlags kIndirectBufferUsageFlags =
    VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr size_t kVertexBufferAlignment   = 4;
constexpr size_t kIndexBufferAlignment    = 4;
constexpr size_t kIndirectBufferAlignment = 4;

// Staging buffers are usable as both transfer source (uploads) and destination (readbacks).
constexpr VkBufferUsageFlags kStagingBufferFlags =
    VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
constexpr size_t kStagingBufferSize = 1024 * 16;

constexpr VkImageCreateFlags kVkImageCreateFlagsNone = 0;

using StagingBufferOffsetArray = std::array<VkDeviceSize, 2>;

// A bound texture unit: the texture together with the sampler used to read it.
struct TextureUnit final
{
    TextureVk *texture;
    SamplerVk *sampler;
};

// A dynamic buffer is conceptually an infinitely long buffer. Each time you write to the buffer,
// you will always write to a previously unused portion. After a series of writes, you must flush
// the buffer data to the device. Buffer lifetime currently assumes that each new allocation will
// last as long or longer than each prior allocation.
//
// Dynamic buffers are used to implement a variety of data streaming operations in Vulkan, such
// as for immediate vertex array and element array data, uniform updates, and other dynamic data.
//
// Internally dynamic buffers keep a collection of VkBuffers. When we write past the end of a
// currently active VkBuffer we keep it until it is no longer in use. We then mark it available
// for future allocations in a free list.
class BufferHelper;
class DynamicBuffer : angle::NonCopyable
{
  public:
    DynamicBuffer();
    DynamicBuffer(DynamicBuffer &&other);
    ~DynamicBuffer();

    // Init is called after the buffer creation so that the alignment can be specified later.
    void init(RendererVk *renderer,
              VkBufferUsageFlags usage,
              size_t alignment,
              size_t initialSize,
              bool hostVisible);

    // Init that gives the ability to pass in specified memory property flags for the buffer.
    void initWithFlags(RendererVk *renderer,
                       VkBufferUsageFlags usage,
                       size_t alignment,
                       size_t initialSize,
                       VkMemoryPropertyFlags memoryProperty);

    // This call will allocate a new region at the end of the buffer. It internally may trigger
    // a new buffer to be created (which is returned in the optional parameter
    // `newBufferAllocatedOut`). The new region will be in the returned buffer at given offset. If
    // a memory pointer is given, the buffer will be automatically map()ed.
    angle::Result allocate(ContextVk *contextVk,
                           size_t sizeInBytes,
                           uint8_t **ptrOut,
                           VkBuffer *bufferOut,
                           VkDeviceSize *offsetOut,
                           bool *newBufferAllocatedOut);

    // After a sequence of writes, call flush to ensure the data is visible to the device.
    angle::Result flush(ContextVk *contextVk);

    // After a sequence of writes, call invalidate to ensure the data is visible to the host.
    angle::Result invalidate(ContextVk *contextVk);

    // This releases resources when they might currently be in use.
    void release(RendererVk *renderer);

    // This releases all the buffers that have been allocated since this was last called.
    void releaseInFlightBuffers(ContextVk *contextVk);

    // This frees resources immediately.
    void destroy(RendererVk *renderer);

    // Returns the VkBuffer currently being appended to (may be null before the first allocate).
    BufferHelper *getCurrentBuffer() { return mBuffer; }

    void updateAlignment(RendererVk *renderer, size_t alignment);

    // For testing only!
    void setMinimumSizeForTesting(size_t minSize);

    // True when the backing memory is host-coherent, i.e. flush()/invalidate() are not needed
    // for host writes/reads to become visible.
    bool isCoherent() const
    {
        return (mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
    }

  private:
    void reset();
    angle::Result allocateNewBuffer(ContextVk *contextVk);
    void releaseBufferListToRenderer(RendererVk *renderer, std::vector<BufferHelper *> *buffers);
    void destroyBufferList(RendererVk *renderer, std::vector<BufferHelper *> *buffers);

    VkBufferUsageFlags mUsage;
    bool mHostVisible;
    size_t mInitialSize;
    // Currently-active buffer; new allocations are carved out of it until it fills up.
    BufferHelper *mBuffer;
    uint32_t mNextAllocationOffset;
    uint32_t mLastFlushOrInvalidateOffset;
    size_t mSize;
    size_t mAlignment;
    VkMemoryPropertyFlags mMemoryPropertyFlags;

    // Buffers still potentially referenced by in-flight GPU work, and buffers ready for reuse.
    std::vector<BufferHelper *> mInFlightBuffers;
    std::vector<BufferHelper *> mBufferFreeList;
};

// Based off of the DynamicBuffer class, DynamicShadowBuffer provides
// a similar conceptually infinitely long buffer that will only be written
// to and read by the CPU. This can be used to provide CPU cached copies of
// GPU-read only buffers. The value add here is that when an app requests
// CPU access to a buffer we can fulfill such a request in O(1) time since
// we don't need to wait for GPU to be done with in-flight commands.
146 // 147 // The hidden cost here is that any operation that updates a buffer, either 148 // through a buffer sub data update or a buffer-to-buffer copy will have an 149 // additional overhead of having to update its CPU only buffer 150 class DynamicShadowBuffer : public angle::NonCopyable 151 { 152 public: 153 DynamicShadowBuffer(); 154 DynamicShadowBuffer(DynamicShadowBuffer &&other); 155 ~DynamicShadowBuffer(); 156 157 // Initialize the DynamicShadowBuffer. 158 void init(size_t initialSize); 159 160 // Returns whether this DynamicShadowBuffer is active valid()161 ANGLE_INLINE bool valid() { return (mSize != 0); } 162 163 // This call will actually allocate a new CPU only memory from the heap. 164 // The size can be different than the one specified during `init`. 165 angle::Result allocate(size_t sizeInBytes); 166 updateData(const uint8_t * data,size_t size,size_t offset)167 ANGLE_INLINE void updateData(const uint8_t *data, size_t size, size_t offset) 168 { 169 ASSERT(!mBuffer.empty()); 170 // Memcopy data into the buffer 171 memcpy((mBuffer.data() + offset), data, size); 172 } 173 174 // Map the CPU only buffer and return the pointer. We map the entire buffer for now. map(size_t offset,void ** mapPtr)175 ANGLE_INLINE void map(size_t offset, void **mapPtr) 176 { 177 ASSERT(mapPtr); 178 ASSERT(!mBuffer.empty()); 179 *mapPtr = mBuffer.data() + offset; 180 } 181 182 // Unmap the CPU only buffer, NOOP for now unmap()183 ANGLE_INLINE void unmap() {} 184 185 // This releases resources when they might currently be in use. 186 void release(); 187 188 // This frees resources immediately. 
189 void destroy(VkDevice device); 190 getCurrentBuffer()191 ANGLE_INLINE uint8_t *getCurrentBuffer() 192 { 193 ASSERT(!mBuffer.empty()); 194 return mBuffer.data(); 195 } 196 getCurrentBuffer()197 ANGLE_INLINE const uint8_t *getCurrentBuffer() const 198 { 199 ASSERT(!mBuffer.empty()); 200 return mBuffer.data(); 201 } 202 203 private: 204 void reset(); 205 206 size_t mInitialSize; 207 size_t mSize; 208 angle::MemoryBuffer mBuffer; 209 }; 210 211 // Uses DescriptorPool to allocate descriptor sets as needed. If a descriptor pool becomes full, we 212 // allocate new pools internally as needed. RendererVk takes care of the lifetime of the discarded 213 // pools. Note that we used a fixed layout for descriptor pools in ANGLE. Uniform buffers must 214 // use set zero and combined Image Samplers must use set 1. We conservatively count each new set 215 // using the maximum number of descriptor sets and buffers with each allocation. Currently: 2 216 // (Vertex/Fragment) uniform buffers and 64 (MAX_ACTIVE_TEXTURES) image/samplers. 217 218 // Shared handle to a descriptor pool. Each helper is allocated from the dynamic descriptor pool. 219 // Can be used to share descriptor pools between multiple ProgramVks and the ContextVk. 
class DescriptorPoolHelper
{
  public:
    DescriptorPoolHelper();
    ~DescriptorPoolHelper();

    bool valid() { return mDescriptorPool.valid(); }

    bool hasCapacity(uint32_t descriptorSetCount) const;
    angle::Result init(Context *context,
                       const std::vector<VkDescriptorPoolSize> &poolSizes,
                       uint32_t maxSets);
    void destroy(VkDevice device);
    void release(ContextVk *contextVk);

    angle::Result allocateSets(ContextVk *contextVk,
                               const VkDescriptorSetLayout *descriptorSetLayout,
                               uint32_t descriptorSetCount,
                               VkDescriptorSet *descriptorSetsOut);

    // Record the serial of the most recent work using this pool so it is not recycled while
    // potentially still referenced by in-flight commands.
    void updateSerial(Serial serial) { mMostRecentSerial = serial; }

    Serial getSerial() const { return mMostRecentSerial; }

  private:
    // NOTE(review): presumably the number of sets still available in mDescriptorPool,
    // consulted by hasCapacity() -- confirm against the .cpp.
    uint32_t mFreeDescriptorSets;
    DescriptorPool mDescriptorPool;
    Serial mMostRecentSerial;
};

using RefCountedDescriptorPoolHelper  = RefCounted<DescriptorPoolHelper>;
using RefCountedDescriptorPoolBinding = BindingPointer<DescriptorPoolHelper>;

class DynamicDescriptorPool final : angle::NonCopyable
{
  public:
    DynamicDescriptorPool();
    ~DynamicDescriptorPool();

    // The DynamicDescriptorPool only handles one pool size at this time.
    // Note that setSizes[i].descriptorCount is expected to be the number of descriptors in
    // an individual set. The pool size will be calculated accordingly.
    angle::Result init(ContextVk *contextVk,
                       const VkDescriptorPoolSize *setSizes,
                       uint32_t setSizeCount);
    void destroy(VkDevice device);
    void release(ContextVk *contextVk);

    // We use the descriptor type to help count the number of free sets.
    // By convention, sets are indexed according to the constants in vk_cache_utils.h.
    // Convenience overload that discards the "did a new pool get allocated" signal.
    ANGLE_INLINE angle::Result allocateSets(ContextVk *contextVk,
                                            const VkDescriptorSetLayout *descriptorSetLayout,
                                            uint32_t descriptorSetCount,
                                            RefCountedDescriptorPoolBinding *bindingOut,
                                            VkDescriptorSet *descriptorSetsOut)
    {
        bool ignoreNewPoolAllocated;
        return allocateSetsAndGetInfo(contextVk, descriptorSetLayout, descriptorSetCount,
                                      bindingOut, descriptorSetsOut, &ignoreNewPoolAllocated);
    }

    // We use the descriptor type to help count the number of free sets.
    // By convention, sets are indexed according to the constants in vk_cache_utils.h.
    angle::Result allocateSetsAndGetInfo(ContextVk *contextVk,
                                         const VkDescriptorSetLayout *descriptorSetLayout,
                                         uint32_t descriptorSetCount,
                                         RefCountedDescriptorPoolBinding *bindingOut,
                                         VkDescriptorSet *descriptorSetsOut,
                                         bool *newPoolAllocatedOut);

    // For testing only!
    void setMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool);

  private:
    angle::Result allocateNewPool(ContextVk *contextVk);

    uint32_t mMaxSetsPerPool;
    size_t mCurrentPoolIndex;
    std::vector<RefCountedDescriptorPoolHelper *> mDescriptorPools;
    std::vector<VkDescriptorPoolSize> mPoolSizes;
};

// A collection of fixed-size sub-pools that grows on demand. Shared base for the query and
// semaphore pools below.
template <typename Pool>
class DynamicallyGrowingPool : angle::NonCopyable
{
  public:
    DynamicallyGrowingPool();
    virtual ~DynamicallyGrowingPool();

    bool isValid() { return mPoolSize > 0; }

  protected:
    angle::Result initEntryPool(Context *contextVk, uint32_t poolSize);
    void destroyEntryPool();

    // Checks to see if any pool is already free, in which case it sets it as current pool and
    // returns true.
    bool findFreeEntryPool(ContextVk *contextVk);

    // Allocates a new entry and initializes it with the given pool.
    angle::Result allocateNewEntryPool(ContextVk *contextVk, Pool &&pool);

    // Called by the implementation whenever an entry is freed.
    void onEntryFreed(ContextVk *contextVk, size_t poolIndex);

    // The pool size, to know when a pool is completely freed.
    uint32_t mPoolSize;

    std::vector<Pool> mPools;

    struct PoolStats
    {
        // A count corresponding to each pool indicating how many of its allocated entries
        // have been freed. Once that value reaches mPoolSize for each pool, that pool is
        // considered free and reusable. While keeping a bitset would allow allocation of each
        // index, the slight runtime overhead of finding free indices is not worth the slight
        // memory overhead of creating new pools when unnecessary.
        uint32_t freedCount;
        // The serial of the renderer is stored on each object free to make sure no
        // new allocations are made from the pool until it's not in use.
        Serial serial;
    };
    std::vector<PoolStats> mPoolStats;

    // Index into mPools indicating pool we are currently allocating from.
    size_t mCurrentPool;
    // Index inside mPools[mCurrentPool] indicating which index can be allocated next.
    uint32_t mCurrentFreeEntry;
};

// DynamicQueryPool allocates indices out of QueryPool as needed. Once a QueryPool is exhausted,
// another is created. The query pools live permanently, but are recycled as indices get freed.

// These are arbitrary default sizes for query pools.
constexpr uint32_t kDefaultOcclusionQueryPoolSize = 64;
constexpr uint32_t kDefaultTimestampQueryPoolSize = 64;

class QueryHelper;

class DynamicQueryPool final : public DynamicallyGrowingPool<QueryPool>
{
  public:
    DynamicQueryPool();
    ~DynamicQueryPool() override;

    angle::Result init(ContextVk *contextVk, VkQueryType type, uint32_t poolSize);
    void destroy(VkDevice device);

    angle::Result allocateQuery(ContextVk *contextVk, QueryHelper *queryOut);
    void freeQuery(ContextVk *contextVk, QueryHelper *query);

    const QueryPool &getQueryPool(size_t index) const { return mPools[index]; }

  private:
    angle::Result allocateNewPool(ContextVk *contextVk);

    // Information required to create new query pools
    VkQueryType mQueryType;
};

// Queries in vulkan are identified by the query pool and an index for a query within that pool.
// Unlike other pools, such as descriptor pools where an allocation returns an independent object
// from the pool, the query allocations are not done through a Vulkan function and are only an
// integer index.
//
// Furthermore, to support arbitrarily large number of queries, DynamicQueryPool creates query
// pools of a fixed size as needed and allocates indices within those pools.
//
// The QueryHelper class below keeps the pool and index pair together.
class QueryHelper final
{
  public:
    QueryHelper();
    ~QueryHelper();

    void init(const DynamicQueryPool *dynamicQueryPool,
              const size_t queryPoolIndex,
              uint32_t query);
    void deinit();

    bool valid() const { return mDynamicQueryPool != nullptr; }

    angle::Result beginQuery(ContextVk *contextVk);
    angle::Result endQuery(ContextVk *contextVk);

    // for occlusion query
    // Must resetQueryPool outside of RenderPass before beginning occlusion query.
    void resetQueryPool(ContextVk *contextVk, CommandBuffer *outsideRenderPassCommandBuffer);
    void beginOcclusionQuery(ContextVk *contextVk, CommandBuffer *renderPassCommandBuffer);
    void endOcclusionQuery(ContextVk *contextVk, CommandBuffer *renderPassCommandBuffer);

    angle::Result flushAndWriteTimestamp(ContextVk *contextVk);
    // When syncing gpu/cpu time, main thread accesses primary directly
    void writeTimestamp(ContextVk *contextVk, PrimaryCommandBuffer *primary);
    // All other timestamp accesses should be made on outsideRenderPassCommandBuffer
    void writeTimestamp(ContextVk *contextVk, CommandBuffer *outsideRenderPassCommandBuffer);

    Serial getStoredQueueSerial() { return mMostRecentSerial; }
    bool hasPendingWork(ContextVk *contextVk);

    angle::Result getUint64ResultNonBlocking(ContextVk *contextVk,
                                             uint64_t *resultOut,
                                             bool *availableOut);
    angle::Result getUint64Result(ContextVk *contextVk, uint64_t *resultOut);

  private:
    friend class DynamicQueryPool;
    // Valid only after init(); resolves the owning pool from the stored pool index.
    const QueryPool &getQueryPool() const
    {
        ASSERT(valid());
        return mDynamicQueryPool->getQueryPool(mQueryPoolIndex);
    }

    const DynamicQueryPool *mDynamicQueryPool;
    size_t mQueryPoolIndex;
    uint32_t mQuery;
    Serial mMostRecentSerial;
};

// DynamicSemaphorePool allocates semaphores as needed. It uses a std::vector
// as a pool to allocate many semaphores at once. The pools live permanently,
// but are recycled as semaphores get freed.

// These are arbitrary default sizes for semaphore pools.
constexpr uint32_t kDefaultSemaphorePoolSize = 64;

class SemaphoreHelper;

class DynamicSemaphorePool final : public DynamicallyGrowingPool<std::vector<Semaphore>>
{
  public:
    DynamicSemaphorePool();
    ~DynamicSemaphorePool() override;

    angle::Result init(ContextVk *contextVk, uint32_t poolSize);
    void destroy(VkDevice device);

    // NOTE(review): hides the identically-implemented non-virtual
    // DynamicallyGrowingPool::isValid(); looks redundant -- confirm before removing.
    bool isValid() { return mPoolSize > 0; }

    // autoFree can be used to allocate a semaphore that's expected to be freed at the end of the
    // frame. This renders freeSemaphore unnecessary and saves an eventual search.
    angle::Result allocateSemaphore(ContextVk *contextVk, SemaphoreHelper *semaphoreOut);
    void freeSemaphore(ContextVk *contextVk, SemaphoreHelper *semaphore);

  private:
    angle::Result allocateNewPool(ContextVk *contextVk);
};

// Semaphores that are allocated from the semaphore pool are encapsulated in a helper object,
// keeping track of where in the pool they are allocated from.
class SemaphoreHelper final : angle::NonCopyable
{
  public:
    SemaphoreHelper();
    ~SemaphoreHelper();

    SemaphoreHelper(SemaphoreHelper &&other);
    SemaphoreHelper &operator=(SemaphoreHelper &&other);

    void init(const size_t semaphorePoolIndex, const Semaphore *semaphore);
    void deinit();

    const Semaphore *getSemaphore() const { return mSemaphore; }

    // Used only by DynamicSemaphorePool.
    size_t getSemaphorePoolIndex() const { return mSemaphorePoolIndex; }

  private:
    size_t mSemaphorePoolIndex;
    // Non-owning pointer into the pool's storage.
    const Semaphore *mSemaphore;
};

// This class' responsibility is to create index buffers needed to support line loops in Vulkan.
// In the setup phase of drawing, the createIndexBuffer method should be called with the
// current draw call parameters.
// If an element array buffer is bound for an indexed draw, use
// createIndexBufferFromElementArrayBuffer.
//
// If the user wants to draw a loop between [v1, v2, v3], we will create an indexed buffer with
// these indexes: [0, 1, 2, 3, 0] to emulate the loop.
class LineLoopHelper final : angle::NonCopyable
{
  public:
    LineLoopHelper(RendererVk *renderer);
    ~LineLoopHelper();

    angle::Result getIndexBufferForDrawArrays(ContextVk *contextVk,
                                              uint32_t clampedVertexCount,
                                              GLint firstVertex,
                                              BufferHelper **bufferOut,
                                              VkDeviceSize *offsetOut);

    angle::Result getIndexBufferForElementArrayBuffer(ContextVk *contextVk,
                                                      BufferVk *elementArrayBufferVk,
                                                      gl::DrawElementsType glIndexType,
                                                      int indexCount,
                                                      intptr_t elementArrayOffset,
                                                      BufferHelper **bufferOut,
                                                      VkDeviceSize *bufferOffsetOut,
                                                      uint32_t *indexCountOut);

    angle::Result streamIndices(ContextVk *contextVk,
                                gl::DrawElementsType glIndexType,
                                GLsizei indexCount,
                                const uint8_t *srcPtr,
                                BufferHelper **bufferOut,
                                VkDeviceSize *bufferOffsetOut,
                                uint32_t *indexCountOut);

    angle::Result streamIndicesIndirect(ContextVk *contextVk,
                                        gl::DrawElementsType glIndexType,
                                        BufferHelper *indexBuffer,
                                        BufferHelper *indirectBuffer,
                                        VkDeviceSize indirectBufferOffset,
                                        BufferHelper **indexBufferOut,
                                        VkDeviceSize *indexBufferOffsetOut,
                                        BufferHelper **indirectBufferOut,
                                        VkDeviceSize *indirectBufferOffsetOut);

    angle::Result streamArrayIndirect(ContextVk *contextVk,
                                      size_t vertexCount,
                                      BufferHelper *arrayIndirectBuffer,
                                      VkDeviceSize arrayIndirectBufferOffset,
                                      BufferHelper **indexBufferOut,
                                      VkDeviceSize *indexBufferOffsetOut,
                                      BufferHelper **indexIndirectBufferOut,
                                      VkDeviceSize *indexIndirectBufferOffsetOut);

    void release(ContextVk *contextVk);
    void destroy(RendererVk *renderer);

    static void Draw(uint32_t count, uint32_t baseVertex, CommandBuffer *commandBuffer);

  private:
    DynamicBuffer mDynamicIndexBuffer;
    DynamicBuffer mDynamicIndirectBuffer;
};

// This defines enum for VkPipelineStageFlagBits so that we can use it to compare and index into
// array.
enum class PipelineStage : uint16_t
{
    // Below are ordered based on Graphics Pipeline Stages
    TopOfPipe             = 0,
    DrawIndirect          = 1,
    VertexInput           = 2,
    VertexShader          = 3,
    GeometryShader        = 4,
    TransformFeedback     = 5,
    EarlyFragmentTest     = 6,
    FragmentShader        = 7,
    LateFragmentTest      = 8,
    ColorAttachmentOutput = 9,

    // Compute specific pipeline Stage
    ComputeShader = 10,

    // Transfer specific pipeline Stage
    Transfer     = 11,
    BottomOfPipe = 12,

    // Host specific pipeline stage
    Host = 13,

    InvalidEnum = 14,
    EnumCount   = InvalidEnum,
};
using PipelineStagesMask = angle::PackedEnumBitSet<PipelineStage, uint16_t>;

// This wraps data and API for vkCmdPipelineBarrier call
class PipelineBarrier : angle::NonCopyable
{
  public:
    PipelineBarrier()
        : mSrcStageMask(0),
          mDstStageMask(0),
          mMemoryBarrierSrcAccess(0),
          mMemoryBarrierDstAccess(0),
          mImageMemoryBarriers()
    {}
    ~PipelineBarrier() = default;

    // Empty when no global memory barrier has accumulated and there are no image barriers.
    bool isEmpty() const { return mImageMemoryBarriers.empty() && mMemoryBarrierSrcAccess == 0; }

    // Records all accumulated barriers into |primary| as a single vkCmdPipelineBarrier call,
    // then resets this object for reuse.  No-op if nothing has accumulated.
    void execute(PrimaryCommandBuffer *primary)
    {
        if (isEmpty())
        {
            return;
        }

        // Issue vkCmdPipelineBarrier call
        VkMemoryBarrier memoryBarrier = {};
        uint32_t memoryBarrierCount   = 0;
        if (mMemoryBarrierSrcAccess != 0)
        {
            memoryBarrier.sType         = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
            memoryBarrier.srcAccessMask = mMemoryBarrierSrcAccess;
            memoryBarrier.dstAccessMask = mMemoryBarrierDstAccess;
            memoryBarrierCount++;
        }
        primary->pipelineBarrier(
            mSrcStageMask, mDstStageMask, 0, memoryBarrierCount, &memoryBarrier, 0, nullptr,
            static_cast<uint32_t>(mImageMemoryBarriers.size()), mImageMemoryBarriers.data());

        reset();
    }

    // merge two barriers into one; |other| is reset afterwards.
    void merge(PipelineBarrier *other)
    {
        mSrcStageMask |= other->mSrcStageMask;
        mDstStageMask |= other->mDstStageMask;
        mMemoryBarrierSrcAccess |= other->mMemoryBarrierSrcAccess;
        mMemoryBarrierDstAccess |= other->mMemoryBarrierDstAccess;
        mImageMemoryBarriers.insert(mImageMemoryBarriers.end(),
                                    other->mImageMemoryBarriers.begin(),
                                    other->mImageMemoryBarriers.end());
        other->reset();
    }

    void mergeMemoryBarrier(VkPipelineStageFlags srcStageMask,
                            VkPipelineStageFlags dstStageMask,
                            VkFlags srcAccess,
                            VkFlags dstAccess)
    {
        mSrcStageMask |= srcStageMask;
        mDstStageMask |= dstStageMask;
        mMemoryBarrierSrcAccess |= srcAccess;
        mMemoryBarrierDstAccess |= dstAccess;
    }

    void mergeImageBarrier(VkPipelineStageFlags srcStageMask,
                           VkPipelineStageFlags dstStageMask,
                           const VkImageMemoryBarrier &imageMemoryBarrier)
    {
        // Barriers with chained extension structs cannot be batched blindly.
        ASSERT(imageMemoryBarrier.pNext == nullptr);
        mSrcStageMask |= srcStageMask;
        mDstStageMask |= dstStageMask;
        mImageMemoryBarriers.push_back(imageMemoryBarrier);
    }

    void reset()
    {
        mSrcStageMask           = 0;
        mDstStageMask           = 0;
        mMemoryBarrierSrcAccess = 0;
        mMemoryBarrierDstAccess = 0;
        mImageMemoryBarriers.clear();
    }

    void addDiagnosticsString(std::ostringstream &out) const;

  private:
    VkPipelineStageFlags mSrcStageMask;
    VkPipelineStageFlags mDstStageMask;
    VkFlags mMemoryBarrierSrcAccess;
    VkFlags mMemoryBarrierDstAccess;
    std::vector<VkImageMemoryBarrier> mImageMemoryBarriers;
};
using PipelineBarrierArray = angle::PackedEnumMap<PipelineStage, PipelineBarrier>;

class FramebufferHelper;

class BufferHelper final : public Resource
{
  public:
    BufferHelper();
    ~BufferHelper() override;

    angle::Result init(Context *context,
                       const VkBufferCreateInfo &createInfo,
                       VkMemoryPropertyFlags memoryPropertyFlags);
    void destroy(RendererVk *renderer);

    void release(RendererVk *renderer);

    bool valid() const { return mBuffer.valid(); }
    const Buffer &getBuffer() const { return mBuffer; }
    VkDeviceSize getSize() const { return mSize; }
    bool isHostVisible() const
    {
        return (mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    }
    bool isCoherent() const
    {
        return (mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
    }

    // Set write access mask when the buffer is modified externally, e.g. by host. There is no
    // graph resource to create a dependency to.
    void onExternalWrite(VkAccessFlags writeAccessType)
    {
        ASSERT(writeAccessType == VK_ACCESS_HOST_WRITE_BIT);
        mCurrentWriteAccess |= writeAccessType;
        mCurrentWriteStages |= VK_PIPELINE_STAGE_HOST_BIT;
    }

    // Also implicitly sets up the correct barriers.
    angle::Result copyFromBuffer(ContextVk *contextVk,
                                 BufferHelper *srcBuffer,
                                 uint32_t regionCount,
                                 const VkBufferCopy *copyRegions);

    // Note: currently only one view is allowed. If needs be, multiple views can be created
    // based on format.
    angle::Result initBufferView(ContextVk *contextVk, const Format &format);

    const BufferView &getBufferView() const
    {
        ASSERT(mBufferView.valid());
        return mBufferView;
    }

    const Format &getViewFormat() const
    {
        ASSERT(mViewFormat);
        return *mViewFormat;
    }

    // Maps the buffer memory (lazily, on first call) and returns a pointer to its start.
    angle::Result map(ContextVk *contextVk, uint8_t **ptrOut)
    {
        if (!mMappedMemory)
        {
            ANGLE_TRY(mapImpl(contextVk));
        }
        *ptrOut = mMappedMemory;
        return angle::Result::Continue;
    }

    angle::Result mapWithOffset(ContextVk *contextVk, uint8_t **ptrOut, size_t offset)
    {
        uint8_t *mapBufPointer;
        ANGLE_TRY(map(contextVk, &mapBufPointer));
        *ptrOut = mapBufPointer + offset;
        return angle::Result::Continue;
    }

    void unmap(RendererVk *renderer);

    // After a sequence of writes, call flush to ensure the data is visible to the device.
    angle::Result flush(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size);

    // After a sequence of writes, call invalidate to ensure the data is visible to the host.
    angle::Result invalidate(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size);

    void changeQueue(uint32_t newQueueFamilyIndex, CommandBuffer *commandBuffer);

    // Performs an ownership transfer from an external instance or API.
    void acquireFromExternal(ContextVk *contextVk,
                             uint32_t externalQueueFamilyIndex,
                             uint32_t rendererQueueFamilyIndex,
                             CommandBuffer *commandBuffer);

    // Performs an ownership transfer to an external instance or API.
    void releaseToExternal(ContextVk *contextVk,
                           uint32_t rendererQueueFamilyIndex,
                           uint32_t externalQueueFamilyIndex,
                           CommandBuffer *commandBuffer);

    // Returns true if the image is owned by an external API or instance.
    bool isReleasedToExternal() const;

    // Currently always returns false. Should be smarter about accumulation.
    bool canAccumulateRead(ContextVk *contextVk, VkAccessFlags readAccessType);
    bool canAccumulateWrite(ContextVk *contextVk, VkAccessFlags writeAccessType);

    bool updateReadBarrier(VkAccessFlags readAccessType,
                           VkPipelineStageFlags readStage,
                           PipelineBarrier *barrier);

    bool updateWriteBarrier(VkAccessFlags writeAccessType,
                            VkPipelineStageFlags writeStage,
                            PipelineBarrier *barrier);

  private:
    angle::Result mapImpl(ContextVk *contextVk);
    angle::Result initializeNonZeroMemory(Context *context, VkDeviceSize size);

    // Vulkan objects.
    Buffer mBuffer;
    BufferView mBufferView;
    Allocation mAllocation;

    // Cached properties.
    VkMemoryPropertyFlags mMemoryPropertyFlags;
    VkDeviceSize mSize;
    uint8_t *mMappedMemory;
    const Format *mViewFormat;
    uint32_t mCurrentQueueFamilyIndex;

    // For memory barriers.
    VkFlags mCurrentWriteAccess;
    VkFlags mCurrentReadAccess;
    VkPipelineStageFlags mCurrentWriteStages;
    VkPipelineStageFlags mCurrentReadStages;
};

// CommandBufferHelper (CBH) class wraps ANGLE's custom command buffer
// class, SecondaryCommandBuffer. This provides a way to temporarily
// store Vulkan commands that can be submitted in-line to a primary
// command buffer at a later time.
// The current plan is for the main ANGLE thread to record commands
// into the CBH and then pass the CBH off to a worker thread that will
// process the commands into a primary command buffer and then submit
// those commands to the queue.
824 struct CommandBufferHelper : angle::NonCopyable 825 { 826 public: 827 CommandBufferHelper(); 828 ~CommandBufferHelper(); 829 830 // General Functions (non-renderPass specific) 831 void initialize(bool isRenderPassCommandBuffer, bool mergeBarriers); 832 833 void bufferRead(vk::ResourceUseList *resourceUseList, 834 VkAccessFlags readAccessType, 835 vk::PipelineStage readStage, 836 vk::BufferHelper *buffer); 837 void bufferWrite(vk::ResourceUseList *resourceUseList, 838 VkAccessFlags writeAccessType, 839 vk::PipelineStage writeStage, 840 vk::BufferHelper *buffer); 841 842 void imageRead(vk::ResourceUseList *resourceUseList, 843 VkImageAspectFlags aspectFlags, 844 vk::ImageLayout imageLayout, 845 vk::ImageHelper *image); 846 847 void imageWrite(vk::ResourceUseList *resourceUseList, 848 VkImageAspectFlags aspectFlags, 849 vk::ImageLayout imageLayout, 850 vk::ImageHelper *image); 851 getCommandBufferCommandBufferHelper852 vk::CommandBuffer &getCommandBuffer() { return mCommandBuffer; } 853 854 angle::Result flushToPrimary(ContextVk *contextVk, vk::PrimaryCommandBuffer *primary); 855 856 void executeBarriers(vk::PrimaryCommandBuffer *primary); 857 emptyCommandBufferHelper858 bool empty() const { return (!mCommandBuffer.empty() || mRenderPassStarted) ? 
false : true; } setHasRenderPassCommandBufferHelper859 void setHasRenderPass(bool hasRenderPass) { mIsRenderPassCommandBuffer = hasRenderPass; } 860 void reset(); 861 void releaseToContextQueue(ContextVk *contextVk); 862 863 // RenderPass related functions startedCommandBufferHelper864 bool started() const 865 { 866 ASSERT(mIsRenderPassCommandBuffer); 867 return mRenderPassStarted; 868 } 869 870 void beginRenderPass(const vk::Framebuffer &framebuffer, 871 const gl::Rectangle &renderArea, 872 const vk::RenderPassDesc &renderPassDesc, 873 const vk::AttachmentOpsArray &renderPassAttachmentOps, 874 const vk::ClearValuesArray &clearValues, 875 vk::CommandBuffer **commandBufferOut); 876 877 void beginTransformFeedback(size_t validBufferCount, 878 const VkBuffer *counterBuffers, 879 bool rebindBuffers); 880 invalidateRenderPassColorAttachmentCommandBufferHelper881 void invalidateRenderPassColorAttachment(size_t attachmentIndex) 882 { 883 ASSERT(mIsRenderPassCommandBuffer); 884 SetBitField(mAttachmentOps[attachmentIndex].storeOp, VK_ATTACHMENT_STORE_OP_DONT_CARE); 885 } 886 invalidateRenderPassDepthAttachmentCommandBufferHelper887 void invalidateRenderPassDepthAttachment(size_t attachmentIndex) 888 { 889 ASSERT(mIsRenderPassCommandBuffer); 890 SetBitField(mAttachmentOps[attachmentIndex].storeOp, VK_ATTACHMENT_STORE_OP_DONT_CARE); 891 } 892 invalidateRenderPassStencilAttachmentCommandBufferHelper893 void invalidateRenderPassStencilAttachment(size_t attachmentIndex) 894 { 895 ASSERT(mIsRenderPassCommandBuffer); 896 SetBitField(mAttachmentOps[attachmentIndex].stencilStoreOp, 897 VK_ATTACHMENT_STORE_OP_DONT_CARE); 898 } 899 updateRenderPassAttachmentFinalLayoutCommandBufferHelper900 void updateRenderPassAttachmentFinalLayout(size_t attachmentIndex, vk::ImageLayout finalLayout) 901 { 902 ASSERT(mIsRenderPassCommandBuffer); 903 SetBitField(mAttachmentOps[attachmentIndex].finalLayout, finalLayout); 904 } 905 getRenderAreaCommandBufferHelper906 const gl::Rectangle &getRenderArea() 
const 907 { 908 ASSERT(mIsRenderPassCommandBuffer); 909 return mRenderArea; 910 } 911 912 void resumeTransformFeedbackIfStarted(); 913 void pauseTransformFeedbackIfStarted(); 914 getAndResetCounterCommandBufferHelper915 uint32_t getAndResetCounter() 916 { 917 ASSERT(mIsRenderPassCommandBuffer); 918 uint32_t count = mCounter; 919 mCounter = 0; 920 return count; 921 } 922 getFramebufferHandleCommandBufferHelper923 VkFramebuffer getFramebufferHandle() const 924 { 925 ASSERT(mIsRenderPassCommandBuffer); 926 return mFramebuffer.getHandle(); 927 } 928 929 // Dumping the command stream is disabled by default. 930 static constexpr bool kEnableCommandStreamDiagnostics = false; 931 932 private: 933 void addCommandDiagnostics(ContextVk *contextVk); 934 // Allocator used by this class. Using a pool allocator per CBH to avoid threading issues 935 // that occur w/ shared allocator between multiple CBHs. 936 angle::PoolAllocator mAllocator; 937 938 // General state (non-renderPass related) 939 PipelineBarrierArray mPipelineBarriers; 940 PipelineStagesMask mPipelineBarrierMask; 941 vk::CommandBuffer mCommandBuffer; 942 943 // RenderPass state 944 uint32_t mCounter; 945 vk::RenderPassDesc mRenderPassDesc; 946 vk::AttachmentOpsArray mAttachmentOps; 947 vk::Framebuffer mFramebuffer; 948 gl::Rectangle mRenderArea; 949 vk::ClearValuesArray mClearValues; 950 bool mRenderPassStarted; 951 952 // Transform feedback state 953 gl::TransformFeedbackBuffersArray<VkBuffer> mTransformFeedbackCounterBuffers; 954 uint32_t mValidTransformFeedbackBufferCount; 955 bool mRebindTransformFeedbackBuffers; 956 957 bool mIsRenderPassCommandBuffer; 958 bool mMergeBarriers; 959 }; 960 961 // Imagine an image going through a few layout transitions: 962 // 963 // srcStage 1 dstStage 2 srcStage 2 dstStage 3 964 // Layout 1 ------Transition 1-----> Layout 2 ------Transition 2------> Layout 3 965 // srcAccess 1 dstAccess 2 srcAccess 2 dstAccess 3 966 // \_________________ ___________________/ 967 // \/ 968 // A 
transition 969 // 970 // Every transition requires 6 pieces of information: from/to layouts, src/dst stage masks and 971 // src/dst access masks. At the moment we decide to transition the image to Layout 2 (i.e. 972 // Transition 1), we need to have Layout 1, srcStage 1 and srcAccess 1 stored as history of the 973 // image. To perform the transition, we need to know Layout 2, dstStage 2 and dstAccess 2. 974 // Additionally, we need to know srcStage 2 and srcAccess 2 to retain them for the next transition. 975 // 976 // That is, with the history kept, on every new transition we need 5 pieces of new information: 977 // layout/dstStage/dstAccess to transition into the layout, and srcStage/srcAccess for the future 978 // transition out from it. Given the small number of possible combinations of these values, an 979 // enum is used were each value encapsulates these 5 pieces of information: 980 // 981 // +--------------------------------+ 982 // srcStage 1 | dstStage 2 srcStage 2 | dstStage 3 983 // Layout 1 ------Transition 1-----> Layout 2 ------Transition 2------> Layout 3 984 // srcAccess 1 |dstAccess 2 srcAccess 2| dstAccess 3 985 // +--------------- ---------------+ 986 // \/ 987 // One enum value 988 // 989 // Note that, while generally dstStage for the to-transition and srcStage for the from-transition 990 // are the same, they may occasionally be BOTTOM_OF_PIPE and TOP_OF_PIPE respectively. 
991 enum class ImageLayout 992 { 993 Undefined = 0, 994 ExternalPreInitialized = 1, 995 ExternalShadersReadOnly = 2, 996 ExternalShadersWrite = 3, 997 TransferSrc = 4, 998 TransferDst = 5, 999 VertexShaderReadOnly = 6, 1000 VertexShaderWrite = 7, 1001 GeometryShaderReadOnly = 8, 1002 GeometryShaderWrite = 9, 1003 FragmentShaderReadOnly = 10, 1004 FragmentShaderWrite = 11, 1005 ComputeShaderReadOnly = 12, 1006 ComputeShaderWrite = 13, 1007 AllGraphicsShadersReadOnly = 14, 1008 AllGraphicsShadersReadWrite = 15, 1009 ColorAttachment = 16, 1010 DepthStencilAttachment = 17, 1011 Present = 18, 1012 1013 InvalidEnum = 19, 1014 EnumCount = 19, 1015 }; 1016 1017 VkImageLayout ConvertImageLayoutToVkImageLayout(ImageLayout imageLayout); 1018 1019 class ImageHelper final : public Resource, public angle::Subject 1020 { 1021 public: 1022 ImageHelper(); 1023 ImageHelper(ImageHelper &&other); 1024 ~ImageHelper() override; 1025 1026 void initStagingBuffer(RendererVk *renderer, 1027 const Format &format, 1028 VkBufferUsageFlags usageFlags, 1029 size_t initialSize); 1030 1031 angle::Result init(Context *context, 1032 gl::TextureType textureType, 1033 const VkExtent3D &extents, 1034 const Format &format, 1035 GLint samples, 1036 VkImageUsageFlags usage, 1037 uint32_t baseLevel, 1038 uint32_t maxLevel, 1039 uint32_t mipLevels, 1040 uint32_t layerCount); 1041 angle::Result initExternal(Context *context, 1042 gl::TextureType textureType, 1043 const VkExtent3D &extents, 1044 const Format &format, 1045 GLint samples, 1046 VkImageUsageFlags usage, 1047 VkImageCreateFlags additionalCreateFlags, 1048 ImageLayout initialLayout, 1049 const void *externalImageCreateInfo, 1050 uint32_t baseLevel, 1051 uint32_t maxLevel, 1052 uint32_t mipLevels, 1053 uint32_t layerCount); 1054 angle::Result initMemory(Context *context, 1055 const MemoryProperties &memoryProperties, 1056 VkMemoryPropertyFlags flags); 1057 angle::Result initExternalMemory(Context *context, 1058 const MemoryProperties 
&memoryProperties, 1059 const VkMemoryRequirements &memoryRequirements, 1060 const void *extraAllocationInfo, 1061 uint32_t currentQueueFamilyIndex, 1062 VkMemoryPropertyFlags flags); 1063 angle::Result initLayerImageView(Context *context, 1064 gl::TextureType textureType, 1065 VkImageAspectFlags aspectMask, 1066 const gl::SwizzleState &swizzleMap, 1067 ImageView *imageViewOut, 1068 uint32_t baseMipLevel, 1069 uint32_t levelCount, 1070 uint32_t baseArrayLayer, 1071 uint32_t layerCount) const; 1072 angle::Result initLayerImageViewImpl(Context *context, 1073 gl::TextureType textureType, 1074 VkImageAspectFlags aspectMask, 1075 const gl::SwizzleState &swizzleMap, 1076 ImageView *imageViewOut, 1077 uint32_t baseMipLevel, 1078 uint32_t levelCount, 1079 uint32_t baseArrayLayer, 1080 uint32_t layerCount, 1081 VkFormat imageFormat) const; 1082 angle::Result initImageView(Context *context, 1083 gl::TextureType textureType, 1084 VkImageAspectFlags aspectMask, 1085 const gl::SwizzleState &swizzleMap, 1086 ImageView *imageViewOut, 1087 uint32_t baseMipLevel, 1088 uint32_t levelCount); 1089 // Create a 2D[Array] for staging purposes. 
Used by: 1090 // 1091 // - TextureVk::copySubImageImplWithDraw 1092 // - FramebufferVk::readPixelsImpl 1093 // 1094 angle::Result init2DStaging(Context *context, 1095 const MemoryProperties &memoryProperties, 1096 const gl::Extents &glExtents, 1097 const Format &format, 1098 VkImageUsageFlags usage, 1099 uint32_t layerCount); 1100 1101 void releaseImage(RendererVk *rendererVk); 1102 void releaseStagingBuffer(RendererVk *renderer); 1103 valid()1104 bool valid() const { return mImage.valid(); } 1105 1106 VkImageAspectFlags getAspectFlags() const; 1107 // True if image contains both depth & stencil aspects 1108 bool isCombinedDepthStencilFormat() const; 1109 void destroy(RendererVk *renderer); release(RendererVk * renderer)1110 void release(RendererVk *renderer) { destroy(renderer); } 1111 1112 void init2DWeakReference(Context *context, 1113 VkImage handle, 1114 const gl::Extents &glExtents, 1115 const Format &format, 1116 GLint samples); 1117 void resetImageWeakReference(); 1118 getImage()1119 const Image &getImage() const { return mImage; } getDeviceMemory()1120 const DeviceMemory &getDeviceMemory() const { return mDeviceMemory; } 1121 getType()1122 VkImageType getType() const { return mImageType; } getExtents()1123 const VkExtent3D &getExtents() const { return mExtents; } getLayerCount()1124 uint32_t getLayerCount() const { return mLayerCount; } getLevelCount()1125 uint32_t getLevelCount() const { return mLevelCount; } getFormat()1126 const Format &getFormat() const { return *mFormat; } getSamples()1127 GLint getSamples() const { return mSamples; } 1128 setCurrentImageLayout(ImageLayout newLayout)1129 void setCurrentImageLayout(ImageLayout newLayout) { mCurrentLayout = newLayout; } getCurrentImageLayout()1130 ImageLayout getCurrentImageLayout() const { return mCurrentLayout; } 1131 VkImageLayout getCurrentLayout() const; 1132 1133 // Helper function to calculate the extents of a render target created for a certain mip of the 1134 // image. 
1135 gl::Extents getLevelExtents2D(uint32_t level) const; 1136 1137 // Clear either color or depth/stencil based on image format. 1138 void clear(VkImageAspectFlags aspectFlags, 1139 const VkClearValue &value, 1140 uint32_t mipLevel, 1141 uint32_t baseArrayLayer, 1142 uint32_t layerCount, 1143 CommandBuffer *commandBuffer); 1144 1145 gl::Extents getSize(const gl::ImageIndex &index) const; 1146 1147 // Return unique Serial for underlying image, first assigning it if it hasn't been set yet 1148 Serial getAssignSerial(ContextVk *contextVk); resetSerial()1149 void resetSerial() { mSerial = rx::kZeroSerial; } 1150 1151 static void Copy(ImageHelper *srcImage, 1152 ImageHelper *dstImage, 1153 const gl::Offset &srcOffset, 1154 const gl::Offset &dstOffset, 1155 const gl::Extents ©Size, 1156 const VkImageSubresourceLayers &srcSubresources, 1157 const VkImageSubresourceLayers &dstSubresources, 1158 CommandBuffer *commandBuffer); 1159 1160 angle::Result generateMipmapsWithBlit(ContextVk *contextVk, GLuint maxLevel); 1161 1162 // Resolve this image into a destination image. This image should be in the TransferSrc layout. 1163 // The destination image is automatically transitioned into TransferDst. 
1164 void resolve(ImageHelper *dest, const VkImageResolve ®ion, CommandBuffer *commandBuffer); 1165 1166 // Data staging 1167 void removeStagedUpdates(ContextVk *contextVk, uint32_t levelIndex, uint32_t layerIndex); 1168 1169 angle::Result stageSubresourceUpdateImpl(ContextVk *contextVk, 1170 const gl::ImageIndex &index, 1171 const gl::Extents &glExtents, 1172 const gl::Offset &offset, 1173 const gl::InternalFormat &formatInfo, 1174 const gl::PixelUnpackState &unpack, 1175 GLenum type, 1176 const uint8_t *pixels, 1177 const Format &vkFormat, 1178 const GLuint inputRowPitch, 1179 const GLuint inputDepthPitch, 1180 const GLuint inputSkipBytes); 1181 1182 angle::Result stageSubresourceUpdate(ContextVk *contextVk, 1183 const gl::ImageIndex &index, 1184 const gl::Extents &glExtents, 1185 const gl::Offset &offset, 1186 const gl::InternalFormat &formatInfo, 1187 const gl::PixelUnpackState &unpack, 1188 GLenum type, 1189 const uint8_t *pixels, 1190 const Format &vkFormat); 1191 1192 angle::Result stageSubresourceUpdateAndGetData(ContextVk *contextVk, 1193 size_t allocationSize, 1194 const gl::ImageIndex &imageIndex, 1195 const gl::Extents &glExtents, 1196 const gl::Offset &offset, 1197 uint8_t **destData); 1198 1199 angle::Result stageSubresourceUpdateFromBuffer(ContextVk *contextVk, 1200 size_t allocationSize, 1201 uint32_t mipLevel, 1202 uint32_t baseArrayLayer, 1203 uint32_t layerCount, 1204 uint32_t bufferRowLength, 1205 uint32_t bufferImageHeight, 1206 const VkExtent3D &extent, 1207 const VkOffset3D &offset, 1208 BufferHelper *stagingBuffer, 1209 StagingBufferOffsetArray stagingOffsets); 1210 1211 angle::Result stageSubresourceUpdateFromFramebuffer(const gl::Context *context, 1212 const gl::ImageIndex &index, 1213 const gl::Rectangle &sourceArea, 1214 const gl::Offset &dstOffset, 1215 const gl::Extents &dstExtent, 1216 const gl::InternalFormat &formatInfo, 1217 FramebufferVk *framebufferVk); 1218 1219 void stageSubresourceUpdateFromImage(ImageHelper *image, 1220 const 
gl::ImageIndex &index, 1221 const gl::Offset &destOffset, 1222 const gl::Extents &glExtents, 1223 const VkImageType imageType); 1224 1225 // Stage a clear to an arbitrary value. 1226 void stageClear(const gl::ImageIndex &index, 1227 VkImageAspectFlags aspectFlags, 1228 const VkClearValue &clearValue); 1229 1230 // Stage a clear based on robust resource init. 1231 angle::Result stageRobustResourceClearWithFormat(ContextVk *contextVk, 1232 const gl::ImageIndex &index, 1233 const gl::Extents &glExtents, 1234 const vk::Format &format); 1235 void stageRobustResourceClear(const gl::ImageIndex &index); 1236 1237 // This will use the underlying dynamic buffer to allocate some memory to be used as a src or 1238 // dst. 1239 angle::Result allocateStagingMemory(ContextVk *contextVk, 1240 size_t sizeInBytes, 1241 uint8_t **ptrOut, 1242 BufferHelper **bufferOut, 1243 StagingBufferOffsetArray *offsetOut, 1244 bool *newBufferAllocatedOut); 1245 1246 // Flush staged updates for a single subresource. Can optionally take a parameter to defer 1247 // clears to a subsequent RenderPass load op. 1248 angle::Result flushSingleSubresourceStagedUpdates(ContextVk *contextVk, 1249 uint32_t level, 1250 uint32_t layer, 1251 CommandBuffer *commandBuffer, 1252 ClearValuesArray *deferredClears, 1253 uint32_t deferredClearIndex); 1254 1255 // Flushes staged updates to a range of levels and layers from start to (but not including) end. 1256 // Due to the nature of updates (done wholly to a VkImageSubresourceLayers), some unsolicited 1257 // layers may also be updated. 1258 angle::Result flushStagedUpdates(ContextVk *contextVk, 1259 uint32_t levelStart, 1260 uint32_t levelEnd, 1261 uint32_t layerStart, 1262 uint32_t layerEnd, 1263 CommandBuffer *commandBuffer); 1264 1265 // Creates a command buffer and flushes all staged updates. 
This is used for one-time 1266 // initialization of resources that we don't expect to accumulate further staged updates, such 1267 // as with renderbuffers or surface images. 1268 angle::Result flushAllStagedUpdates(ContextVk *contextVk); 1269 1270 bool isUpdateStaged(uint32_t level, uint32_t layer); hasStagedUpdates()1271 bool hasStagedUpdates() const { return !mSubresourceUpdates.empty(); } 1272 1273 // changeLayout automatically skips the layout change if it's unnecessary. This function can be 1274 // used to prevent creating a command graph node and subsequently a command buffer for the sole 1275 // purpose of performing a transition (which may then not be issued). 1276 bool isLayoutChangeNecessary(ImageLayout newLayout) const; 1277 1278 template <typename CommandBufferT> changeLayout(VkImageAspectFlags aspectMask,ImageLayout newLayout,CommandBufferT * commandBuffer)1279 void changeLayout(VkImageAspectFlags aspectMask, 1280 ImageLayout newLayout, 1281 CommandBufferT *commandBuffer) 1282 { 1283 if (!isLayoutChangeNecessary(newLayout)) 1284 { 1285 return; 1286 } 1287 1288 forceChangeLayoutAndQueue(aspectMask, newLayout, mCurrentQueueFamilyIndex, commandBuffer); 1289 } 1290 isQueueChangeNeccesary(uint32_t newQueueFamilyIndex)1291 bool isQueueChangeNeccesary(uint32_t newQueueFamilyIndex) const 1292 { 1293 return mCurrentQueueFamilyIndex != newQueueFamilyIndex; 1294 } 1295 1296 void changeLayoutAndQueue(VkImageAspectFlags aspectMask, 1297 ImageLayout newLayout, 1298 uint32_t newQueueFamilyIndex, 1299 CommandBuffer *commandBuffer); 1300 1301 // Returns true if barrier has been generated 1302 bool updateLayoutAndBarrier(VkImageAspectFlags aspectMask, 1303 ImageLayout newLayout, 1304 PipelineBarrier *barrier); 1305 1306 // Performs an ownership transfer from an external instance or API. 
1307 void acquireFromExternal(ContextVk *contextVk, 1308 uint32_t externalQueueFamilyIndex, 1309 uint32_t rendererQueueFamilyIndex, 1310 ImageLayout currentLayout, 1311 CommandBuffer *commandBuffer); 1312 1313 // Performs an ownership transfer to an external instance or API. 1314 void releaseToExternal(ContextVk *contextVk, 1315 uint32_t rendererQueueFamilyIndex, 1316 uint32_t externalQueueFamilyIndex, 1317 ImageLayout desiredLayout, 1318 CommandBuffer *commandBuffer); 1319 1320 // Returns true if the image is owned by an external API or instance. 1321 bool isReleasedToExternal() const; 1322 1323 uint32_t getBaseLevel(); 1324 void setBaseAndMaxLevels(uint32_t baseLevel, uint32_t maxLevel); 1325 1326 angle::Result copyImageDataToBuffer(ContextVk *contextVk, 1327 size_t sourceLevel, 1328 uint32_t layerCount, 1329 uint32_t baseLayer, 1330 const gl::Box &sourceArea, 1331 BufferHelper **bufferOut, 1332 size_t *bufferSize, 1333 StagingBufferOffsetArray *bufferOffsetsOut, 1334 uint8_t **outDataPtr); 1335 1336 static angle::Result GetReadPixelsParams(ContextVk *contextVk, 1337 const gl::PixelPackState &packState, 1338 gl::Buffer *packBuffer, 1339 GLenum format, 1340 GLenum type, 1341 const gl::Rectangle &area, 1342 const gl::Rectangle &clippedArea, 1343 PackPixelsParams *paramsOut, 1344 GLuint *skipBytesOut); 1345 1346 angle::Result readPixelsForGetImage(ContextVk *contextVk, 1347 const gl::PixelPackState &packState, 1348 gl::Buffer *packBuffer, 1349 uint32_t level, 1350 uint32_t layer, 1351 GLenum format, 1352 GLenum type, 1353 void *pixels); 1354 1355 angle::Result readPixels(ContextVk *contextVk, 1356 const gl::Rectangle &area, 1357 const PackPixelsParams &packPixelsParams, 1358 VkImageAspectFlagBits copyAspectFlags, 1359 uint32_t level, 1360 uint32_t layer, 1361 void *pixels, 1362 DynamicBuffer *stagingBuffer); 1363 1364 angle::Result CalculateBufferInfo(ContextVk *contextVk, 1365 const gl::Extents &glExtents, 1366 const gl::InternalFormat &formatInfo, 1367 const 
gl::PixelUnpackState &unpack, 1368 GLenum type, 1369 bool is3D, 1370 GLuint *inputRowPitch, 1371 GLuint *inputDepthPitch, 1372 GLuint *inputSkipBytes); 1373 1374 private: 1375 enum class UpdateSource 1376 { 1377 Clear, 1378 Buffer, 1379 Image, 1380 }; 1381 struct ClearUpdate 1382 { 1383 VkImageAspectFlags aspectFlags; 1384 VkClearValue value; 1385 uint32_t levelIndex; 1386 uint32_t layerIndex; 1387 uint32_t layerCount; 1388 }; 1389 struct BufferUpdate 1390 { 1391 BufferHelper *bufferHelper; 1392 VkBufferImageCopy copyRegion; 1393 }; 1394 struct ImageUpdate 1395 { 1396 ImageHelper *image; 1397 VkImageCopy copyRegion; 1398 }; 1399 1400 struct SubresourceUpdate 1401 { 1402 SubresourceUpdate(); 1403 SubresourceUpdate(BufferHelper *bufferHelperIn, const VkBufferImageCopy ©Region); 1404 SubresourceUpdate(ImageHelper *image, const VkImageCopy ©Region); 1405 SubresourceUpdate(VkImageAspectFlags aspectFlags, 1406 const VkClearValue &clearValue, 1407 const gl::ImageIndex &imageIndex); 1408 SubresourceUpdate(const SubresourceUpdate &other); 1409 1410 void release(RendererVk *renderer); 1411 dstSubresourceSubresourceUpdate1412 const VkImageSubresourceLayers &dstSubresource() const 1413 { 1414 ASSERT(updateSource == UpdateSource::Buffer || updateSource == UpdateSource::Image); 1415 return updateSource == UpdateSource::Buffer ? buffer.copyRegion.imageSubresource 1416 : image.copyRegion.dstSubresource; 1417 } 1418 bool isUpdateToLayerLevel(uint32_t layerIndex, uint32_t levelIndex) const; 1419 1420 UpdateSource updateSource; 1421 union 1422 { 1423 ClearUpdate clear; 1424 BufferUpdate buffer; 1425 ImageUpdate image; 1426 }; 1427 }; 1428 1429 void initImageMemoryBarrierStruct(VkImageAspectFlags aspectMask, 1430 ImageLayout newLayout, 1431 uint32_t newQueueFamilyIndex, 1432 VkImageMemoryBarrier *imageMemoryBarrier) const; 1433 1434 // Generalized to accept both "primary" and "secondary" command buffers. 
1435 template <typename CommandBufferT> 1436 void forceChangeLayoutAndQueue(VkImageAspectFlags aspectMask, 1437 ImageLayout newLayout, 1438 uint32_t newQueueFamilyIndex, 1439 CommandBufferT *commandBuffer); 1440 1441 // If the image has emulated channels, we clear them once so as not to leave garbage on those 1442 // channels. 1443 void stageClearIfEmulatedFormat(Context *context); 1444 1445 void clearColor(const VkClearColorValue &color, 1446 uint32_t baseMipLevel, 1447 uint32_t levelCount, 1448 uint32_t baseArrayLayer, 1449 uint32_t layerCount, 1450 CommandBuffer *commandBuffer); 1451 1452 void clearDepthStencil(VkImageAspectFlags clearAspectFlags, 1453 const VkClearDepthStencilValue &depthStencil, 1454 uint32_t baseMipLevel, 1455 uint32_t levelCount, 1456 uint32_t baseArrayLayer, 1457 uint32_t layerCount, 1458 CommandBuffer *commandBuffer); 1459 1460 angle::Result initializeNonZeroMemory(Context *context, VkDeviceSize size); 1461 1462 void appendSubresourceUpdate(SubresourceUpdate &&update); 1463 void prependSubresourceUpdate(SubresourceUpdate &&update); 1464 void resetCachedProperties(); 1465 1466 // Vulkan objects. 1467 Image mImage; 1468 DeviceMemory mDeviceMemory; 1469 1470 // Image properties. 1471 VkImageType mImageType; 1472 VkExtent3D mExtents; 1473 const Format *mFormat; 1474 GLint mSamples; 1475 Serial mSerial; 1476 1477 // Current state. 1478 ImageLayout mCurrentLayout; 1479 uint32_t mCurrentQueueFamilyIndex; 1480 // For optimizing transition between different shader readonly layouts 1481 ImageLayout mLastNonShaderReadOnlyLayout; 1482 VkPipelineStageFlags mCurrentShaderReadStageMask; 1483 1484 // Cached properties. 1485 uint32_t mBaseLevel; 1486 uint32_t mMaxLevel; 1487 uint32_t mLayerCount; 1488 uint32_t mLevelCount; 1489 1490 // Staging buffer 1491 DynamicBuffer mStagingBuffer; 1492 std::vector<SubresourceUpdate> mSubresourceUpdates; 1493 }; 1494 1495 // A vector of image views, such as one per level or one per layer. 
1496 using ImageViewVector = std::vector<ImageView>; 1497 1498 // A vector of vector of image views. Primary index is layer, secondary index is level. 1499 using LayerLevelImageViewVector = std::vector<ImageViewVector>; 1500 1501 class ImageViewHelper : angle::NonCopyable 1502 { 1503 public: 1504 ImageViewHelper(); 1505 ImageViewHelper(ImageViewHelper &&other); 1506 ~ImageViewHelper(); 1507 1508 void release(RendererVk *renderer); 1509 void destroy(VkDevice device); 1510 getLinearReadImageView()1511 const ImageView &getLinearReadImageView() const { return mLinearReadImageView; } getNonLinearReadImageView()1512 const ImageView &getNonLinearReadImageView() const { return mNonLinearReadImageView; } getLinearFetchImageView()1513 const ImageView &getLinearFetchImageView() const { return mLinearFetchImageView; } getNonLinearFetchImageView()1514 const ImageView &getNonLinearFetchImageView() const { return mNonLinearFetchImageView; } getStencilReadImageView()1515 const ImageView &getStencilReadImageView() const { return mStencilReadImageView; } 1516 getReadImageView()1517 const ImageView &getReadImageView() const 1518 { 1519 return mLinearColorspace ? mLinearReadImageView : mNonLinearReadImageView; 1520 } 1521 getFetchImageView()1522 const ImageView &getFetchImageView() const 1523 { 1524 return mLinearColorspace ? mLinearFetchImageView : mNonLinearFetchImageView; 1525 } 1526 1527 // Used when initialized RenderTargets. hasStencilReadImageView()1528 bool hasStencilReadImageView() const { return mStencilReadImageView.valid(); } 1529 hasFetchImageView()1530 bool hasFetchImageView() const { return getFetchImageView().valid(); } 1531 1532 // Store reference to usage in graph. retain(ResourceUseList * resourceUseList)1533 void retain(ResourceUseList *resourceUseList) const { resourceUseList->add(mUse); } 1534 1535 // Creates views with multiple layers and levels. 
1536 angle::Result initReadViews(ContextVk *contextVk, 1537 gl::TextureType viewType, 1538 const ImageHelper &image, 1539 const Format &format, 1540 const gl::SwizzleState &swizzleState, 1541 uint32_t baseLevel, 1542 uint32_t levelCount, 1543 uint32_t baseLayer, 1544 uint32_t layerCount); 1545 1546 // Create SRGB-reinterpreted read views 1547 angle::Result initSRGBReadViews(ContextVk *contextVk, 1548 gl::TextureType viewType, 1549 const ImageHelper &image, 1550 const Format &format, 1551 const gl::SwizzleState &swizzleState, 1552 uint32_t baseLevel, 1553 uint32_t levelCount, 1554 uint32_t baseLayer, 1555 uint32_t layerCount); 1556 1557 // Creates a view with all layers of the level. 1558 angle::Result getLevelDrawImageView(ContextVk *contextVk, 1559 gl::TextureType viewType, 1560 const ImageHelper &image, 1561 uint32_t level, 1562 uint32_t layer, 1563 const ImageView **imageViewOut); 1564 1565 // Creates a view with a single layer of the level. 1566 angle::Result getLevelLayerDrawImageView(ContextVk *contextVk, 1567 const ImageHelper &image, 1568 uint32_t level, 1569 uint32_t layer, 1570 const ImageView **imageViewOut); 1571 1572 private: getReadImageView()1573 ImageView &getReadImageView() 1574 { 1575 return mLinearColorspace ? mLinearReadImageView : mNonLinearReadImageView; 1576 } getFetchImageView()1577 ImageView &getFetchImageView() 1578 { 1579 return mLinearColorspace ? mLinearFetchImageView : mNonLinearFetchImageView; 1580 } 1581 1582 // Lifetime. 1583 SharedResourceUse mUse; 1584 1585 // Read views 1586 ImageView mLinearReadImageView; 1587 ImageView mNonLinearReadImageView; 1588 ImageView mLinearFetchImageView; 1589 ImageView mNonLinearFetchImageView; 1590 ImageView mStencilReadImageView; 1591 1592 bool mLinearColorspace; 1593 1594 // Draw views. 1595 ImageViewVector mLevelDrawImageViews; 1596 LayerLevelImageViewVector mLayerLevelDrawImageViews; 1597 }; 1598 1599 // The SamplerHelper allows a Sampler to be coupled with a resource lifetime. 
1600 class SamplerHelper final : angle::NonCopyable 1601 { 1602 public: 1603 SamplerHelper(); 1604 ~SamplerHelper(); 1605 1606 angle::Result init(Context *context, const VkSamplerCreateInfo &createInfo); 1607 void release(RendererVk *renderer); 1608 valid()1609 bool valid() const { return mSampler.valid(); } get()1610 const Sampler &get() const { return mSampler; } 1611 retain(ResourceUseList * resourceUseList)1612 void retain(ResourceUseList *resourceUseList) { resourceUseList->add(mUse); } 1613 1614 private: 1615 SharedResourceUse mUse; 1616 Sampler mSampler; 1617 }; 1618 1619 class FramebufferHelper : public Resource 1620 { 1621 public: 1622 FramebufferHelper(); 1623 ~FramebufferHelper() override; 1624 1625 FramebufferHelper(FramebufferHelper &&other); 1626 FramebufferHelper &operator=(FramebufferHelper &&other); 1627 1628 angle::Result init(ContextVk *contextVk, const VkFramebufferCreateInfo &createInfo); 1629 void release(ContextVk *contextVk); 1630 valid()1631 bool valid() { return mFramebuffer.valid(); } 1632 getFramebuffer()1633 const Framebuffer &getFramebuffer() const 1634 { 1635 ASSERT(mFramebuffer.valid()); 1636 return mFramebuffer; 1637 } 1638 getFramebuffer()1639 Framebuffer &getFramebuffer() 1640 { 1641 ASSERT(mFramebuffer.valid()); 1642 return mFramebuffer; 1643 } 1644 1645 private: 1646 // Vulkan object. 1647 Framebuffer mFramebuffer; 1648 }; 1649 1650 // A special command graph resource to hold resource dependencies for dispatch calls. It's the 1651 // equivalent of FramebufferHelper, though it doesn't contain a Vulkan object. 
1652 class DispatchHelper : public Resource 1653 { 1654 public: 1655 DispatchHelper(); 1656 ~DispatchHelper() override; 1657 }; 1658 1659 class ShaderProgramHelper : angle::NonCopyable 1660 { 1661 public: 1662 ShaderProgramHelper(); 1663 ~ShaderProgramHelper(); 1664 1665 bool valid(const gl::ShaderType shaderType) const; 1666 void destroy(VkDevice device); 1667 void release(ContextVk *contextVk); 1668 getShader(gl::ShaderType shaderType)1669 ShaderAndSerial &getShader(gl::ShaderType shaderType) { return mShaders[shaderType].get(); } 1670 1671 void setShader(gl::ShaderType shaderType, RefCounted<ShaderAndSerial> *shader); 1672 void enableSpecializationConstant(sh::vk::SpecializationConstantId id); 1673 1674 // For getting a Pipeline and from the pipeline cache. getGraphicsPipeline(ContextVk * contextVk,RenderPassCache * renderPassCache,const PipelineCache & pipelineCache,Serial currentQueueSerial,const PipelineLayout & pipelineLayout,const GraphicsPipelineDesc & pipelineDesc,const gl::AttributesMask & activeAttribLocationsMask,const gl::ComponentTypeMask & programAttribsTypeMask,const GraphicsPipelineDesc ** descPtrOut,PipelineHelper ** pipelineOut)1675 ANGLE_INLINE angle::Result getGraphicsPipeline( 1676 ContextVk *contextVk, 1677 RenderPassCache *renderPassCache, 1678 const PipelineCache &pipelineCache, 1679 Serial currentQueueSerial, 1680 const PipelineLayout &pipelineLayout, 1681 const GraphicsPipelineDesc &pipelineDesc, 1682 const gl::AttributesMask &activeAttribLocationsMask, 1683 const gl::ComponentTypeMask &programAttribsTypeMask, 1684 const GraphicsPipelineDesc **descPtrOut, 1685 PipelineHelper **pipelineOut) 1686 { 1687 // Pull in a compatible RenderPass. 
1688 RenderPass *compatibleRenderPass = nullptr; 1689 ANGLE_TRY(renderPassCache->getCompatibleRenderPass(contextVk, currentQueueSerial, 1690 pipelineDesc.getRenderPassDesc(), 1691 &compatibleRenderPass)); 1692 1693 ShaderModule *vertexShader = &mShaders[gl::ShaderType::Vertex].get().get(); 1694 ShaderModule *fragmentShader = mShaders[gl::ShaderType::Fragment].valid() 1695 ? &mShaders[gl::ShaderType::Fragment].get().get() 1696 : nullptr; 1697 ShaderModule *geometryShader = mShaders[gl::ShaderType::Geometry].valid() 1698 ? &mShaders[gl::ShaderType::Geometry].get().get() 1699 : nullptr; 1700 1701 return mGraphicsPipelines.getPipeline( 1702 contextVk, pipelineCache, *compatibleRenderPass, pipelineLayout, 1703 activeAttribLocationsMask, programAttribsTypeMask, vertexShader, fragmentShader, 1704 geometryShader, mSpecializationConstants, pipelineDesc, descPtrOut, pipelineOut); 1705 } 1706 1707 angle::Result getComputePipeline(Context *context, 1708 const PipelineLayout &pipelineLayout, 1709 PipelineAndSerial **pipelineOut); 1710 1711 private: 1712 gl::ShaderMap<BindingPointer<ShaderAndSerial>> mShaders; 1713 GraphicsPipelineCache mGraphicsPipelines; 1714 1715 // We should probably use PipelineHelper here so we can remove PipelineAndSerial. 1716 PipelineAndSerial mComputePipeline; 1717 1718 // Specialization constants, currently only used by the graphics queue. 1719 vk::SpecializationConstantBitSet mSpecializationConstants; 1720 }; 1721 1722 // Tracks current handle allocation counts in the back-end. Useful for debugging and profiling. 1723 // Note: not all handle types are currently implemented. 
1724 class ActiveHandleCounter final : angle::NonCopyable 1725 { 1726 public: 1727 ActiveHandleCounter(); 1728 ~ActiveHandleCounter(); 1729 onAllocate(HandleType handleType)1730 void onAllocate(HandleType handleType) 1731 { 1732 mActiveCounts[handleType]++; 1733 mAllocatedCounts[handleType]++; 1734 } 1735 onDeallocate(HandleType handleType)1736 void onDeallocate(HandleType handleType) { mActiveCounts[handleType]--; } 1737 getActive(HandleType handleType)1738 uint32_t getActive(HandleType handleType) const { return mActiveCounts[handleType]; } getAllocated(HandleType handleType)1739 uint32_t getAllocated(HandleType handleType) const { return mAllocatedCounts[handleType]; } 1740 1741 private: 1742 angle::PackedEnumMap<HandleType, uint32_t> mActiveCounts; 1743 angle::PackedEnumMap<HandleType, uint32_t> mAllocatedCounts; 1744 }; 1745 } // namespace vk 1746 } // namespace rx 1747 1748 #endif // LIBANGLE_RENDERER_VULKAN_VK_HELPERS_H_ 1749