1 //
2 // Copyright 2016 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // ProgramVk.cpp:
7 //    Implements the class methods for ProgramVk.
8 //
9 
10 #include "libANGLE/renderer/vulkan/ProgramVk.h"
11 
12 #include "common/debug.h"
13 #include "libANGLE/Context.h"
14 #include "libANGLE/ProgramLinkedResources.h"
15 #include "libANGLE/renderer/renderer_utils.h"
16 #include "libANGLE/renderer/vulkan/BufferVk.h"
17 #include "libANGLE/renderer/vulkan/GlslangWrapper.h"
18 #include "libANGLE/renderer/vulkan/TextureVk.h"
19 
20 namespace rx
21 {
22 
23 namespace
24 {
25 
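// Minimum size, in bytes, of the dynamic buffer that backs default uniform block data (256 * 128 = 32768).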
26 constexpr size_t kUniformBlockDynamicBufferMinSize = 256 * 128;
27 
28 // Identical to Std140 encoder in all aspects, except it ignores opaque uniform types.
29 class VulkanDefaultBlockEncoder : public sh::Std140BlockEncoder
30 {
31   public:
32     void advanceOffset(GLenum type,
33                        const std::vector<unsigned int> &arraySizes,
34                        bool isRowMajorMatrix,
35                        int arrayStride,
36                        int matrixStride) override
37     {
38         if (gl::IsOpaqueType(type))
39         {
40             return;
41         }
42 
43         sh::Std140BlockEncoder::advanceOffset(type, arraySizes, isRowMajorMatrix, arrayStride,
44                                               matrixStride);
45     }
46 };
47 
48 void InitDefaultUniformBlock(const std::vector<sh::Uniform> &uniforms,
49                              sh::BlockLayoutMap *blockLayoutMapOut,
50                              size_t *blockSizeOut)
51 {
52     if (uniforms.empty())
53     {
54         *blockSizeOut = 0;
55         return;
56     }
57 
58     VulkanDefaultBlockEncoder blockEncoder;
59     sh::GetUniformBlockInfo(uniforms, "", &blockEncoder, blockLayoutMapOut);
60 
61     size_t blockSize = blockEncoder.getCurrentOffset();
62 
63     // TODO(jmadill): I think we still need a valid block for the pipeline even if zero sized.
64     if (blockSize == 0)
65     {
66         *blockSizeOut = 0;
67         return;
68     }
69 
70     *blockSizeOut = blockSize;
71     return;
72 }
73 
74 template <typename T>
75 void UpdateDefaultUniformBlock(GLsizei count,
76                                uint32_t arrayIndex,
77                                int componentCount,
78                                const T *v,
79                                const sh::BlockMemberInfo &layoutInfo,
80                                angle::MemoryBuffer *uniformData)
81 {
82     const int elementSize = sizeof(T) * componentCount;
83 
84     uint8_t *dst = uniformData->data() + layoutInfo.offset;
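    // Tightly packed data (or a non-array uniform) can be copied with a single memcpy;
    // otherwise each element has to be written at its own arrayStride offset.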
85     if (layoutInfo.arrayStride == 0 || layoutInfo.arrayStride == elementSize)
86     {
87         uint32_t arrayOffset = arrayIndex * layoutInfo.arrayStride;
88         uint8_t *writePtr    = dst + arrayOffset;
89         ASSERT(writePtr + (elementSize * count) <= uniformData->data() + uniformData->size());
90         memcpy(writePtr, v, elementSize * count);
91     }
92     else
93     {
94         // Have to respect the arrayStride between each element of the array.
95         int maxIndex = arrayIndex + count;
96         for (int writeIndex = arrayIndex, readIndex = 0; writeIndex < maxIndex;
97              writeIndex++, readIndex++)
98         {
99             const int arrayOffset = writeIndex * layoutInfo.arrayStride;
100             uint8_t *writePtr     = dst + arrayOffset;
101             const T *readPtr      = v + (readIndex * componentCount);
102             ASSERT(writePtr + elementSize <= uniformData->data() + uniformData->size());
103             memcpy(writePtr, readPtr, elementSize);
104         }
105     }
106 }
107 
108 template <typename T>
109 void ReadFromDefaultUniformBlock(int componentCount,
110                                  uint32_t arrayIndex,
111                                  T *dst,
112                                  const sh::BlockMemberInfo &layoutInfo,
113                                  const angle::MemoryBuffer *uniformData)
114 {
115     ASSERT(layoutInfo.offset != -1);
116 
117     const int elementSize = sizeof(T) * componentCount;
118     const uint8_t *source = uniformData->data() + layoutInfo.offset;
119 
120     if (layoutInfo.arrayStride == 0 || layoutInfo.arrayStride == elementSize)
121     {
122         const uint8_t *readPtr = source + arrayIndex * layoutInfo.arrayStride;
123         memcpy(dst, readPtr, elementSize);
124     }
125     else
126     {
127         // Have to respect the arrayStride between each element of the array.
128         const int arrayOffset  = arrayIndex * layoutInfo.arrayStride;
129         const uint8_t *readPtr = source + arrayOffset;
130         memcpy(dst, readPtr, elementSize);
131     }
132 }
133 
134 angle::Result SyncDefaultUniformBlock(ContextVk *contextVk,
135                                       vk::DynamicBuffer *dynamicBuffer,
136                                       const angle::MemoryBuffer &bufferData,
137                                       uint32_t *outOffset,
138                                       bool *outBufferModified)
139 {
140     dynamicBuffer->releaseInFlightBuffers(contextVk);
141 
142     ASSERT(!bufferData.empty());
143     uint8_t *data       = nullptr;
144     VkBuffer *outBuffer = nullptr;
145     VkDeviceSize offset = 0;
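    // Allocate fresh space in the dynamic buffer, copy the CPU-side uniform data into it, and
    // flush the mapped memory so the GPU sees the update.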
146     ANGLE_TRY(dynamicBuffer->allocate(contextVk, bufferData.size(), &data, outBuffer, &offset,
147                                       outBufferModified));
148     *outOffset = static_cast<uint32_t>(offset);
149     memcpy(data, bufferData.data(), bufferData.size());
150     ANGLE_TRY(dynamicBuffer->flush(contextVk));
151     return angle::Result::Continue;
152 }
153 
154 uint32_t GetInterfaceBlockArraySize(const std::vector<gl::InterfaceBlock> &blocks,
155                                     uint32_t bufferIndex)
156 {
157     const gl::InterfaceBlock &block = blocks[bufferIndex];
158 
159     if (!block.isArray)
160     {
161         return 1;
162     }
163 
164     ASSERT(block.arrayElement == 0);
165 
166     // Search consecutively until all array indices of this block are visited.
167     uint32_t arraySize;
168     for (arraySize = 1; bufferIndex + arraySize < blocks.size(); ++arraySize)
169     {
170         const gl::InterfaceBlock &nextBlock = blocks[bufferIndex + arraySize];
171 
172         if (nextBlock.arrayElement != arraySize)
173         {
174             break;
175         }
176 
177         // It's unexpected for an array to start at a non-zero array element, so we can always rely on
178         // the sequential `arrayElement`s to belong to the same block.
179         ASSERT(nextBlock.name == block.name);
180         ASSERT(nextBlock.isArray);
181     }
182 
183     return arraySize;
184 }
185 
186 void AddInterfaceBlockDescriptorSetDesc(const std::vector<gl::InterfaceBlock> &blocks,
187                                         uint32_t bindingStart,
188                                         VkDescriptorType descType,
189                                         vk::DescriptorSetLayoutDesc *descOut)
190 {
191     uint32_t bindingIndex = 0;
192     for (uint32_t bufferIndex = 0; bufferIndex < blocks.size();)
193     {
194         const uint32_t arraySize = GetInterfaceBlockArraySize(blocks, bufferIndex);
195         VkShaderStageFlags activeStages =
196             gl_vk::GetShaderStageFlags(blocks[bufferIndex].activeShaders());
197 
198         descOut->update(bindingStart + bindingIndex, descType, arraySize, activeStages);
199 
200         bufferIndex += arraySize;
201         ++bindingIndex;
202     }
203 }
204 
205 void AddAtomicCounterBufferDescriptorSetDesc(
206     const std::vector<gl::AtomicCounterBuffer> &atomicCounterBuffers,
207     uint32_t bindingStart,
208     vk::DescriptorSetLayoutDesc *descOut)
209 {
210     if (atomicCounterBuffers.empty())
211     {
212         return;
213     }
214 
215     VkShaderStageFlags activeStages = 0;
216     for (const gl::AtomicCounterBuffer &buffer : atomicCounterBuffers)
217     {
218         activeStages |= gl_vk::GetShaderStageFlags(buffer.activeShaders());
219     }
220 
221     // A single storage buffer array is used for all stages for simplicity.
222     descOut->update(bindingStart, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
223                     gl::IMPLEMENTATION_MAX_ATOMIC_COUNTER_BUFFERS, activeStages);
224 }
225 
226 void WriteBufferDescriptorSetBinding(const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding,
227                                      VkDeviceSize maxSize,
228                                      VkDescriptorSet descSet,
229                                      VkDescriptorType descType,
230                                      uint32_t bindingIndex,
231                                      uint32_t arrayElement,
232                                      VkDeviceSize requiredOffsetAlignment,
233                                      VkDescriptorBufferInfo *bufferInfoOut,
234                                      VkWriteDescriptorSet *writeInfoOut)
235 {
236     gl::Buffer *buffer = bufferBinding.get();
237     ASSERT(buffer != nullptr);
238 
239     // Make sure there's no possible under/overflow with binding size.
240     static_assert(sizeof(VkDeviceSize) >= sizeof(bufferBinding.getSize()),
241                   "VkDeviceSize too small");
242     ASSERT(bufferBinding.getSize() >= 0);
243 
244     BufferVk *bufferVk             = vk::GetImpl(buffer);
245     VkDeviceSize offset            = bufferBinding.getOffset();
246     VkDeviceSize size              = bufferBinding.getSize();
247     vk::BufferHelper &bufferHelper = bufferVk->getBuffer();
248 
249     // If size is 0, we can't always use VK_WHOLE_SIZE (or bufferHelper.getSize()), as the
250     // backing buffer may be larger than max*BufferRange.  In that case, we use the minimum of
251     // the backing buffer size (what's left after offset) and the buffer size as defined by the
252     // shader.  The latter is only valid for UBOs, as SSBOs may have variable length arrays.
253     size = size > 0 ? size : (bufferHelper.getSize() - offset);
254     if (maxSize > 0)
255     {
256         size = std::min(size, maxSize);
257     }
258 
259     // If requiredOffsetAlignment is 0, the buffer offset is guaranteed to have the necessary
260     // alignment through other means (the backend specifying the alignment through a GLES limit that
261     // the frontend then enforces).  If it's not 0, we need to bind the buffer at an offset that's
262     // aligned.  The difference in offsets is communicated to the shader via driver uniforms.
263     if (requiredOffsetAlignment)
264     {
265         VkDeviceSize alignedOffset = (offset / requiredOffsetAlignment) * requiredOffsetAlignment;
266         VkDeviceSize offsetDiff    = offset - alignedOffset;
267 
268         offset = alignedOffset;
269         size += offsetDiff;
270     }
271 
272     bufferInfoOut->buffer = bufferHelper.getBuffer().getHandle();
273     bufferInfoOut->offset = offset;
274     bufferInfoOut->range  = size;
275 
276     writeInfoOut->sType            = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
277     writeInfoOut->pNext            = nullptr;
278     writeInfoOut->dstSet           = descSet;
279     writeInfoOut->dstBinding       = bindingIndex;
280     writeInfoOut->dstArrayElement  = arrayElement;
281     writeInfoOut->descriptorCount  = 1;
282     writeInfoOut->descriptorType   = descType;
283     writeInfoOut->pImageInfo       = nullptr;
284     writeInfoOut->pBufferInfo      = bufferInfoOut;
285     writeInfoOut->pTexelBufferView = nullptr;
286     ASSERT(writeInfoOut->pBufferInfo[0].buffer != VK_NULL_HANDLE);
287 }
288 
289 class Std140BlockLayoutEncoderFactory : public gl::CustomBlockLayoutEncoderFactory
290 {
291   public:
292     sh::BlockLayoutEncoder *makeEncoder() override { return new sh::Std140BlockEncoder(); }
293 };
294 }  // anonymous namespace
295 
296 // ProgramVk::ShaderInfo implementation.
297 ProgramVk::ShaderInfo::ShaderInfo() {}
298 
299 ProgramVk::ShaderInfo::~ShaderInfo() = default;
300 
301 angle::Result ProgramVk::ShaderInfo::initShaders(ContextVk *contextVk,
302                                                  const gl::ShaderMap<std::string> &shaderSources,
303                                                  bool enableLineRasterEmulation)
304 {
305     ASSERT(!valid());
306 
307     bool useSubgroupOpsWithSeamfulCubeMapEmulation = false;
308     bool emulateSeamfulCubeMapSampling =
309         contextVk->emulateSeamfulCubeMapSampling(&useSubgroupOpsWithSeamfulCubeMapEmulation);
310     bool useSubgroupOps =
311         emulateSeamfulCubeMapSampling && useSubgroupOpsWithSeamfulCubeMapEmulation;
312 
313     gl::ShaderMap<std::vector<uint32_t>> shaderCodes;
314     ANGLE_TRY(GlslangWrapper::GetShaderCode(contextVk, contextVk->getCaps(),
315                                             enableLineRasterEmulation, useSubgroupOps,
316                                             shaderSources, &shaderCodes));
317 
318     for (const gl::ShaderType shaderType : gl::AllShaderTypes())
319     {
320         if (!shaderSources[shaderType].empty())
321         {
322             ANGLE_TRY(vk::InitShaderAndSerial(contextVk, &mShaders[shaderType].get(),
323                                               shaderCodes[shaderType].data(),
324                                               shaderCodes[shaderType].size() * sizeof(uint32_t)));
325 
326             mProgramHelper.setShader(shaderType, &mShaders[shaderType]);
327         }
328     }
329 
330     return angle::Result::Continue;
331 }
332 
333 angle::Result ProgramVk::loadShaderSource(ContextVk *contextVk, gl::BinaryInputStream *stream)
334 {
335     // Read in shader sources for all shader types
336     for (const gl::ShaderType shaderType : gl::AllShaderTypes())
337     {
338         mShaderSources[shaderType] = stream->readString();
339     }
340 
341     return angle::Result::Continue;
342 }
343 
344 void ProgramVk::saveShaderSource(gl::BinaryOutputStream *stream)
345 {
346     // Write out shader sources for all shader types
347     for (const gl::ShaderType shaderType : gl::AllShaderTypes())
348     {
349         stream->writeString(mShaderSources[shaderType]);
350     }
351 }
352 
353 void ProgramVk::ShaderInfo::release(ContextVk *contextVk)
354 {
355     mProgramHelper.release(contextVk);
356 
357     for (vk::RefCounted<vk::ShaderAndSerial> &shader : mShaders)
358     {
359         shader.get().destroy(contextVk->getDevice());
360     }
361 }
362 
363 // ProgramVk implementation.
364 ProgramVk::DefaultUniformBlock::DefaultUniformBlock() {}
365 
366 ProgramVk::DefaultUniformBlock::~DefaultUniformBlock() = default;
367 
368 ProgramVk::ProgramVk(const gl::ProgramState &state)
369     : ProgramImpl(state),
370       mDynamicBufferOffsets{},
371       mStorageBlockBindingsOffset(0),
372       mAtomicCounterBufferBindingsOffset(0)
373 {}
374 
375 ProgramVk::~ProgramVk() = default;
376 
377 void ProgramVk::destroy(const gl::Context *context)
378 {
379     ContextVk *contextVk = vk::GetImpl(context);
380     reset(contextVk);
381 }
382 
383 void ProgramVk::reset(ContextVk *contextVk)
384 {
385     for (auto &descriptorSetLayout : mDescriptorSetLayouts)
386     {
387         descriptorSetLayout.reset();
388     }
389     mPipelineLayout.reset();
390 
391     for (auto &uniformBlock : mDefaultUniformBlocks)
392     {
393         uniformBlock.storage.release(contextVk);
394     }
395 
396     mDefaultShaderInfo.release(contextVk);
397     mLineRasterShaderInfo.release(contextVk);
398 
399     mEmptyBuffer.release(contextVk);
400 
401     mDescriptorSets.clear();
402     mEmptyDescriptorSets.fill(VK_NULL_HANDLE);
403 
404     for (vk::RefCountedDescriptorPoolBinding &binding : mDescriptorPoolBindings)
405     {
406         binding.reset();
407     }
408 
409     for (vk::DynamicDescriptorPool &descriptorPool : mDynamicDescriptorPools)
410     {
411         descriptorPool.release(contextVk);
412     }
413 
414     mTextureDescriptorsCache.clear();
415 }
416 
417 std::unique_ptr<rx::LinkEvent> ProgramVk::load(const gl::Context *context,
418                                                gl::BinaryInputStream *stream,
419                                                gl::InfoLog &infoLog)
420 {
421     ContextVk *contextVk = vk::GetImpl(context);
422     gl::ShaderMap<size_t> requiredBufferSize;
423     requiredBufferSize.fill(0);
424 
425     angle::Result status = loadShaderSource(contextVk, stream);
426     if (status != angle::Result::Continue)
427     {
428         return std::make_unique<LinkEventDone>(status);
429     }
430 
431     // Deserializes the uniformLayout data of mDefaultUniformBlocks
432     for (gl::ShaderType shaderType : gl::AllShaderTypes())
433     {
434         const size_t uniformCount = stream->readInt<size_t>();
435         for (unsigned int uniformIndex = 0; uniformIndex < uniformCount; ++uniformIndex)
436         {
437             sh::BlockMemberInfo blockInfo;
438             gl::LoadBlockMemberInfo(stream, &blockInfo);
439             mDefaultUniformBlocks[shaderType].uniformLayout.push_back(blockInfo);
440         }
441     }
442 
443     // Deserializes required uniform block memory sizes
444     for (gl::ShaderType shaderType : gl::AllShaderTypes())
445     {
446         requiredBufferSize[shaderType] = stream->readInt<size_t>();
447     }
448 
449     reset(contextVk);
450 
451     // Initialize and resize the mDefaultUniformBlocks' memory
452     status = resizeUniformBlockMemory(contextVk, requiredBufferSize);
453     if (status != angle::Result::Continue)
454     {
455         return std::make_unique<LinkEventDone>(status);
456     }
457 
458     return std::make_unique<LinkEventDone>(linkImpl(context, infoLog));
459 }
460 
461 void ProgramVk::save(const gl::Context *context, gl::BinaryOutputStream *stream)
462 {
463     // TODO(geofflang): Look into saving shader modules in ShaderInfo objects (keep in mind that we
464     // compile shaders lazily)
465     saveShaderSource(stream);
466 
467     // Serializes the uniformLayout data of mDefaultUniformBlocks
468     for (gl::ShaderType shaderType : gl::AllShaderTypes())
469     {
470         const size_t uniformCount = mDefaultUniformBlocks[shaderType].uniformLayout.size();
471         stream->writeInt<size_t>(uniformCount);
472         for (unsigned int uniformIndex = 0; uniformIndex < uniformCount; ++uniformIndex)
473         {
474             sh::BlockMemberInfo &blockInfo =
475                 mDefaultUniformBlocks[shaderType].uniformLayout[uniformIndex];
476             gl::WriteBlockMemberInfo(stream, blockInfo);
477         }
478     }
479 
480     // Serializes required uniform block memory sizes
481     for (gl::ShaderType shaderType : gl::AllShaderTypes())
482     {
483         stream->writeInt(mDefaultUniformBlocks[shaderType].uniformData.size());
484     }
485 }
486 
487 void ProgramVk::setBinaryRetrievableHint(bool retrievable)
488 {
489     UNIMPLEMENTED();
490 }
491 
492 void ProgramVk::setSeparable(bool separable)
493 {
494     UNIMPLEMENTED();
495 }
496 
497 std::unique_ptr<LinkEvent> ProgramVk::link(const gl::Context *context,
498                                            const gl::ProgramLinkedResources &resources,
499                                            gl::InfoLog &infoLog)
500 {
501     ContextVk *contextVk = vk::GetImpl(context);
502     // Link resources before calling GetShaderSource to make sure they are ready for the set/binding
503     // assignment done in that function.
504     linkResources(resources);
505 
506     GlslangWrapper::GetShaderSource(mState, resources, &mShaderSources);
507 
508     reset(contextVk);
509 
510     angle::Result status = initDefaultUniformBlocks(context);
511     if (status != angle::Result::Continue)
512     {
513         return std::make_unique<LinkEventDone>(status);
514     }
515 
516     // TODO(jie.a.chen@intel.com): Parallelize linking.
517     // http://crbug.com/849576
518     return std::make_unique<LinkEventDone>(linkImpl(context, infoLog));
519 }
520 
521 angle::Result ProgramVk::linkImpl(const gl::Context *glContext, gl::InfoLog &infoLog)
522 {
523     const gl::State &glState                 = glContext->getState();
524     ContextVk *contextVk                     = vk::GetImpl(glContext);
525     RendererVk *renderer                     = contextVk->getRenderer();
526     gl::TransformFeedback *transformFeedback = glState.getCurrentTransformFeedback();
527 
528     updateBindingOffsets();
529 
530     // Store a reference to the pipeline and descriptor set layouts. This will create them if they
531     // don't already exist in the cache.
532 
533     // Default uniforms and transform feedback:
534     vk::DescriptorSetLayoutDesc uniformsAndXfbSetDesc;
535     uint32_t uniformBindingIndex = 0;
536     for (const gl::ShaderType shaderType : mState.getLinkedShaderStages())
537     {
538         uniformsAndXfbSetDesc.update(uniformBindingIndex++,
539                                      VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1,
540                                      gl_vk::kShaderStageMap[shaderType]);
541     }
542     if (mState.hasLinkedShaderStage(gl::ShaderType::Vertex) && transformFeedback &&
543         !mState.getLinkedTransformFeedbackVaryings().empty())
544     {
545         vk::GetImpl(transformFeedback)->updateDescriptorSetLayout(mState, &uniformsAndXfbSetDesc);
546     }
547 
548     ANGLE_TRY(renderer->getDescriptorSetLayout(
549         contextVk, uniformsAndXfbSetDesc,
550         &mDescriptorSetLayouts[kUniformsAndXfbDescriptorSetIndex]));
551 
552     // Uniform and storage buffers:
553     vk::DescriptorSetLayoutDesc buffersSetDesc;
554 
555     AddInterfaceBlockDescriptorSetDesc(mState.getUniformBlocks(), getUniformBlockBindingsOffset(),
556                                        VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &buffersSetDesc);
557     AddInterfaceBlockDescriptorSetDesc(mState.getShaderStorageBlocks(),
558                                        getStorageBlockBindingsOffset(),
559                                        VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &buffersSetDesc);
560     AddAtomicCounterBufferDescriptorSetDesc(
561         mState.getAtomicCounterBuffers(), getAtomicCounterBufferBindingsOffset(), &buffersSetDesc);
562 
563     ANGLE_TRY(renderer->getDescriptorSetLayout(
564         contextVk, buffersSetDesc, &mDescriptorSetLayouts[kShaderResourceDescriptorSetIndex]));
565 
566     // Textures:
567     vk::DescriptorSetLayoutDesc texturesSetDesc;
568 
569     for (uint32_t textureIndex = 0; textureIndex < mState.getSamplerBindings().size();
570          ++textureIndex)
571     {
572         const gl::SamplerBinding &samplerBinding = mState.getSamplerBindings()[textureIndex];
573 
574         uint32_t uniformIndex = mState.getUniformIndexFromSamplerIndex(textureIndex);
575         const gl::LinkedUniform &samplerUniform = mState.getUniforms()[uniformIndex];
576 
577         // The front-end always binds array sampler units sequentially.
578         const uint32_t arraySize = static_cast<uint32_t>(samplerBinding.boundTextureUnits.size());
579         VkShaderStageFlags activeStages =
580             gl_vk::GetShaderStageFlags(samplerUniform.activeShaders());
581 
582         texturesSetDesc.update(textureIndex, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, arraySize,
583                                activeStages);
584     }
585 
586     ANGLE_TRY(renderer->getDescriptorSetLayout(contextVk, texturesSetDesc,
587                                                &mDescriptorSetLayouts[kTextureDescriptorSetIndex]));
588 
589     VkShaderStageFlags driverUniformsStages =
590         mState.isCompute() ? VK_SHADER_STAGE_COMPUTE_BIT : VK_SHADER_STAGE_ALL_GRAPHICS;
591     vk::DescriptorSetLayoutDesc driverUniformsSetDesc =
592         contextVk->getDriverUniformsDescriptorSetDesc(driverUniformsStages);
593     ANGLE_TRY(renderer->getDescriptorSetLayout(
594         contextVk, driverUniformsSetDesc,
595         &mDescriptorSetLayouts[kDriverUniformsDescriptorSetIndex]));
596 
597     vk::PipelineLayoutDesc pipelineLayoutDesc;
598     pipelineLayoutDesc.updateDescriptorSetLayout(kUniformsAndXfbDescriptorSetIndex,
599                                                  uniformsAndXfbSetDesc);
600     pipelineLayoutDesc.updateDescriptorSetLayout(kShaderResourceDescriptorSetIndex, buffersSetDesc);
601     pipelineLayoutDesc.updateDescriptorSetLayout(kTextureDescriptorSetIndex, texturesSetDesc);
602     pipelineLayoutDesc.updateDescriptorSetLayout(kDriverUniformsDescriptorSetIndex,
603                                                  driverUniformsSetDesc);
604 
605     ANGLE_TRY(renderer->getPipelineLayout(contextVk, pipelineLayoutDesc, mDescriptorSetLayouts,
606                                           &mPipelineLayout));
607 
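    // Descriptor pool sizes for the uniforms-and-transform-feedback set: one dynamic uniform
    // buffer per linked shader stage, plus storage buffers for transform feedback.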
608     std::array<VkDescriptorPoolSize, 2> uniformAndXfbSetSize = {
609         {{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
610           static_cast<uint32_t>(mState.getLinkedShaderStageCount())},
611          {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, gl::IMPLEMENTATION_MAX_TRANSFORM_FEEDBACK_BUFFERS}}};
612 
613     uint32_t uniformBlockCount = static_cast<uint32_t>(mState.getUniformBlocks().size());
614     uint32_t storageBlockCount = static_cast<uint32_t>(mState.getShaderStorageBlocks().size());
615     uint32_t atomicCounterBufferCount =
616         static_cast<uint32_t>(mState.getAtomicCounterBuffers().size());
617     uint32_t textureCount = static_cast<uint32_t>(mState.getSamplerBindings().size());
618 
619     if (renderer->getFeatures().bindEmptyForUnusedDescriptorSets.enabled)
620     {
621         // For this workaround, we have to create an empty descriptor set for each descriptor set
622         // index, so make sure their pools are initialized.
623         uniformBlockCount = std::max(uniformBlockCount, 1u);
624         textureCount      = std::max(textureCount, 1u);
625     }
626 
627     angle::FixedVector<VkDescriptorPoolSize, 2> bufferSetSize;
628     if (uniformBlockCount > 0)
629     {
630         bufferSetSize.push_back({VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, uniformBlockCount});
631     }
632     if (storageBlockCount > 0 || atomicCounterBufferCount > 0)
633     {
634         const uint32_t storageBufferDescCount = storageBlockCount + atomicCounterBufferCount;
635         bufferSetSize.push_back({VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, storageBufferDescCount});
636     }
637 
638     VkDescriptorPoolSize textureSetSize = {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, textureCount};
639 
640     ANGLE_TRY(mDynamicDescriptorPools[kUniformsAndXfbDescriptorSetIndex].init(
641         contextVk, uniformAndXfbSetSize.data(), uniformAndXfbSetSize.size()));
642     if (bufferSetSize.size() > 0)
643     {
644         ANGLE_TRY(mDynamicDescriptorPools[kShaderResourceDescriptorSetIndex].init(
645             contextVk, bufferSetSize.data(), static_cast<uint32_t>(bufferSetSize.size())));
646     }
647     if (textureCount > 0)
648     {
649         ANGLE_TRY(mDynamicDescriptorPools[kTextureDescriptorSetIndex].init(contextVk,
650                                                                            &textureSetSize, 1));
651     }
652 
653     mDynamicBufferOffsets.resize(mState.getLinkedShaderStageCount());
654 
655     // Initialize an "empty" buffer for use with default uniform blocks where there are no uniforms,
656     // or atomic counter buffer array indices that are unused.
657     constexpr VkBufferUsageFlags kEmptyBufferUsage =
658         VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
659 
660     VkBufferCreateInfo emptyBufferInfo    = {};
661     emptyBufferInfo.sType                 = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
662     emptyBufferInfo.flags                 = 0;
663     emptyBufferInfo.size                  = 4;
664     emptyBufferInfo.usage                 = kEmptyBufferUsage;
665     emptyBufferInfo.sharingMode           = VK_SHARING_MODE_EXCLUSIVE;
666     emptyBufferInfo.queueFamilyIndexCount = 0;
667     emptyBufferInfo.pQueueFamilyIndices   = nullptr;
668 
669     constexpr VkMemoryPropertyFlags kMemoryType = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
670     return mEmptyBuffer.init(contextVk, emptyBufferInfo, kMemoryType);
671 }
672 
673 void ProgramVk::updateBindingOffsets()
674 {
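    // Within the shader resource descriptor set, bindings are laid out as uniform blocks first,
    // followed by storage blocks, then atomic counter buffers.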
675     mStorageBlockBindingsOffset = static_cast<uint32_t>(mState.getUniqueUniformBlockCount());
676     mAtomicCounterBufferBindingsOffset =
677         static_cast<uint32_t>(mStorageBlockBindingsOffset + mState.getUniqueStorageBlockCount());
678 }
679 
680 void ProgramVk::linkResources(const gl::ProgramLinkedResources &resources)
681 {
682     Std140BlockLayoutEncoderFactory std140EncoderFactory;
683     gl::ProgramLinkedResourcesLinker linker(&std140EncoderFactory);
684 
685     linker.linkResources(mState, resources);
686 }
687 
688 angle::Result ProgramVk::initDefaultUniformBlocks(const gl::Context *glContext)
689 {
690     ContextVk *contextVk = vk::GetImpl(glContext);
691 
692     // Process the default uniforms of each linked shader stage into std140 packing.
693     gl::ShaderMap<sh::BlockLayoutMap> layoutMap;
694     gl::ShaderMap<size_t> requiredBufferSize;
695     requiredBufferSize.fill(0);
696 
697     generateUniformLayoutMapping(layoutMap, requiredBufferSize);
698     initDefaultUniformLayoutMapping(layoutMap);
699 
700     // All uniform initializations are complete, now resize the buffers accordingly and return
701     return resizeUniformBlockMemory(contextVk, requiredBufferSize);
702 }
703 
704 void ProgramVk::generateUniformLayoutMapping(gl::ShaderMap<sh::BlockLayoutMap> &layoutMap,
705                                              gl::ShaderMap<size_t> &requiredBufferSize)
706 {
707     for (const gl::ShaderType shaderType : mState.getLinkedShaderStages())
708     {
709         gl::Shader *shader = mState.getAttachedShader(shaderType);
710 
711         if (shader)
712         {
713             const std::vector<sh::Uniform> &uniforms = shader->getUniforms();
714             InitDefaultUniformBlock(uniforms, &layoutMap[shaderType],
715                                     &requiredBufferSize[shaderType]);
716         }
717     }
718 }
719 
720 void ProgramVk::initDefaultUniformLayoutMapping(gl::ShaderMap<sh::BlockLayoutMap> &layoutMap)
721 {
722     // Init the default block layout info.
723     const auto &uniforms = mState.getUniforms();
724     for (const gl::VariableLocation &location : mState.getUniformLocations())
725     {
726         gl::ShaderMap<sh::BlockMemberInfo> layoutInfo;
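        // Collect this uniform's layout info for each linked stage; stages that don't define the
        // uniform keep the default entry and are later skipped as unused.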
727 
728         if (location.used() && !location.ignored)
729         {
730             const auto &uniform = uniforms[location.index];
731             if (uniform.isInDefaultBlock() && !uniform.isSampler())
732             {
733                 std::string uniformName = uniform.name;
734                 if (uniform.isArray())
735                 {
736                     // Gets the uniform name without the [0] at the end.
737                     uniformName = gl::StripLastArrayIndex(uniformName);
738                 }
739 
740                 bool found = false;
741 
742                 for (const gl::ShaderType shaderType : mState.getLinkedShaderStages())
743                 {
744                     auto it = layoutMap[shaderType].find(uniformName);
745                     if (it != layoutMap[shaderType].end())
746                     {
747                         found                  = true;
748                         layoutInfo[shaderType] = it->second;
749                     }
750                 }
751 
752                 ASSERT(found);
753             }
754         }
755 
756         for (const gl::ShaderType shaderType : mState.getLinkedShaderStages())
757         {
758             mDefaultUniformBlocks[shaderType].uniformLayout.push_back(layoutInfo[shaderType]);
759         }
760     }
761 }
762 
763 angle::Result ProgramVk::resizeUniformBlockMemory(ContextVk *contextVk,
764                                                   gl::ShaderMap<size_t> &requiredBufferSize)
765 {
766     RendererVk *renderer = contextVk->getRenderer();
767     for (const gl::ShaderType shaderType : mState.getLinkedShaderStages())
768     {
769         if (requiredBufferSize[shaderType] > 0)
770         {
771             if (!mDefaultUniformBlocks[shaderType].uniformData.resize(
772                     requiredBufferSize[shaderType]))
773             {
774                 ANGLE_VK_CHECK(contextVk, false, VK_ERROR_OUT_OF_HOST_MEMORY);
775             }
776             size_t minAlignment = static_cast<size_t>(
777                 renderer->getPhysicalDeviceProperties().limits.minUniformBufferOffsetAlignment);
778 
779             mDefaultUniformBlocks[shaderType].storage.init(
780                 renderer, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
781                 minAlignment, kUniformBlockDynamicBufferMinSize, true);
782 
783             // Initialize uniform buffer memory to zero by default.
784             mDefaultUniformBlocks[shaderType].uniformData.fill(0);
785             mDefaultUniformBlocksDirty.set(shaderType);
786         }
787     }
788 
789     return angle::Result::Continue;
790 }
791 
792 GLboolean ProgramVk::validate(const gl::Caps &caps, gl::InfoLog *infoLog)
793 {
794     // No-op. The spec is very vague about the behavior of validation.
795     return GL_TRUE;
796 }
797 
798 template <typename T>
799 void ProgramVk::setUniformImpl(GLint location, GLsizei count, const T *v, GLenum entryPointType)
800 {
801     const gl::VariableLocation &locationInfo = mState.getUniformLocations()[location];
802     const gl::LinkedUniform &linkedUniform   = mState.getUniforms()[locationInfo.index];
803 
804     ASSERT(!linkedUniform.isSampler());
805 
806     if (linkedUniform.typeInfo->type == entryPointType)
807     {
808         for (const gl::ShaderType shaderType : mState.getLinkedShaderStages())
809         {
810             DefaultUniformBlock &uniformBlock     = mDefaultUniformBlocks[shaderType];
811             const sh::BlockMemberInfo &layoutInfo = uniformBlock.uniformLayout[location];
812 
813             // Assume an offset of -1 means the block is unused.
814             if (layoutInfo.offset == -1)
815             {
816                 continue;
817             }
818 
819             const GLint componentCount = linkedUniform.typeInfo->componentCount;
820             UpdateDefaultUniformBlock(count, locationInfo.arrayIndex, componentCount, v, layoutInfo,
821                                       &uniformBlock.uniformData);
822             mDefaultUniformBlocksDirty.set(shaderType);
823         }
824     }
825     else
826     {
827         for (const gl::ShaderType shaderType : mState.getLinkedShaderStages())
828         {
829             DefaultUniformBlock &uniformBlock     = mDefaultUniformBlocks[shaderType];
830             const sh::BlockMemberInfo &layoutInfo = uniformBlock.uniformLayout[location];
831 
832             // Assume an offset of -1 means the block is unused.
833             if (layoutInfo.offset == -1)
834             {
835                 continue;
836             }
837 
838             const GLint componentCount = linkedUniform.typeInfo->componentCount;
839 
840             ASSERT(linkedUniform.typeInfo->type == gl::VariableBoolVectorType(entryPointType));
841 
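            // Boolean uniforms are stored as 32-bit integers in the default uniform block, so
            // convert each component to GL_TRUE/GL_FALSE as it is copied.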
842             GLint initialArrayOffset =
843                 locationInfo.arrayIndex * layoutInfo.arrayStride + layoutInfo.offset;
844             for (GLint i = 0; i < count; i++)
845             {
846                 GLint elementOffset = i * layoutInfo.arrayStride + initialArrayOffset;
847                 GLint *dest =
848                     reinterpret_cast<GLint *>(uniformBlock.uniformData.data() + elementOffset);
849                 const T *source = v + i * componentCount;
850 
851                 for (int c = 0; c < componentCount; c++)
852                 {
853                     dest[c] = (source[c] == static_cast<T>(0)) ? GL_FALSE : GL_TRUE;
854                 }
855             }
856 
857             mDefaultUniformBlocksDirty.set(shaderType);
858         }
859     }
860 }
861 
862 template <typename T>
863 void ProgramVk::getUniformImpl(GLint location, T *v, GLenum entryPointType) const
864 {
865     const gl::VariableLocation &locationInfo = mState.getUniformLocations()[location];
866     const gl::LinkedUniform &linkedUniform   = mState.getUniforms()[locationInfo.index];
867 
868     ASSERT(!linkedUniform.isSampler());
869 
870     const gl::ShaderType shaderType = linkedUniform.getFirstShaderTypeWhereActive();
871     ASSERT(shaderType != gl::ShaderType::InvalidEnum);
872 
873     const DefaultUniformBlock &uniformBlock = mDefaultUniformBlocks[shaderType];
874     const sh::BlockMemberInfo &layoutInfo   = uniformBlock.uniformLayout[location];
875 
876     ASSERT(linkedUniform.typeInfo->componentType == entryPointType ||
877            linkedUniform.typeInfo->componentType == gl::VariableBoolVectorType(entryPointType));
878 
879     if (gl::IsMatrixType(linkedUniform.type))
880     {
881         const uint8_t *ptrToElement = uniformBlock.uniformData.data() + layoutInfo.offset +
882                                       (locationInfo.arrayIndex * layoutInfo.arrayStride);
883         GetMatrixUniform(linkedUniform.type, v, reinterpret_cast<const T *>(ptrToElement), false);
884     }
885     else
886     {
887         ReadFromDefaultUniformBlock(linkedUniform.typeInfo->componentCount, locationInfo.arrayIndex,
888                                     v, layoutInfo, &uniformBlock.uniformData);
889     }
890 }
891 
892 void ProgramVk::setUniform1fv(GLint location, GLsizei count, const GLfloat *v)
893 {
894     setUniformImpl(location, count, v, GL_FLOAT);
895 }
896 
897 void ProgramVk::setUniform2fv(GLint location, GLsizei count, const GLfloat *v)
898 {
899     setUniformImpl(location, count, v, GL_FLOAT_VEC2);
900 }
901 
902 void ProgramVk::setUniform3fv(GLint location, GLsizei count, const GLfloat *v)
903 {
904     setUniformImpl(location, count, v, GL_FLOAT_VEC3);
905 }
906 
907 void ProgramVk::setUniform4fv(GLint location, GLsizei count, const GLfloat *v)
908 {
909     setUniformImpl(location, count, v, GL_FLOAT_VEC4);
910 }
911 
912 void ProgramVk::setUniform1iv(GLint location, GLsizei count, const GLint *v)
913 {
914     const gl::VariableLocation &locationInfo = mState.getUniformLocations()[location];
915     const gl::LinkedUniform &linkedUniform   = mState.getUniforms()[locationInfo.index];
916     if (linkedUniform.isSampler())
917     {
918         // We could potentially cache some indexing here. For now this is a no-op since the mapping
919         // is handled entirely in ContextVk.
920         return;
921     }
922 
923     setUniformImpl(location, count, v, GL_INT);
924 }
925 
926 void ProgramVk::setUniform2iv(GLint location, GLsizei count, const GLint *v)
927 {
928     setUniformImpl(location, count, v, GL_INT_VEC2);
929 }
930 
931 void ProgramVk::setUniform3iv(GLint location, GLsizei count, const GLint *v)
932 {
933     setUniformImpl(location, count, v, GL_INT_VEC3);
934 }
935 
936 void ProgramVk::setUniform4iv(GLint location, GLsizei count, const GLint *v)
937 {
938     setUniformImpl(location, count, v, GL_INT_VEC4);
939 }
940 
941 void ProgramVk::setUniform1uiv(GLint location, GLsizei count, const GLuint *v)
942 {
943     setUniformImpl(location, count, v, GL_UNSIGNED_INT);
944 }
945 
946 void ProgramVk::setUniform2uiv(GLint location, GLsizei count, const GLuint *v)
947 {
948     setUniformImpl(location, count, v, GL_UNSIGNED_INT_VEC2);
949 }
950 
951 void ProgramVk::setUniform3uiv(GLint location, GLsizei count, const GLuint *v)
952 {
953     setUniformImpl(location, count, v, GL_UNSIGNED_INT_VEC3);
954 }
955 
956 void ProgramVk::setUniform4uiv(GLint location, GLsizei count, const GLuint *v)
957 {
958     setUniformImpl(location, count, v, GL_UNSIGNED_INT_VEC4);
959 }
960 
961 template <int cols, int rows>
962 void ProgramVk::setUniformMatrixfv(GLint location,
963                                    GLsizei count,
964                                    GLboolean transpose,
965                                    const GLfloat *value)
966 {
967     const gl::VariableLocation &locationInfo = mState.getUniformLocations()[location];
968     const gl::LinkedUniform &linkedUniform   = mState.getUniforms()[locationInfo.index];
969 
970     for (const gl::ShaderType shaderType : mState.getLinkedShaderStages())
971     {
972         DefaultUniformBlock &uniformBlock     = mDefaultUniformBlocks[shaderType];
973         const sh::BlockMemberInfo &layoutInfo = uniformBlock.uniformLayout[location];
974 
975         // Assume an offset of -1 means the block is unused.
976         if (layoutInfo.offset == -1)
977         {
978             continue;
979         }
980 
981         SetFloatUniformMatrixGLSL<cols, rows>::Run(
982             locationInfo.arrayIndex, linkedUniform.getArraySizeProduct(), count, transpose, value,
983             uniformBlock.uniformData.data() + layoutInfo.offset);
984 
985         mDefaultUniformBlocksDirty.set(shaderType);
986     }
987 }
988 
989 void ProgramVk::setUniformMatrix2fv(GLint location,
990                                     GLsizei count,
991                                     GLboolean transpose,
992                                     const GLfloat *value)
993 {
994     setUniformMatrixfv<2, 2>(location, count, transpose, value);
995 }
996 
997 void ProgramVk::setUniformMatrix3fv(GLint location,
998                                     GLsizei count,
999                                     GLboolean transpose,
1000                                     const GLfloat *value)
1001 {
1002     setUniformMatrixfv<3, 3>(location, count, transpose, value);
1003 }
1004 
1005 void ProgramVk::setUniformMatrix4fv(GLint location,
1006                                     GLsizei count,
1007                                     GLboolean transpose,
1008                                     const GLfloat *value)
1009 {
1010     setUniformMatrixfv<4, 4>(location, count, transpose, value);
1011 }
1012 
1013 void ProgramVk::setUniformMatrix2x3fv(GLint location,
1014                                       GLsizei count,
1015                                       GLboolean transpose,
1016                                       const GLfloat *value)
1017 {
1018     setUniformMatrixfv<2, 3>(location, count, transpose, value);
1019 }
1020 
1021 void ProgramVk::setUniformMatrix3x2fv(GLint location,
1022                                       GLsizei count,
1023                                       GLboolean transpose,
1024                                       const GLfloat *value)
1025 {
1026     setUniformMatrixfv<3, 2>(location, count, transpose, value);
1027 }
1028 
1029 void ProgramVk::setUniformMatrix2x4fv(GLint location,
1030                                       GLsizei count,
1031                                       GLboolean transpose,
1032                                       const GLfloat *value)
1033 {
1034     setUniformMatrixfv<2, 4>(location, count, transpose, value);
1035 }
1036 
1037 void ProgramVk::setUniformMatrix4x2fv(GLint location,
1038                                       GLsizei count,
1039                                       GLboolean transpose,
1040                                       const GLfloat *value)
1041 {
1042     setUniformMatrixfv<4, 2>(location, count, transpose, value);
1043 }
1044 
1045 void ProgramVk::setUniformMatrix3x4fv(GLint location,
1046                                       GLsizei count,
1047                                       GLboolean transpose,
1048                                       const GLfloat *value)
1049 {
1050     setUniformMatrixfv<3, 4>(location, count, transpose, value);
1051 }
1052 
1053 void ProgramVk::setUniformMatrix4x3fv(GLint location,
1054                                       GLsizei count,
1055                                       GLboolean transpose,
1056                                       const GLfloat *value)
1057 {
1058     setUniformMatrixfv<4, 3>(location, count, transpose, value);
1059 }
1060 
1061 void ProgramVk::setPathFragmentInputGen(const std::string &inputName,
1062                                         GLenum genMode,
1063                                         GLint components,
1064                                         const GLfloat *coeffs)
1065 {
1066     UNIMPLEMENTED();
1067 }
1068 
1069 angle::Result ProgramVk::allocateDescriptorSet(ContextVk *contextVk, uint32_t descriptorSetIndex)
1070 {
1071     bool ignoreNewPoolAllocated;
1072     return allocateDescriptorSetAndGetInfo(contextVk, descriptorSetIndex, &ignoreNewPoolAllocated);
1073 }
1074 
1075 angle::Result ProgramVk::allocateDescriptorSetAndGetInfo(ContextVk *contextVk,
1076                                                          uint32_t descriptorSetIndex,
1077                                                          bool *newPoolAllocatedOut)
1078 {
1079     vk::DynamicDescriptorPool &dynamicDescriptorPool = mDynamicDescriptorPools[descriptorSetIndex];
1080 
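    // Grow the descriptor set array so it can hold an entry at this index.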
1081     uint32_t potentialNewCount = descriptorSetIndex + 1;
1082     if (potentialNewCount > mDescriptorSets.size())
1083     {
1084         mDescriptorSets.resize(potentialNewCount, VK_NULL_HANDLE);
1085     }
1086 
1087     const vk::DescriptorSetLayout &descriptorSetLayout =
1088         mDescriptorSetLayouts[descriptorSetIndex].get();
1089     ANGLE_TRY(dynamicDescriptorPool.allocateSetsAndGetInfo(
1090         contextVk, descriptorSetLayout.ptr(), 1, &mDescriptorPoolBindings[descriptorSetIndex],
1091         &mDescriptorSets[descriptorSetIndex], newPoolAllocatedOut));
1092     mEmptyDescriptorSets[descriptorSetIndex] = VK_NULL_HANDLE;
1093 
1094     return angle::Result::Continue;
1095 }
1096 
1097 void ProgramVk::getUniformfv(const gl::Context *context, GLint location, GLfloat *params) const
1098 {
1099     getUniformImpl(location, params, GL_FLOAT);
1100 }
1101 
1102 void ProgramVk::getUniformiv(const gl::Context *context, GLint location, GLint *params) const
1103 {
1104     getUniformImpl(location, params, GL_INT);
1105 }
1106 
1107 void ProgramVk::getUniformuiv(const gl::Context *context, GLint location, GLuint *params) const
1108 {
1109     getUniformImpl(location, params, GL_UNSIGNED_INT);
1110 }
1111 
1112 angle::Result ProgramVk::updateUniforms(ContextVk *contextVk)
1113 {
1114     ASSERT(dirtyUniforms());
1115 
1116     bool anyNewBufferAllocated = false;
1117     uint32_t offsetIndex       = 0;
1118 
1119     // Update buffer memory by immediate mapping. This immediate update only works once.
1120     for (gl::ShaderType shaderType : mState.getLinkedShaderStages())
1121     {
1122         DefaultUniformBlock &uniformBlock = mDefaultUniformBlocks[shaderType];
1123 
1124         if (mDefaultUniformBlocksDirty[shaderType])
1125         {
1126             bool bufferModified = false;
1127             ANGLE_TRY(
1128                 SyncDefaultUniformBlock(contextVk, &uniformBlock.storage, uniformBlock.uniformData,
1129                                         &mDynamicBufferOffsets[offsetIndex], &bufferModified));
1130             mDefaultUniformBlocksDirty.reset(shaderType);
1131 
1132             if (bufferModified)
1133             {
1134                 anyNewBufferAllocated = true;
1135             }
1136         }
1137 
1138         ++offsetIndex;
1139     }
1140 
1141     if (anyNewBufferAllocated)
1142     {
1143         // We need to reinitialize the descriptor sets if we newly allocated buffers since we can't
1144         // modify the descriptor sets once initialized.
1145         ANGLE_TRY(allocateDescriptorSet(contextVk, kUniformsAndXfbDescriptorSetIndex));
1146         updateDefaultUniformsDescriptorSet(contextVk);
1147         updateTransformFeedbackDescriptorSetImpl(contextVk);
1148     }
1149 
1150     return angle::Result::Continue;
1151 }
1152 
1153 void ProgramVk::updateDefaultUniformsDescriptorSet(ContextVk *contextVk)
1154 {
1155     uint32_t shaderStageCount = static_cast<uint32_t>(mState.getLinkedShaderStageCount());
1156 
1157     gl::ShaderVector<VkDescriptorBufferInfo> descriptorBufferInfo(shaderStageCount);
1158     gl::ShaderVector<VkWriteDescriptorSet> writeDescriptorInfo(shaderStageCount);
1159 
1160     uint32_t bindingIndex = 0;
1161 
1162     // Write default uniforms for each shader type.
1163     for (const gl::ShaderType shaderType : mState.getLinkedShaderStages())
1164     {
1165         DefaultUniformBlock &uniformBlock  = mDefaultUniformBlocks[shaderType];
1166         VkDescriptorBufferInfo &bufferInfo = descriptorBufferInfo[bindingIndex];
1167         VkWriteDescriptorSet &writeInfo    = writeDescriptorInfo[bindingIndex];
1168 
1169         if (!uniformBlock.uniformData.empty())
1170         {
1171             const vk::BufferHelper *bufferHelper = uniformBlock.storage.getCurrentBuffer();
1172             bufferInfo.buffer                    = bufferHelper->getBuffer().getHandle();
1173         }
1174         else
1175         {
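            // This stage has no default uniform data; bind the shared empty buffer initialized in
            // linkImpl so the descriptor still references a valid buffer.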
1176             mEmptyBuffer.updateQueueSerial(contextVk->getCurrentQueueSerial());
1177             bufferInfo.buffer = mEmptyBuffer.getBuffer().getHandle();
1178         }
1179 
1180         bufferInfo.offset = 0;
1181         bufferInfo.range  = VK_WHOLE_SIZE;
1182 
1183         writeInfo.sType            = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
1184         writeInfo.pNext            = nullptr;
1185         writeInfo.dstSet           = mDescriptorSets[kUniformsAndXfbDescriptorSetIndex];
1186         writeInfo.dstBinding       = bindingIndex;
1187         writeInfo.dstArrayElement  = 0;
1188         writeInfo.descriptorCount  = 1;
1189         writeInfo.descriptorType   = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
1190         writeInfo.pImageInfo       = nullptr;
1191         writeInfo.pBufferInfo      = &bufferInfo;
1192         writeInfo.pTexelBufferView = nullptr;
1193 
1194         ++bindingIndex;
1195     }
1196 
1197     VkDevice device = contextVk->getDevice();
1198 
1199     ASSERT(bindingIndex == shaderStageCount);
1200     ASSERT(shaderStageCount <= kReservedDefaultUniformBindingCount);
1201 
1202     vkUpdateDescriptorSets(device, shaderStageCount, writeDescriptorInfo.data(), 0, nullptr);
1203 }
1204 
1205 void ProgramVk::updateBuffersDescriptorSet(ContextVk *contextVk,
1206                                            vk::CommandGraphResource *recorder,
1207                                            const std::vector<gl::InterfaceBlock> &blocks,
1208                                            VkDescriptorType descriptorType)
1209 {
1210     if (blocks.empty())
1211     {
1212         return;
1213     }
1214 
1215     VkDescriptorSet descriptorSet = mDescriptorSets[kShaderResourceDescriptorSetIndex];
1216 
1217     ASSERT(descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1218            descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
1219     const bool isStorageBuffer = descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
1220     const uint32_t bindingStart =
1221         isStorageBuffer ? getStorageBlockBindingsOffset() : getUniformBlockBindingsOffset();
1222 
1223     static_assert(
1224         gl::IMPLEMENTATION_MAX_SHADER_STORAGE_BUFFER_BINDINGS >=
1225             gl::IMPLEMENTATION_MAX_UNIFORM_BUFFER_BINDINGS,
1226         "The descriptor arrays here would have inadequate size for uniform buffer objects");
1227 
1228     gl::StorageBuffersArray<VkDescriptorBufferInfo> descriptorBufferInfo;
1229     gl::StorageBuffersArray<VkWriteDescriptorSet> writeDescriptorInfo;
1230     uint32_t writeCount = 0;
1231     // The binding is incremented every time arrayElement 0 is encountered, which means there will
1232     // be an increment right at the start.  Start from -1 to get 0 as the first binding.
1233     int32_t currentBinding = -1;
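    // Illustrative (hypothetical) example: for blocks {A, B[0], B[1], C}, currentBinding advances
    // at A, B[0] and C, yielding bindings A -> 0, B -> 1 (array elements 0 and 1) and C -> 2.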
1234 
1235     // Write uniform or storage buffers.
1236     const gl::State &glState = contextVk->getState();
1237     for (uint32_t bufferIndex = 0; bufferIndex < blocks.size(); ++bufferIndex)
1238     {
1239         const gl::InterfaceBlock &block = blocks[bufferIndex];
1240         const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding =
1241             isStorageBuffer ? glState.getIndexedShaderStorageBuffer(block.binding)
1242                             : glState.getIndexedUniformBuffer(block.binding);
1243 
1244         if (!block.isArray || block.arrayElement == 0)
1245         {
1246             // Array indices of the same buffer binding are placed sequentially in `blocks`.
1247             // Thus, the block binding is updated only when array index 0 is encountered.
1248             ++currentBinding;
1249         }
1250 
1251         if (bufferBinding.get() == nullptr)
1252         {
1253             continue;
1254         }
1255 
1256         uint32_t binding          = bindingStart + currentBinding;
1257         uint32_t arrayElement     = block.isArray ? block.arrayElement : 0;
1258         VkDeviceSize maxBlockSize = isStorageBuffer ? 0 : block.dataSize;
1259 
1260         VkDescriptorBufferInfo &bufferInfo = descriptorBufferInfo[writeCount];
1261         VkWriteDescriptorSet &writeInfo    = writeDescriptorInfo[writeCount];
1262 
1263         WriteBufferDescriptorSetBinding(bufferBinding, maxBlockSize, descriptorSet, descriptorType,
1264                                         binding, arrayElement, 0, &bufferInfo, &writeInfo);
1265 
1266         BufferVk *bufferVk             = vk::GetImpl(bufferBinding.get());
1267         vk::BufferHelper &bufferHelper = bufferVk->getBuffer();
1268 
1269         if (isStorageBuffer)
1270         {
1271             bufferHelper.onWrite(contextVk, recorder, VK_ACCESS_SHADER_READ_BIT,
1272                                  VK_ACCESS_SHADER_WRITE_BIT);
1273         }
1274         else
1275         {
1276             bufferHelper.onRead(recorder, VK_ACCESS_UNIFORM_READ_BIT);
1277         }
1278 
1279         ++writeCount;
1280     }
1281 
1282     VkDevice device = contextVk->getDevice();
1283 
1284     vkUpdateDescriptorSets(device, writeCount, writeDescriptorInfo.data(), 0, nullptr);
1285 }
1286 
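// Writes descriptors for the GL atomic counter buffer bindings.  In this backend, atomic counters
// are backed by storage buffers, so each bound buffer becomes a VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
// array element of a single binding, and every unused array element is pointed at mEmptyBuffer.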
1287 void ProgramVk::updateAtomicCounterBuffersDescriptorSet(ContextVk *contextVk,
1288                                                         vk::CommandGraphResource *recorder)
1289 {
1290     const gl::State &glState = contextVk->getState();
1291     const std::vector<gl::AtomicCounterBuffer> &atomicCounterBuffers =
1292         mState.getAtomicCounterBuffers();
1293 
1294     if (atomicCounterBuffers.empty())
1295     {
1296         return;
1297     }
1298 
1299     VkDescriptorSet descriptorSet = mDescriptorSets[kShaderResourceDescriptorSetIndex];
1300 
1301     const uint32_t bindingStart = getAtomicCounterBufferBindingsOffset();
1302 
1303     gl::AtomicCounterBuffersArray<VkDescriptorBufferInfo> descriptorBufferInfo;
1304     gl::AtomicCounterBuffersArray<VkWriteDescriptorSet> writeDescriptorInfo;
1305     gl::AtomicCounterBufferMask writtenBindings;
1306 
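    // Vulkan requires storage buffer descriptor offsets to be a multiple of
    // minStorageBufferOffsetAlignment, so that limit is passed to WriteBufferDescriptorSetBinding,
    // which is expected to align the offset accordingly.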
1307     RendererVk *rendererVk = contextVk->getRenderer();
1308     const VkDeviceSize requiredOffsetAlignment =
1309         rendererVk->getPhysicalDeviceProperties().limits.minStorageBufferOffsetAlignment;
1310 
1311     // Write atomic counter buffers.
1312     for (uint32_t bufferIndex = 0; bufferIndex < atomicCounterBuffers.size(); ++bufferIndex)
1313     {
1314         const gl::AtomicCounterBuffer &atomicCounterBuffer = atomicCounterBuffers[bufferIndex];
1315         uint32_t binding                                   = atomicCounterBuffer.binding;
1316         const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding =
1317             glState.getIndexedAtomicCounterBuffer(binding);
1318 
1319         if (bufferBinding.get() == nullptr)
1320         {
1321             continue;
1322         }
1323 
1324         VkDescriptorBufferInfo &bufferInfo = descriptorBufferInfo[binding];
1325         VkWriteDescriptorSet &writeInfo    = writeDescriptorInfo[binding];
1326 
1327         WriteBufferDescriptorSetBinding(bufferBinding, 0, descriptorSet,
1328                                         VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, bindingStart, binding,
1329                                         requiredOffsetAlignment, &bufferInfo, &writeInfo);
1330 
1331         BufferVk *bufferVk             = vk::GetImpl(bufferBinding.get());
1332         vk::BufferHelper &bufferHelper = bufferVk->getBuffer();
1333 
1334         bufferHelper.onWrite(contextVk, recorder, VK_ACCESS_SHADER_READ_BIT,
1335                              VK_ACCESS_SHADER_WRITE_BIT);
1336 
1337         writtenBindings.set(binding);
1338     }
1339 
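    // Descriptors the pipeline may access must reference valid resources, so the unbound array
    // elements are conservatively filled with mEmptyBuffer below.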
1340     // Bind the empty buffer to every array slot that's unused.
1341     mEmptyBuffer.updateQueueSerial(contextVk->getCurrentQueueSerial());
1342     for (size_t binding : ~writtenBindings)
1343     {
1344         VkDescriptorBufferInfo &bufferInfo = descriptorBufferInfo[binding];
1345         VkWriteDescriptorSet &writeInfo    = writeDescriptorInfo[binding];
1346 
1347         bufferInfo.buffer = mEmptyBuffer.getBuffer().getHandle();
1348         bufferInfo.offset = 0;
1349         bufferInfo.range  = VK_WHOLE_SIZE;
1350 
1351         writeInfo.sType            = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
1352         writeInfo.pNext            = nullptr;
1353         writeInfo.dstSet           = descriptorSet;
1354         writeInfo.dstBinding       = bindingStart;
1355         writeInfo.dstArrayElement  = static_cast<uint32_t>(binding);
1356         writeInfo.descriptorCount  = 1;
1357         writeInfo.descriptorType   = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
1358         writeInfo.pImageInfo       = nullptr;
1359         writeInfo.pBufferInfo      = &bufferInfo;
1360         writeInfo.pTexelBufferView = nullptr;
1361     }
1362 
1363     VkDevice device = contextVk->getDevice();
1364 
1365     vkUpdateDescriptorSets(device, gl::IMPLEMENTATION_MAX_ATOMIC_COUNTER_BUFFERS,
1366                            writeDescriptorInfo.data(), 0, nullptr);
1367 }
1368 
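// Allocates the shader-resource descriptor set and populates it with the program's uniform
// blocks, shader storage blocks and atomic counter buffers.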
1369 angle::Result ProgramVk::updateShaderResourcesDescriptorSet(ContextVk *contextVk,
1370                                                             vk::CommandGraphResource *recorder)
1371 {
1372     ANGLE_TRY(allocateDescriptorSet(contextVk, kShaderResourceDescriptorSetIndex));
1373 
1374     updateBuffersDescriptorSet(contextVk, recorder, mState.getUniformBlocks(),
1375                                VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
1376     updateBuffersDescriptorSet(contextVk, recorder, mState.getShaderStorageBlocks(),
1377                                VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
1378     updateAtomicCounterBuffersDescriptorSet(contextVk, recorder);
1379 
1380     return angle::Result::Continue;
1381 }
1382 
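// (Re)allocates the uniforms-and-transform-feedback descriptor set and fills in both the
// per-stage default uniform buffers and the transform feedback buffer descriptors.  The bound
// transform feedback object is also registered as a dependency of the given framebuffer.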
1383 angle::Result ProgramVk::updateTransformFeedbackDescriptorSet(ContextVk *contextVk,
1384                                                               vk::FramebufferHelper *framebuffer)
1385 {
1386     const gl::State &glState = contextVk->getState();
1387     ASSERT(hasTransformFeedbackOutput());
1388 
1389     TransformFeedbackVk *transformFeedbackVk = vk::GetImpl(glState.getCurrentTransformFeedback());
1390     transformFeedbackVk->addFramebufferDependency(contextVk, mState, framebuffer);
1391 
1392     ANGLE_TRY(allocateDescriptorSet(contextVk, kUniformsAndXfbDescriptorSetIndex));
1393 
1394     updateDefaultUniformsDescriptorSet(contextVk);
1395     updateTransformFeedbackDescriptorSetImpl(contextVk);
1396 
1397     return angle::Result::Continue;
1398 }
1399 
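// Writes the transform feedback buffer descriptors into the uniforms-and-transform-feedback set.
// This is a no-op when the program has no transform feedback output.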
1400 void ProgramVk::updateTransformFeedbackDescriptorSetImpl(ContextVk *contextVk)
1401 {
1402     const gl::State &glState = contextVk->getState();
1403     if (!hasTransformFeedbackOutput())
1404     {
1405         // NOTE(syoussefi): a possible optimization is to skip this if transform feedback is
1406         // paused.  However, even if paused, |updateDescriptorSet| must be called at least once for
1407         // the sake of validation.
1408         return;
1409     }
1410 
1411     TransformFeedbackVk *transformFeedbackVk = vk::GetImpl(glState.getCurrentTransformFeedback());
1412     transformFeedbackVk->updateDescriptorSet(contextVk, mState,
1413                                              mDescriptorSets[kUniformsAndXfbDescriptorSetIndex]);
1414 }
1415 
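// Writes combined image/sampler descriptors for every active texture unit.  Descriptor sets are
// cached per TextureDescriptorDesc: on a cache hit the previously written set is reused, and the
// cache is cleared whenever a new descriptor pool has to be allocated, since the old sets may no
// longer be valid.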
1416 angle::Result ProgramVk::updateTexturesDescriptorSet(ContextVk *contextVk)
1417 {
1418     const vk::TextureDescriptorDesc &texturesDesc = contextVk->getActiveTexturesDesc();
1419 
1420     auto iter = mTextureDescriptorsCache.find(texturesDesc);
1421     if (iter != mTextureDescriptorsCache.end())
1422     {
1423         mDescriptorSets[kTextureDescriptorSetIndex] = iter->second;
1424         return angle::Result::Continue;
1425     }
1426 
1427     ASSERT(hasTextures());
1428     bool newPoolAllocated;
1429     ANGLE_TRY(
1430         allocateDescriptorSetAndGetInfo(contextVk, kTextureDescriptorSetIndex, &newPoolAllocated));
1431 
1432     // Clear descriptor set cache. It may no longer be valid.
1433     if (newPoolAllocated)
1434     {
1435         mTextureDescriptorsCache.clear();
1436     }
1437 
1438     VkDescriptorSet descriptorSet = mDescriptorSets[kTextureDescriptorSetIndex];
1439 
1440     gl::ActiveTextureArray<VkDescriptorImageInfo> descriptorImageInfo;
1441     gl::ActiveTextureArray<VkWriteDescriptorSet> writeDescriptorInfo;
1442     uint32_t writeCount = 0;
1443 
1444     const gl::ActiveTextureArray<vk::TextureUnit> &activeTextures = contextVk->getActiveTextures();
1445 
1446     bool useSubgroupOps                = false;
1447     bool emulateSeamfulCubeMapSampling = contextVk->emulateSeamfulCubeMapSampling(&useSubgroupOps);
1448 
1449     for (uint32_t textureIndex = 0; textureIndex < mState.getSamplerBindings().size();
1450          ++textureIndex)
1451     {
1452         const gl::SamplerBinding &samplerBinding = mState.getSamplerBindings()[textureIndex];
1453 
1454         ASSERT(!samplerBinding.unreferenced);
1455 
1456         for (uint32_t arrayElement = 0; arrayElement < samplerBinding.boundTextureUnits.size();
1457              ++arrayElement)
1458         {
1459             GLuint textureUnit   = samplerBinding.boundTextureUnits[arrayElement];
1460             TextureVk *textureVk = activeTextures[textureUnit].texture;
1461             SamplerVk *samplerVk = activeTextures[textureUnit].sampler;
1462 
1463             vk::ImageHelper &image = textureVk->getImage();
1464 
1465             VkDescriptorImageInfo &imageInfo = descriptorImageInfo[writeCount];
1466 
1467             // Use the bound sampler object if present; otherwise use the texture's own sampler.
1468             imageInfo.sampler = (samplerVk != nullptr) ? samplerVk->getSampler().getHandle()
1469                                                        : textureVk->getSampler().getHandle();
1470             imageInfo.imageView   = textureVk->getReadImageView().getHandle();
1471             imageInfo.imageLayout = image.getCurrentLayout();
1472 
1473             if (emulateSeamfulCubeMapSampling)
1474             {
1475                 // If emulating seamful cube map sampling, use the fetch image view.  This is the
1476                 // same as the read image view, except it is a 2D array view for cube maps.
1477                 imageInfo.imageView = textureVk->getFetchImageView().getHandle();
1478             }
1479 
1480             VkWriteDescriptorSet &writeInfo = writeDescriptorInfo[writeCount];
1481 
1482             writeInfo.sType            = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
1483             writeInfo.pNext            = nullptr;
1484             writeInfo.dstSet           = descriptorSet;
1485             writeInfo.dstBinding       = textureIndex;
1486             writeInfo.dstArrayElement  = arrayElement;
1487             writeInfo.descriptorCount  = 1;
1488             writeInfo.descriptorType   = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1489             writeInfo.pImageInfo       = &imageInfo;
1490             writeInfo.pBufferInfo      = nullptr;
1491             writeInfo.pTexelBufferView = nullptr;
1492 
1493             ++writeCount;
1494         }
1495     }
1496 
1497     VkDevice device = contextVk->getDevice();
1498 
1499     ASSERT(writeCount > 0);
1500 
1501     vkUpdateDescriptorSets(device, writeCount, writeDescriptorInfo.data(), 0, nullptr);
1502 
1503     mTextureDescriptorsCache.emplace(texturesDesc, descriptorSet);
1504 
1505     return angle::Result::Continue;
1506 }
1507 
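// Test-only hook that overrides the minimum allocation size of the per-stage default uniform
// block storage.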
1508 void ProgramVk::setDefaultUniformBlocksMinSizeForTesting(size_t minSize)
1509 {
1510     for (DefaultUniformBlock &block : mDefaultUniformBlocks)
1511     {
1512         block.storage.setMinimumSizeForTesting(minSize);
1513     }
1514 }
1515 
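// Binds every descriptor set that has been written for the current draw or dispatch.  Dynamic
// offsets are supplied only for the uniforms-and-transform-feedback set, whose default uniform
// buffers use VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC.  A minimal sketch of the underlying
// Vulkan call that vk::CommandBuffer::bindDescriptorSets is assumed to wrap:
//
//     vkCmdBindDescriptorSets(commandBuffer, pipelineBindPoint, pipelineLayout,
//                             descriptorSetIndex /*firstSet*/, 1, &descSet,
//                             uniformBlockOffsetCount, mDynamicBufferOffsets.data());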
1516 angle::Result ProgramVk::updateDescriptorSets(ContextVk *contextVk,
1517                                               vk::CommandBuffer *commandBuffer)
1518 {
1519     // Can probably use better dirty bits here.
1520 
1521     if (mDescriptorSets.empty())
1522         return angle::Result::Continue;
1523 
1524     // Find the index just past the highest non-null descriptor set.  This is used with a
1525     // driver workaround to bind empty descriptor sets only for the gaps between 0 and that
1526     // maximum, and to avoid binding unnecessary empty sets beyond it.
1527     size_t descriptorSetRange = 0;
1528     for (size_t descriptorSetIndex = 0; descriptorSetIndex < mDescriptorSets.size();
1529          ++descriptorSetIndex)
1530     {
1531         if (mDescriptorSets[descriptorSetIndex] != VK_NULL_HANDLE)
1532         {
1533             descriptorSetRange = descriptorSetIndex + 1;
1534         }
1535     }
1536 
1537     const VkPipelineBindPoint pipelineBindPoint =
1538         mState.isCompute() ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS;
1539 
1540     for (uint32_t descriptorSetIndex = 0; descriptorSetIndex < descriptorSetRange;
1541          ++descriptorSetIndex)
1542     {
1543         VkDescriptorSet descSet = mDescriptorSets[descriptorSetIndex];
1544         if (descSet == VK_NULL_HANDLE)
1545         {
1546             if (!contextVk->getRenderer()->getFeatures().bindEmptyForUnusedDescriptorSets.enabled)
1547             {
1548                 continue;
1549             }
1550 
1551             // Work around a driver bug where missing (though unused) descriptor set indices cause
1552             // later sets to misbehave.
1553             if (mEmptyDescriptorSets[descriptorSetIndex] == VK_NULL_HANDLE)
1554             {
1555                 const vk::DescriptorSetLayout &descriptorSetLayout =
1556                     mDescriptorSetLayouts[descriptorSetIndex].get();
1557 
1558                 ANGLE_TRY(mDynamicDescriptorPools[descriptorSetIndex].allocateSets(
1559                     contextVk, descriptorSetLayout.ptr(), 1,
1560                     &mDescriptorPoolBindings[descriptorSetIndex],
1561                     &mEmptyDescriptorSets[descriptorSetIndex]));
1562             }
1563             descSet = mEmptyDescriptorSets[descriptorSetIndex];
1564         }
1565 
1566         // Default uniforms are encompassed in a block per shader stage, and they are assigned
1567         // through dynamic uniform buffers (requiring dynamic offsets).  No other descriptor
1568         // requires a dynamic offset.
1569         const uint32_t uniformBlockOffsetCount =
1570             descriptorSetIndex == kUniformsAndXfbDescriptorSetIndex
1571                 ? static_cast<uint32_t>(mDynamicBufferOffsets.size())
1572                 : 0;
1573 
1574         commandBuffer->bindDescriptorSets(mPipelineLayout.get(), pipelineBindPoint,
1575                                           descriptorSetIndex, 1, &descSet, uniformBlockOffsetCount,
1576                                           mDynamicBufferOffsets.data());
1577     }
1578 
1579     return angle::Result::Continue;
1580 }
1581 }  // namespace rx
1582