• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //
2 // Copyright 2020 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // ProgramExecutableVk.cpp: Collects the information and interfaces common to both ProgramVks and
7 // ProgramPipelineVks in order to execute/draw with either.
8 
9 #include "libANGLE/renderer/vulkan/ProgramExecutableVk.h"
10 
11 #include "common/string_utils.h"
12 #include "libANGLE/renderer/vulkan/BufferVk.h"
13 #include "libANGLE/renderer/vulkan/DisplayVk.h"
14 #include "libANGLE/renderer/vulkan/FramebufferVk.h"
15 #include "libANGLE/renderer/vulkan/ProgramPipelineVk.h"
16 #include "libANGLE/renderer/vulkan/ProgramVk.h"
17 #include "libANGLE/renderer/vulkan/TextureVk.h"
18 #include "libANGLE/renderer/vulkan/TransformFeedbackVk.h"
19 #include "libANGLE/renderer/vulkan/vk_helpers.h"
20 #include "libANGLE/renderer/vulkan/vk_utils.h"
21 
22 namespace rx
23 {
24 namespace
25 {
GetGraphicsProgramIndex(ProgramTransformOptions transformOptions)26 uint8_t GetGraphicsProgramIndex(ProgramTransformOptions transformOptions)
27 {
28     return gl::bitCast<uint8_t, ProgramTransformOptions>(transformOptions);
29 }
30 
LoadShaderInterfaceVariableXfbInfo(gl::BinaryInputStream * stream,ShaderInterfaceVariableXfbInfo * xfb)31 void LoadShaderInterfaceVariableXfbInfo(gl::BinaryInputStream *stream,
32                                         ShaderInterfaceVariableXfbInfo *xfb)
33 {
34     xfb->buffer        = stream->readInt<uint32_t>();
35     xfb->offset        = stream->readInt<uint32_t>();
36     xfb->stride        = stream->readInt<uint32_t>();
37     xfb->arraySize     = stream->readInt<uint32_t>();
38     xfb->columnCount   = stream->readInt<uint32_t>();
39     xfb->rowCount      = stream->readInt<uint32_t>();
40     xfb->arrayIndex    = stream->readInt<uint32_t>();
41     xfb->componentType = stream->readInt<uint32_t>();
42     xfb->arrayElements.resize(stream->readInt<size_t>());
43     for (ShaderInterfaceVariableXfbInfo &arrayElement : xfb->arrayElements)
44     {
45         LoadShaderInterfaceVariableXfbInfo(stream, &arrayElement);
46     }
47 }
48 
SaveShaderInterfaceVariableXfbInfo(const ShaderInterfaceVariableXfbInfo & xfb,gl::BinaryOutputStream * stream)49 void SaveShaderInterfaceVariableXfbInfo(const ShaderInterfaceVariableXfbInfo &xfb,
50                                         gl::BinaryOutputStream *stream)
51 {
52     stream->writeInt(xfb.buffer);
53     stream->writeInt(xfb.offset);
54     stream->writeInt(xfb.stride);
55     stream->writeInt(xfb.arraySize);
56     stream->writeInt(xfb.columnCount);
57     stream->writeInt(xfb.rowCount);
58     stream->writeInt(xfb.arrayIndex);
59     stream->writeInt(xfb.componentType);
60     stream->writeInt(xfb.arrayElements.size());
61     for (const ShaderInterfaceVariableXfbInfo &arrayElement : xfb.arrayElements)
62     {
63         SaveShaderInterfaceVariableXfbInfo(arrayElement, stream);
64     }
65 }
66 
ValidateTransformedSpirV(const ContextVk * contextVk,const gl::ShaderBitSet & linkedShaderStages,const ShaderInterfaceVariableInfoMap & variableInfoMap,const gl::ShaderMap<angle::spirv::Blob> & spirvBlobs)67 bool ValidateTransformedSpirV(const ContextVk *contextVk,
68                               const gl::ShaderBitSet &linkedShaderStages,
69                               const ShaderInterfaceVariableInfoMap &variableInfoMap,
70                               const gl::ShaderMap<angle::spirv::Blob> &spirvBlobs)
71 {
72     gl::ShaderType lastPreFragmentStage = gl::GetLastPreFragmentStage(linkedShaderStages);
73 
74     for (gl::ShaderType shaderType : linkedShaderStages)
75     {
76         SpvTransformOptions options;
77         options.shaderType                = shaderType;
78         options.negativeViewportSupported = false;
79         options.isLastPreFragmentStage =
80             shaderType == lastPreFragmentStage && shaderType != gl::ShaderType::TessControl;
81         options.isTransformFeedbackStage = options.isLastPreFragmentStage;
82         options.useSpirvVaryingPrecisionFixer =
83             contextVk->getFeatures().varyingsRequireMatchingPrecisionInSpirv.enabled;
84 
85         angle::spirv::Blob transformed;
86         if (SpvTransformSpirvCode(options, variableInfoMap, spirvBlobs[shaderType], &transformed) !=
87             angle::Result::Continue)
88         {
89             return false;
90         }
91     }
92     return true;
93 }
94 
GetInterfaceBlockArraySize(const std::vector<gl::InterfaceBlock> & blocks,uint32_t bufferIndex)95 uint32_t GetInterfaceBlockArraySize(const std::vector<gl::InterfaceBlock> &blocks,
96                                     uint32_t bufferIndex)
97 {
98     const gl::InterfaceBlock &block = blocks[bufferIndex];
99 
100     if (!block.isArray)
101     {
102         return 1;
103     }
104 
105     ASSERT(block.arrayElement == 0);
106 
107     // Search consecutively until all array indices of this block are visited.
108     uint32_t arraySize;
109     for (arraySize = 1; bufferIndex + arraySize < blocks.size(); ++arraySize)
110     {
111         const gl::InterfaceBlock &nextBlock = blocks[bufferIndex + arraySize];
112 
113         if (nextBlock.arrayElement != arraySize)
114         {
115             break;
116         }
117 
118         // It's unexpected for an array to start at a non-zero array size, so we can always rely on
119         // the sequential `arrayElement`s to belong to the same block.
120         ASSERT(nextBlock.name == block.name);
121         ASSERT(nextBlock.isArray);
122     }
123 
124     return arraySize;
125 }
126 
// Fills |graphicsPipelineDescOut| with default state: the given topology,
// single-sampled rendering, the executable's framebuffer-fetch mode and
// vertex attribute component types, and an 8-bit RGBA attachment format
// (numeric type matched to each fragment output) for every output location.
// NOTE(review): appears to be used for pipeline cache warm-up — the exact
// formats only need to be plausible, not match a real framebuffer; confirm
// against the caller.
void SetupDefaultPipelineState(const ContextVk *contextVk,
                               const gl::ProgramExecutable &glExecutable,
                               gl::PrimitiveMode mode,
                               vk::GraphicsPipelineDesc *graphicsPipelineDescOut)
{
    graphicsPipelineDescOut->initDefaults(contextVk, vk::GraphicsPipelineSubset::Complete);
    graphicsPipelineDescOut->setTopology(mode);
    graphicsPipelineDescOut->setRenderPassSampleCount(1);
    graphicsPipelineDescOut->setRenderPassFramebufferFetchMode(glExecutable.usesFramebufferFetch());

    graphicsPipelineDescOut->setVertexShaderComponentTypes(
        glExecutable.getNonBuiltinAttribLocationsMask(), glExecutable.getAttributesTypeMask());

    const std::vector<sh::ShaderVariable> &outputVariables   = glExecutable.getOutputVariables();
    const std::vector<gl::VariableLocation> &outputLocations = glExecutable.getOutputLocations();

    for (const gl::VariableLocation &outputLocation : outputLocations)
    {
        // Only handle the first element of each output here; the inner loop
        // below assigns formats for the remaining array elements.
        if (outputLocation.arrayIndex == 0 && outputLocation.used() && !outputLocation.ignored)
        {
            const sh::ShaderVariable &outputVar = outputVariables[outputLocation.index];

            // Skip built-in outputs, with the exception of gl_FragColor.
            if (angle::BeginsWith(outputVar.name, "gl_") && outputVar.name != "gl_FragColor")
            {
                continue;
            }

            // Outputs without an explicit location default to location 0.
            uint32_t location = 0;
            if (outputVar.location != -1)
            {
                location = outputVar.location;
            }

            // Choose an 8-bit RGBA format whose numeric type (float/int/uint)
            // matches the output variable's component type.
            GLenum type            = gl::VariableComponentType(outputVar.type);
            angle::FormatID format = angle::FormatID::R8G8B8A8_UNORM;
            if (type == GL_INT)
            {
                format = angle::FormatID::R8G8B8A8_SINT;
            }
            else if (type == GL_UNSIGNED_INT)
            {
                format = angle::FormatID::R8G8B8A8_UINT;
            }

            // Arrayed outputs occupy consecutive attachment locations.
            const size_t arraySize = outputVar.isArray() ? outputVar.getOutermostArraySize() : 1;
            for (size_t arrayIndex = 0; arrayIndex < arraySize; ++arrayIndex)
            {
                graphicsPipelineDescOut->setRenderPassColorAttachmentFormat(location + arrayIndex,
                                                                            format);
            }
        }
    }

    // gl_FragColor/gl_FragData are handled separately, starting at attachment 0.
    for (const sh::ShaderVariable &outputVar : outputVariables)
    {
        if (outputVar.name == "gl_FragColor" || outputVar.name == "gl_FragData")
        {
            const size_t arraySize = outputVar.isArray() ? outputVar.getOutermostArraySize() : 1;
            for (size_t arrayIndex = 0; arrayIndex < arraySize; ++arrayIndex)
            {
                graphicsPipelineDescOut->setRenderPassColorAttachmentFormat(
                    arrayIndex, angle::FormatID::R8G8B8A8_UNORM);
            }
        }
    }
}
193 
// Serializes |pipelineCache| into |cacheDataOut|, compressing it when the
// enablePipelineCacheDataCompression feature is on.  On any failure (or when
// serialization is known ineffective on this driver) |cacheDataOut| is left
// empty / cleared, and the caller simply skips storing it.
void GetPipelineCacheData(ContextVk *contextVk,
                          const vk::PipelineCache &pipelineCache,
                          angle::MemoryBuffer *cacheDataOut)
{
    // The cache is expected to be valid unless warm-up was skipped (GLES1) or
    // not performed on this configuration.
    ASSERT(pipelineCache.valid() || contextVk->getState().isGLES1() ||
           !contextVk->getFeatures().warmUpPipelineCacheAtLink.enabled ||
           !contextVk->getFeatures().hasEffectivePipelineCacheSerialization.enabled);
    if (!pipelineCache.valid() ||
        !contextVk->getFeatures().hasEffectivePipelineCacheSerialization.enabled)
    {
        return;
    }

    // Extract the pipeline data.  If failed, or empty, it's simply not stored on disk.
    size_t pipelineCacheSize = 0;
    VkResult result =
        pipelineCache.getCacheData(contextVk->getDevice(), &pipelineCacheSize, nullptr);
    if (result != VK_SUCCESS || pipelineCacheSize == 0)
    {
        return;
    }

    if (contextVk->getFeatures().enablePipelineCacheDataCompression.enabled)
    {
        std::vector<uint8_t> pipelineCacheData(pipelineCacheSize);
        result = pipelineCache.getCacheData(contextVk->getDevice(), &pipelineCacheSize,
                                            pipelineCacheData.data());
        // VK_INCOMPLETE is tolerated: the driver may return less data than the
        // size queried above.  NOTE(review): in that case |pipelineCacheSize|
        // is shrunk but the full |pipelineCacheData.size()| (including the
        // zero-initialized tail) is compressed below — confirm intended.
        if (result != VK_SUCCESS && result != VK_INCOMPLETE)
        {
            return;
        }

        // Compress it.
        if (!egl::CompressBlobCacheData(pipelineCacheData.size(), pipelineCacheData.data(),
                                        cacheDataOut))
        {
            cacheDataOut->clear();
        }
    }
    else
    {
        // Uncompressed path: serialize directly into the output buffer.
        if (!cacheDataOut->resize(pipelineCacheSize))
        {
            ERR() << "Failed to allocate memory for pipeline cache data.";
            return;
        }
        result = pipelineCache.getCacheData(contextVk->getDevice(), &pipelineCacheSize,
                                            cacheDataOut->data());
        if (result != VK_SUCCESS && result != VK_INCOMPLETE)
        {
            cacheDataOut->clear();
        }
    }
}
248 
MakeSpecConsts(ProgramTransformOptions transformOptions,const vk::GraphicsPipelineDesc & desc)249 vk::SpecializationConstants MakeSpecConsts(ProgramTransformOptions transformOptions,
250                                            const vk::GraphicsPipelineDesc &desc)
251 {
252     vk::SpecializationConstants specConsts;
253 
254     specConsts.surfaceRotation = transformOptions.surfaceRotation;
255     specConsts.dither          = desc.getEmulatedDitherControl();
256 
257     return specConsts;
258 }
259 }  // namespace
260 
// DefaultUniformBlock implementation.
DefaultUniformBlock::DefaultUniformBlock() = default;

DefaultUniformBlock::~DefaultUniformBlock() = default;
264 
265 // ShaderInfo implementation.
ShaderInfo()266 ShaderInfo::ShaderInfo() {}
267 
268 ShaderInfo::~ShaderInfo() = default;
269 
// Copies the linked stages' untransformed SPIR-V blobs into this ShaderInfo.
// Per-variant SPIR-V transformation happens later (see ProgramInfo::initProgram),
// which is why the blobs are stored as generated.
angle::Result ShaderInfo::initShaders(ContextVk *contextVk,
                                      const gl::ShaderBitSet &linkedShaderStages,
                                      const gl::ShaderMap<const angle::spirv::Blob *> &spirvBlobs,
                                      const ShaderInterfaceVariableInfoMap &variableInfoMap)
{
    clear();

    // Copy every provided blob; stages without a blob stay empty.
    for (gl::ShaderType shaderType : gl::AllShaderTypes())
    {
        if (spirvBlobs[shaderType] != nullptr)
        {
            mSpirvBlobs[shaderType] = *spirvBlobs[shaderType];
        }
    }

    // Assert that SPIR-V transformation is correct, even if the test never issues a draw call.
    // Don't validate GLES1 programs because they are always created right before a draw, so they
    // will naturally be validated.  This improves GLES1 test run times.
    if (!contextVk->getState().isGLES1())
    {
        ASSERT(
            ValidateTransformedSpirV(contextVk, linkedShaderStages, variableInfoMap, mSpirvBlobs));
    }

    mIsInitialized = true;
    return angle::Result::Continue;
}
297 
// Copies a single stage's SPIR-V from another ShaderInfo into this one and
// marks this info initialized.  NOTE(review): presumably used when assembling
// a program pipeline from separable programs — confirm against callers.
void ShaderInfo::initShaderFromProgram(gl::ShaderType shaderType,
                                       const ShaderInfo &programShaderInfo)
{
    mSpirvBlobs[shaderType] = programShaderInfo.mSpirvBlobs[shaderType];
    mIsInitialized          = true;
}
304 
clear()305 void ShaderInfo::clear()
306 {
307     for (angle::spirv::Blob &spirvBlob : mSpirvBlobs)
308     {
309         spirvBlob.clear();
310     }
311     mIsInitialized = false;
312 }
313 
load(gl::BinaryInputStream * stream)314 void ShaderInfo::load(gl::BinaryInputStream *stream)
315 {
316     clear();
317 
318     // Read in shader codes for all shader types
319     for (gl::ShaderType shaderType : gl::AllShaderTypes())
320     {
321         angle::spirv::Blob *spirvBlob = &mSpirvBlobs[shaderType];
322 
323         // Read the SPIR-V
324         stream->readIntVector<uint32_t>(spirvBlob);
325     }
326 
327     mIsInitialized = true;
328 }
329 
save(gl::BinaryOutputStream * stream)330 void ShaderInfo::save(gl::BinaryOutputStream *stream)
331 {
332     ASSERT(valid());
333 
334     // Write out shader codes for all shader types
335     for (gl::ShaderType shaderType : gl::AllShaderTypes())
336     {
337         const angle::spirv::Blob &spirvBlob = mSpirvBlobs[shaderType];
338 
339         // Write the SPIR-V
340         stream->writeIntVector(spirvBlob);
341     }
342 }
343 
344 // ProgramInfo implementation.
ProgramInfo()345 ProgramInfo::ProgramInfo() {}
346 
347 ProgramInfo::~ProgramInfo() = default;
348 
// Transforms the stage's stored SPIR-V according to the variant's
// |optionBits| and the variable info map, then creates the VkShaderModule and
// registers it with mProgramHelper.
angle::Result ProgramInfo::initProgram(ContextVk *contextVk,
                                       gl::ShaderType shaderType,
                                       bool isLastPreFragmentStage,
                                       bool isTransformFeedbackProgram,
                                       const ShaderInfo &shaderInfo,
                                       ProgramTransformOptions optionBits,
                                       const ShaderInterfaceVariableInfoMap &variableInfoMap)
{
    const gl::ShaderMap<angle::spirv::Blob> &originalSpirvBlobs = shaderInfo.getSpirvBlobs();
    const angle::spirv::Blob &originalSpirvBlob                 = originalSpirvBlobs[shaderType];
    gl::ShaderMap<angle::spirv::Blob> transformedSpirvBlobs;
    angle::spirv::Blob &transformedSpirvBlob = transformedSpirvBlobs[shaderType];

    SpvTransformOptions options;
    options.shaderType             = shaderType;
    options.isLastPreFragmentStage = isLastPreFragmentStage;
    // Transform feedback lives in the last pre-fragment stage, unless this
    // variant explicitly strips the emulation code.
    options.isTransformFeedbackStage = isLastPreFragmentStage && isTransformFeedbackProgram &&
                                       !optionBits.removeTransformFeedbackEmulation;
    options.isTransformFeedbackEmulated = contextVk->getFeatures().emulateTransformFeedback.enabled;
    options.negativeViewportSupported   = contextVk->getFeatures().supportsNegativeViewport.enabled;
    // Multisampled framebuffer fetch only applies to the fragment stage.
    options.isMultisampledFramebufferFetch =
        optionBits.multiSampleFramebufferFetch && shaderType == gl::ShaderType::Fragment;
    options.enableSampleShading = optionBits.enableSampleShading;

    // Don't validate SPIR-V generated for GLES1 shaders when validation layers are enabled.  The
    // layers already validate SPIR-V, and since GLES1 shaders are controlled by ANGLE, they don't
    // typically require debugging at the SPIR-V level.  This improves GLES1 conformance test run
    // time.
    options.validate =
        !(contextVk->getState().isGLES1() && contextVk->getRenderer()->getEnableValidationLayers());

    options.useSpirvVaryingPrecisionFixer =
        contextVk->getFeatures().varyingsRequireMatchingPrecisionInSpirv.enabled;

    ANGLE_TRY(
        SpvTransformSpirvCode(options, variableInfoMap, originalSpirvBlob, &transformedSpirvBlob));
    // InitShaderModule takes the code size in bytes, not in 32-bit words.
    ANGLE_TRY(vk::InitShaderModule(contextVk, &mShaders[shaderType].get(),
                                   transformedSpirvBlob.data(),
                                   transformedSpirvBlob.size() * sizeof(uint32_t)));

    mProgramHelper.setShader(shaderType, &mShaders[shaderType]);

    return angle::Result::Continue;
}
393 
release(ContextVk * contextVk)394 void ProgramInfo::release(ContextVk *contextVk)
395 {
396     mProgramHelper.release(contextVk);
397 
398     for (vk::RefCounted<vk::ShaderModule> &shader : mShaders)
399     {
400         shader.get().destroy(contextVk->getDevice());
401     }
402 }
403 
ProgramExecutableVk::ProgramExecutableVk()
    : mNumDefaultUniformDescriptors(0),
      mImmutableSamplersMaxDescriptorCount(1),
      mUniformBufferDescriptorType(VK_DESCRIPTOR_TYPE_MAX_ENUM),
      mDynamicUniformDescriptorOffsets{}
{
    // Eagerly allocate the default uniform block storage for every stage.
    // NOTE(review): shared_ptr suggests these blocks may be shared with other
    // executables (e.g. program pipelines) — confirm against callers.
    for (std::shared_ptr<DefaultUniformBlock> &defaultBlock : mDefaultUniformBlocks)
    {
        defaultBlock = std::make_shared<DefaultUniformBlock>();
    }
}
415 
ProgramExecutableVk::~ProgramExecutableVk()
{
    // The pipeline cache must have been destroyed already (see reset(), which
    // is the only place that destroys mPipelineCache).
    ASSERT(!mPipelineCache.valid());
}
420 
// Releases all layout-related state: descriptor set layouts/pools/bindings,
// cached graphics and compute pipelines, per-variant program infos and the
// pipeline layout.  The VkPipelineCache is deliberately not touched here; see
// reset() for that.  Release order matters — see the comment below.
void ProgramExecutableVk::resetLayout(ContextVk *contextVk)
{
    for (auto &descriptorSetLayout : mDescriptorSetLayouts)
    {
        descriptorSetLayout.reset();
    }
    mImmutableSamplersMaxDescriptorCount = 1;
    mImmutableSamplerIndexMap.clear();

    mDescriptorSets.fill(VK_NULL_HANDLE);
    mNumDefaultUniformDescriptors = 0;

    for (vk::RefCountedDescriptorPoolBinding &binding : mDescriptorPoolBindings)
    {
        binding.reset();
    }

    for (vk::DescriptorPoolPointer &pool : mDescriptorPools)
    {
        pool.reset();
    }

    // Initialize with an invalid BufferSerial
    mCurrentDefaultUniformBufferSerial = vk::BufferSerial();

    // Release every cached pipeline: complete, shaders-subset and compute.
    for (CompleteGraphicsPipelineCache &pipelines : mCompleteGraphicsPipelines)
    {
        pipelines.release(contextVk);
    }
    for (ShadersGraphicsPipelineCache &pipelines : mShadersGraphicsPipelines)
    {
        pipelines.release(contextVk);
    }
    for (vk::PipelineHelper &pipeline : mComputePipelines)
    {
        pipeline.release(contextVk);
    }

    // Program infos and pipeline layout must be released after pipelines are; they might be having
    // pending jobs that are referencing them.
    for (ProgramInfo &programInfo : mGraphicsProgramInfos)
    {
        programInfo.release(contextVk);
    }
    mComputeProgramInfo.release(contextVk);

    mPipelineLayout.reset();

    // Notify the context that this executable's state has been reset.
    contextVk->onProgramExecutableReset(this);
}
471 
reset(ContextVk * contextVk)472 void ProgramExecutableVk::reset(ContextVk *contextVk)
473 {
474     resetLayout(contextVk);
475 
476     if (mPipelineCache.valid())
477     {
478         mPipelineCache.destroy(contextVk->getDevice());
479     }
480 }
481 
// Creates mPipelineCache from previously serialized data (decompressing it
// first if |compressed|), then optionally merges it into the renderer's
// global pipeline cache.  Returns Stop if decompression fails.
angle::Result ProgramExecutableVk::initializePipelineCache(ContextVk *contextVk,
                                                           bool compressed,
                                                           const std::vector<uint8_t> &pipelineData)
{
    ASSERT(!mPipelineCache.valid());

    size_t dataSize            = pipelineData.size();
    const uint8_t *dataPointer = pipelineData.data();

    // If compressed, decompress into a temporary buffer and point the create
    // info at that instead.  |uncompressedData| must outlive the init call.
    angle::MemoryBuffer uncompressedData;
    if (compressed)
    {
        if (!egl::DecompressBlobCacheData(dataPointer, dataSize, &uncompressedData))
        {
            return angle::Result::Stop;
        }
        dataSize    = uncompressedData.size();
        dataPointer = uncompressedData.data();
    }

    VkPipelineCacheCreateInfo pipelineCacheCreateInfo = {};
    pipelineCacheCreateInfo.sType           = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    pipelineCacheCreateInfo.initialDataSize = dataSize;
    pipelineCacheCreateInfo.pInitialData    = dataPointer;

    // This cache is externally synchronized by ANGLE; let the driver skip its
    // internal locking when the extension allows it.
    if (contextVk->getFeatures().supportsPipelineCreationCacheControl.enabled)
    {
        pipelineCacheCreateInfo.flags |= VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT;
    }

    ANGLE_VK_TRY(contextVk, mPipelineCache.init(contextVk->getDevice(), pipelineCacheCreateInfo));

    // Merge the pipeline cache into RendererVk's.
    if (contextVk->getFeatures().mergeProgramPipelineCachesToGlobalCache.enabled)
    {
        ANGLE_TRY(contextVk->getRenderer()->mergeIntoPipelineCache(mPipelineCache));
    }

    return angle::Result::Continue;
}
522 
ensurePipelineCacheInitialized(ContextVk * contextVk)523 angle::Result ProgramExecutableVk::ensurePipelineCacheInitialized(ContextVk *contextVk)
524 {
525     if (!mPipelineCache.valid())
526     {
527         VkPipelineCacheCreateInfo pipelineCacheCreateInfo = {};
528         pipelineCacheCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
529 
530         if (contextVk->getFeatures().supportsPipelineCreationCacheControl.enabled)
531         {
532             pipelineCacheCreateInfo.flags |=
533                 VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT;
534         }
535 
536         ANGLE_VK_TRY(contextVk,
537                      mPipelineCache.init(contextVk->getDevice(), pipelineCacheCreateInfo));
538     }
539 
540     return angle::Result::Continue;
541 }
542 
// Deserializes the executable from a program binary.  The read order here must
// exactly mirror ProgramExecutableVk::save: variable-info id/index maps, the
// variable info array, per-vertex member bitsets, SPIR-V blobs, default
// uniform layouts/sizes, and (non-separable only) the serialized pipeline
// cache.  Finishes by resizing uniform storage and creating the pipeline
// layout.
std::unique_ptr<rx::LinkEvent> ProgramExecutableVk::load(ContextVk *contextVk,
                                                         const gl::ProgramExecutable &glExecutable,
                                                         bool isSeparable,
                                                         gl::BinaryInputStream *stream)
{
    ShaderInterfaceVariableInfoMap::VariableInfoArray data;
    gl::ShaderMap<ShaderInterfaceVariableInfoMap::IdToIndexMap> idToIndexMap;
    gl::ShaderMap<gl::PerVertexMemberBitSet> inputPerVertexActiveMembers;
    gl::ShaderMap<gl::PerVertexMemberBitSet> outputPerVertexActiveMembers;

    // Per-stage map from SPIR-V id to index in the variable info array.
    for (gl::ShaderType shaderType : gl::AllShaderTypes())
    {
        size_t idCount = stream->readInt<size_t>();
        for (uint32_t id = 0; id < idCount; ++id)
        {
            uint32_t index               = stream->readInt<uint32_t>();
            idToIndexMap[shaderType][id] = {index};
        }
    }

    // The flat array of ShaderInterfaceVariableInfo entries.
    size_t dataSize = stream->readInt<size_t>();
    for (size_t infoIndex = 0; infoIndex < dataSize; ++infoIndex)
    {
        ShaderInterfaceVariableInfo info;

        info.descriptorSet = stream->readInt<uint32_t>();
        info.binding       = stream->readInt<uint32_t>();
        info.location      = stream->readInt<uint32_t>();
        info.component     = stream->readInt<uint32_t>();
        info.index         = stream->readInt<uint32_t>();
        // PackedEnumBitSet uses uint8_t
        info.activeStages = gl::ShaderBitSet(stream->readInt<uint8_t>());
        LoadShaderInterfaceVariableXfbInfo(stream, &info.xfb);
        info.fieldXfb.resize(stream->readInt<size_t>());
        for (ShaderInterfaceVariableXfbInfo &xfb : info.fieldXfb)
        {
            LoadShaderInterfaceVariableXfbInfo(stream, &xfb);
        }
        info.useRelaxedPrecision     = stream->readBool();
        info.varyingIsInput          = stream->readBool();
        info.varyingIsOutput         = stream->readBool();
        info.attributeComponentCount = stream->readInt<uint8_t>();
        info.attributeLocationCount  = stream->readInt<uint8_t>();

        data.push_back(info);
    }

    // gl_PerVertex member bitsets, only for the stages that can have them
    // (written in this fixed order by save()).
    outputPerVertexActiveMembers[gl::ShaderType::Vertex] =
        gl::PerVertexMemberBitSet(stream->readInt<uint8_t>());
    inputPerVertexActiveMembers[gl::ShaderType::TessControl] =
        gl::PerVertexMemberBitSet(stream->readInt<uint8_t>());
    outputPerVertexActiveMembers[gl::ShaderType::TessControl] =
        gl::PerVertexMemberBitSet(stream->readInt<uint8_t>());
    inputPerVertexActiveMembers[gl::ShaderType::TessEvaluation] =
        gl::PerVertexMemberBitSet(stream->readInt<uint8_t>());
    outputPerVertexActiveMembers[gl::ShaderType::TessEvaluation] =
        gl::PerVertexMemberBitSet(stream->readInt<uint8_t>());
    inputPerVertexActiveMembers[gl::ShaderType::Geometry] =
        gl::PerVertexMemberBitSet(stream->readInt<uint8_t>());
    outputPerVertexActiveMembers[gl::ShaderType::Geometry] =
        gl::PerVertexMemberBitSet(stream->readInt<uint8_t>());

    mVariableInfoMap.load(std::move(data), std::move(idToIndexMap),
                          std::move(inputPerVertexActiveMembers),
                          std::move(outputPerVertexActiveMembers));

    mOriginalShaderInfo.load(stream);

    // Deserializes the uniformLayout data of mDefaultUniformBlocks
    for (gl::ShaderType shaderType : gl::AllShaderTypes())
    {
        const size_t uniformCount = stream->readInt<size_t>();
        for (unsigned int uniformIndex = 0; uniformIndex < uniformCount; ++uniformIndex)
        {
            sh::BlockMemberInfo blockInfo;
            gl::LoadBlockMemberInfo(stream, &blockInfo);
            mDefaultUniformBlocks[shaderType]->uniformLayout.push_back(blockInfo);
        }
    }

    gl::ShaderMap<size_t> requiredBufferSize;
    requiredBufferSize.fill(0);
    // Deserializes required uniform block memory sizes
    for (gl::ShaderType shaderType : gl::AllShaderTypes())
    {
        requiredBufferSize[shaderType] = stream->readInt<size_t>();
    }

    // Separable programs don't serialize a pipeline cache (see save()).
    if (!isSeparable)
    {
        size_t compressedPipelineDataSize = 0;
        stream->readInt<size_t>(&compressedPipelineDataSize);

        std::vector<uint8_t> compressedPipelineData(compressedPipelineDataSize);
        if (compressedPipelineDataSize > 0)
        {
            bool compressedData = false;
            stream->readBool(&compressedData);
            stream->readBytes(compressedPipelineData.data(), compressedPipelineDataSize);
            // Initialize the pipeline cache based on cached data.
            angle::Result status =
                initializePipelineCache(contextVk, compressedData, compressedPipelineData);
            if (status != angle::Result::Continue)
            {
                return std::make_unique<LinkEventDone>(status);
            }
        }
    }

    // Initialize and resize the mDefaultUniformBlocks' memory
    angle::Result status = resizeUniformBlockMemory(contextVk, glExecutable, requiredBufferSize);
    if (status != angle::Result::Continue)
    {
        return std::make_unique<LinkEventDone>(status);
    }

    status = createPipelineLayout(contextVk, glExecutable, nullptr);
    return std::make_unique<LinkEventDone>(status);
}
662 
// Serializes the executable into a program binary.  The write order here must
// exactly mirror ProgramExecutableVk::load: variable-info id/index maps, the
// variable info array, per-vertex member bitsets, SPIR-V blobs, default
// uniform layouts/sizes, and (non-separable only) the pipeline cache blob.
void ProgramExecutableVk::save(ContextVk *contextVk,
                               bool isSeparable,
                               gl::BinaryOutputStream *stream)
{
    const ShaderInterfaceVariableInfoMap::VariableInfoArray &data = mVariableInfoMap.getData();
    const gl::ShaderMap<ShaderInterfaceVariableInfoMap::IdToIndexMap> &idToIndexMap =
        mVariableInfoMap.getIdToIndexMap();
    const gl::ShaderMap<gl::PerVertexMemberBitSet> &inputPerVertexActiveMembers =
        mVariableInfoMap.getInputPerVertexActiveMembers();
    const gl::ShaderMap<gl::PerVertexMemberBitSet> &outputPerVertexActiveMembers =
        mVariableInfoMap.getOutputPerVertexActiveMembers();

    // Per-stage map from SPIR-V id to index in the variable info array.
    for (gl::ShaderType shaderType : gl::AllShaderTypes())
    {
        stream->writeInt(idToIndexMap[shaderType].size());
        for (const VariableIndex &variableIndex : idToIndexMap[shaderType])
        {
            stream->writeInt(variableIndex.index);
        }
    }

    // The flat array of ShaderInterfaceVariableInfo entries.
    stream->writeInt(data.size());
    for (const ShaderInterfaceVariableInfo &info : data)
    {
        stream->writeInt(info.descriptorSet);
        stream->writeInt(info.binding);
        stream->writeInt(info.location);
        stream->writeInt(info.component);
        stream->writeInt(info.index);
        // PackedEnumBitSet uses uint8_t
        stream->writeInt(info.activeStages.bits());
        SaveShaderInterfaceVariableXfbInfo(info.xfb, stream);
        stream->writeInt(info.fieldXfb.size());
        for (const ShaderInterfaceVariableXfbInfo &xfb : info.fieldXfb)
        {
            SaveShaderInterfaceVariableXfbInfo(xfb, stream);
        }
        stream->writeBool(info.useRelaxedPrecision);
        stream->writeBool(info.varyingIsInput);
        stream->writeBool(info.varyingIsOutput);
        stream->writeInt(info.attributeComponentCount);
        stream->writeInt(info.attributeLocationCount);
    }

    // Store gl_PerVertex members only for stages that have it.
    stream->writeInt(outputPerVertexActiveMembers[gl::ShaderType::Vertex].bits());
    stream->writeInt(inputPerVertexActiveMembers[gl::ShaderType::TessControl].bits());
    stream->writeInt(outputPerVertexActiveMembers[gl::ShaderType::TessControl].bits());
    stream->writeInt(inputPerVertexActiveMembers[gl::ShaderType::TessEvaluation].bits());
    stream->writeInt(outputPerVertexActiveMembers[gl::ShaderType::TessEvaluation].bits());
    stream->writeInt(inputPerVertexActiveMembers[gl::ShaderType::Geometry].bits());
    stream->writeInt(outputPerVertexActiveMembers[gl::ShaderType::Geometry].bits());

    mOriginalShaderInfo.save(stream);

    // Serializes the uniformLayout data of mDefaultUniformBlocks
    for (gl::ShaderType shaderType : gl::AllShaderTypes())
    {
        const size_t uniformCount = mDefaultUniformBlocks[shaderType]->uniformLayout.size();
        stream->writeInt(uniformCount);
        for (unsigned int uniformIndex = 0; uniformIndex < uniformCount; ++uniformIndex)
        {
            sh::BlockMemberInfo &blockInfo =
                mDefaultUniformBlocks[shaderType]->uniformLayout[uniformIndex];
            gl::WriteBlockMemberInfo(stream, blockInfo);
        }
    }

    // Serializes required uniform block memory sizes
    for (gl::ShaderType shaderType : gl::AllShaderTypes())
    {
        stream->writeInt(mDefaultUniformBlocks[shaderType]->uniformData.size());
    }

    // Compress and save mPipelineCache.  Separable programs don't warm up the cache, while program
    // pipelines do.  However, currently ANGLE doesn't sync program pipelines to cache.  ANGLE could
    // potentially use VK_EXT_graphics_pipeline_library to create separate pipelines for
    // pre-rasterization and fragment subsets, but currently those subsets are bundled together.
    if (!isSeparable)
    {
        angle::MemoryBuffer cacheData;

        GetPipelineCacheData(contextVk, mPipelineCache, &cacheData);
        stream->writeInt(cacheData.size());
        if (cacheData.size() > 0)
        {
            // The compression flag must be recorded so load() knows whether to
            // decompress.
            stream->writeBool(contextVk->getFeatures().enablePipelineCacheDataCompression.enabled);
            stream->writeBytes(cacheData.data(), cacheData.size());
        }
    }
}
754 
// Drops all collected shader-interface variable information (descriptor sets, bindings,
// locations, XFB data), e.g. prior to (re)populating it for a new link.
void ProgramExecutableVk::clearVariableInfoMap()
{
    mVariableInfoMap.clear();
}
759 
// Pre-populates this program's pipeline cache at link time so that fewer pipelines need to
// be created at draw time.  For compute programs a single pipeline is created; for graphics
// programs, a pipeline is created per "transform option" variation that is known to matter
// (currently only pre-rotation).  When the relevant feature is enabled, the per-program
// cache is merged back into RendererVk's global cache afterwards.
angle::Result ProgramExecutableVk::warmUpPipelineCache(ContextVk *contextVk,
                                                       const gl::ProgramExecutable &glExecutable)
{
    // The cache warm up is skipped for GLES1 for two reasons:
    //
    // - Since GLES1 shaders are limited, the individual programs don't necessarily add new
    //   pipelines, but rather it's draw time state that controls that.  Since the programs are
    //   generated at draw time, it's just as well to let the pipelines be created using the
    //   renderer's shared cache.
    // - Individual GLES1 tests are long, and this adds a considerable overhead to those tests
    if (contextVk->getState().isGLES1())
    {
        return angle::Result::Continue;
    }

    if (!contextVk->getFeatures().warmUpPipelineCacheAtLink.enabled)
    {
        return angle::Result::Continue;
    }

    ANGLE_TRY(ensurePipelineCacheInitialized(contextVk));

    // No synchronization necessary when accessing the program executable's cache as there is no
    // access to it from other threads at this point.
    vk::PipelineCacheAccess pipelineCache;
    pipelineCache.init(&mPipelineCache, nullptr);

    // Create a set of pipelines.  Ideally, that would be the entire set of possible pipelines so
    // there would be none created at draw time.  This is gated on the removal of some
    // specialization constants and adoption of VK_EXT_graphics_pipeline_library.
    const bool isCompute = glExecutable.hasLinkedShaderStage(gl::ShaderType::Compute);
    if (isCompute)
    {
        // There is no state associated with compute programs, so only one pipeline needs creation
        // to warm up the cache.
        vk::PipelineHelper *pipeline = nullptr;
        ANGLE_TRY(getOrCreateComputePipeline(contextVk, &pipelineCache, PipelineSource::WarmUp,
                                             glExecutable, &pipeline));

        // Merge the cache with RendererVk's
        if (contextVk->getFeatures().mergeProgramPipelineCachesToGlobalCache.enabled)
        {
            ANGLE_TRY(contextVk->getRenderer()->mergeIntoPipelineCache(mPipelineCache));
        }

        return angle::Result::Continue;
    }

    const vk::GraphicsPipelineDesc *descPtr = nullptr;
    vk::PipelineHelper *pipeline            = nullptr;
    vk::GraphicsPipelineDesc graphicsPipelineDesc;

    // It is only at drawcall time that we will have complete information required to build the
    // graphics pipeline descriptor. Use the most "commonly seen" state values and create the
    // pipeline.
    gl::PrimitiveMode mode = (glExecutable.hasLinkedShaderStage(gl::ShaderType::TessControl) ||
                              glExecutable.hasLinkedShaderStage(gl::ShaderType::TessEvaluation))
                                 ? gl::PrimitiveMode::Patches
                                 : gl::PrimitiveMode::TriangleStrip;
    SetupDefaultPipelineState(contextVk, glExecutable, mode, &graphicsPipelineDesc);

    // Variations that definitely matter:
    //
    // - PreRotation: It's a boolean specialization constant
    // - Depth correction: It's a SPIR-V transformation
    //
    // There are a number of states that are not currently dynamic (and may never be, such as sample
    // shading), but pre-creating shaders for them is impractical.  Most such state is likely unused
    // by most applications, but variations can be added here for certain apps that are known to
    // benefit from it.
    ProgramTransformOptions transformOptions = {};

    // Rotation variation only applies when pre-rotation is implemented via a specialization
    // constant; with driver uniforms there is nothing to bake into the pipeline.
    angle::FixedVector<bool, 2> surfaceRotationVariations = {false};
    if (contextVk->getFeatures().enablePreRotateSurfaces.enabled &&
        !contextVk->getFeatures().preferDriverUniformOverSpecConst.enabled)
    {
        surfaceRotationVariations.push_back(true);
    }

    // Only build the shaders subset of the pipeline if VK_EXT_graphics_pipeline_library is
    // supported, especially since the vertex input and fragment output state set up here is
    // completely bogus.
    vk::GraphicsPipelineSubset subset =
        contextVk->getFeatures().supportsGraphicsPipelineLibrary.enabled
            ? vk::GraphicsPipelineSubset::Shaders
            : vk::GraphicsPipelineSubset::Complete;

    for (bool rotation : surfaceRotationVariations)
    {
        transformOptions.surfaceRotation = rotation;

        ANGLE_TRY(createGraphicsPipelineImpl(contextVk, transformOptions, subset, &pipelineCache,
                                             PipelineSource::WarmUp, graphicsPipelineDesc,
                                             glExecutable, &descPtr, &pipeline));
    }

    // Merge the cache with RendererVk's
    if (contextVk->getFeatures().mergeProgramPipelineCachesToGlobalCache.enabled)
    {
        ANGLE_TRY(contextVk->getRenderer()->mergeIntoPipelineCache(mPipelineCache));
    }

    return angle::Result::Continue;
}
864 
addInterfaceBlockDescriptorSetDesc(const std::vector<gl::InterfaceBlock> & blocks,gl::ShaderBitSet shaderTypes,VkDescriptorType descType,vk::DescriptorSetLayoutDesc * descOut)865 void ProgramExecutableVk::addInterfaceBlockDescriptorSetDesc(
866     const std::vector<gl::InterfaceBlock> &blocks,
867     gl::ShaderBitSet shaderTypes,
868     VkDescriptorType descType,
869     vk::DescriptorSetLayoutDesc *descOut)
870 {
871     for (uint32_t bufferIndex = 0, arraySize = 0; bufferIndex < blocks.size();
872          bufferIndex += arraySize)
873     {
874         gl::InterfaceBlock block = blocks[bufferIndex];
875         arraySize                = GetInterfaceBlockArraySize(blocks, bufferIndex);
876 
877         if (block.activeShaders().none())
878         {
879             continue;
880         }
881 
882         const gl::ShaderType firstShaderType = block.getFirstActiveShaderType();
883         const ShaderInterfaceVariableInfo &info =
884             mVariableInfoMap.getVariableById(firstShaderType, block.getId(firstShaderType));
885 
886         const VkShaderStageFlags activeStages = gl_vk::GetShaderStageFlags(info.activeStages);
887 
888         descOut->update(info.binding, descType, arraySize, activeStages, nullptr);
889     }
890 }
891 
addAtomicCounterBufferDescriptorSetDesc(const std::vector<gl::AtomicCounterBuffer> & atomicCounterBuffers,vk::DescriptorSetLayoutDesc * descOut)892 void ProgramExecutableVk::addAtomicCounterBufferDescriptorSetDesc(
893     const std::vector<gl::AtomicCounterBuffer> &atomicCounterBuffers,
894     vk::DescriptorSetLayoutDesc *descOut)
895 {
896     if (atomicCounterBuffers.empty())
897     {
898         return;
899     }
900 
901     const ShaderInterfaceVariableInfo &info =
902         mVariableInfoMap.getAtomicCounterInfo(atomicCounterBuffers[0].getFirstActiveShaderType());
903     VkShaderStageFlags activeStages = gl_vk::GetShaderStageFlags(info.activeStages);
904 
905     // A single storage buffer array is used for all stages for simplicity.
906     descOut->update(info.binding, vk::kStorageBufferDescriptorType,
907                     gl::IMPLEMENTATION_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS, activeStages, nullptr);
908 }
909 
addImageDescriptorSetDesc(const gl::ProgramExecutable & executable,vk::DescriptorSetLayoutDesc * descOut)910 void ProgramExecutableVk::addImageDescriptorSetDesc(const gl::ProgramExecutable &executable,
911                                                     vk::DescriptorSetLayoutDesc *descOut)
912 {
913     const std::vector<gl::ImageBinding> &imageBindings = executable.getImageBindings();
914     const std::vector<gl::LinkedUniform> &uniforms     = executable.getUniforms();
915 
916     for (uint32_t imageIndex = 0; imageIndex < imageBindings.size(); ++imageIndex)
917     {
918         uint32_t uniformIndex = executable.getUniformIndexFromImageIndex(imageIndex);
919         const gl::LinkedUniform &imageUniform = uniforms[uniformIndex];
920 
921         // 2D arrays are split into multiple 1D arrays when generating LinkedUniforms. Since they
922         // are flattened into one array, ignore the nonzero elements and expand the array to the
923         // total array size.
924         if (imageUniform.activeShaders().none() || imageUniform.getOuterArrayOffset() > 0)
925         {
926             ASSERT(gl::SamplerNameContainsNonZeroArrayElement(imageUniform.name));
927             continue;
928         }
929 
930         ASSERT(!gl::SamplerNameContainsNonZeroArrayElement(imageUniform.name));
931 
932         // The front-end always binds array image units sequentially.
933         const gl::ImageBinding &imageBinding = imageBindings[imageIndex];
934         uint32_t arraySize = static_cast<uint32_t>(imageBinding.boundImageUnits.size());
935         arraySize *= imageUniform.getOuterArraySizeProduct();
936 
937         const gl::ShaderType firstShaderType = imageUniform.getFirstActiveShaderType();
938         const ShaderInterfaceVariableInfo &info =
939             mVariableInfoMap.getVariableById(firstShaderType, imageUniform.getId(firstShaderType));
940 
941         const VkShaderStageFlags activeStages = gl_vk::GetShaderStageFlags(info.activeStages);
942 
943         const VkDescriptorType descType = imageBinding.textureType == gl::TextureType::Buffer
944                                               ? VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
945                                               : VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
946         descOut->update(info.binding, descType, arraySize, activeStages, nullptr);
947     }
948 }
949 
addInputAttachmentDescriptorSetDesc(const gl::ProgramExecutable & executable,vk::DescriptorSetLayoutDesc * descOut)950 void ProgramExecutableVk::addInputAttachmentDescriptorSetDesc(
951     const gl::ProgramExecutable &executable,
952     vk::DescriptorSetLayoutDesc *descOut)
953 {
954     if (!executable.getLinkedShaderStages()[gl::ShaderType::Fragment])
955     {
956         return;
957     }
958 
959     if (!executable.usesFramebufferFetch())
960     {
961         return;
962     }
963 
964     const std::vector<gl::LinkedUniform> &uniforms = executable.getUniforms();
965     const uint32_t baseUniformIndex                = executable.getFragmentInoutRange().low();
966     const gl::LinkedUniform &baseInputAttachment   = uniforms.at(baseUniformIndex);
967 
968     const ShaderInterfaceVariableInfo &baseInfo = mVariableInfoMap.getVariableById(
969         gl::ShaderType::Fragment, baseInputAttachment.getId(gl::ShaderType::Fragment));
970 
971     uint32_t baseBinding = baseInfo.binding - baseInputAttachment.getLocation();
972 
973     for (uint32_t colorIndex = 0; colorIndex < gl::IMPLEMENTATION_MAX_DRAW_BUFFERS; ++colorIndex)
974     {
975         descOut->update(baseBinding, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1,
976                         VK_SHADER_STAGE_FRAGMENT_BIT, nullptr);
977         baseBinding++;
978     }
979 }
980 
// Adds descriptor-set-layout entries for every sampler uniform in the executable.  Regular
// samplers become combined image samplers (or uniform texel buffers for buffer textures);
// textures with immutable (YCbCr) samplers additionally embed the sampler in the layout and
// record bookkeeping needed for descriptor-count queries.
angle::Result ProgramExecutableVk::addTextureDescriptorSetDesc(
    ContextVk *contextVk,
    const gl::ProgramExecutable &executable,
    const gl::ActiveTextureArray<TextureVk *> *activeTextures,
    vk::DescriptorSetLayoutDesc *descOut)
{
    const std::vector<gl::SamplerBinding> &samplerBindings = executable.getSamplerBindings();
    const std::vector<gl::LinkedUniform> &uniforms         = executable.getUniforms();

    for (uint32_t textureIndex = 0; textureIndex < samplerBindings.size(); ++textureIndex)
    {
        uint32_t uniformIndex = executable.getUniformIndexFromSamplerIndex(textureIndex);
        const gl::LinkedUniform &samplerUniform = uniforms[uniformIndex];

        // 2D arrays are split into multiple 1D arrays when generating LinkedUniforms. Since they
        // are flattened into one array, ignore the nonzero elements and expand the array to the
        // total array size.
        if (samplerUniform.activeShaders().none() || samplerUniform.getOuterArrayOffset() > 0)
        {
            ASSERT(gl::SamplerNameContainsNonZeroArrayElement(samplerUniform.name));
            continue;
        }

        ASSERT(!gl::SamplerNameContainsNonZeroArrayElement(samplerUniform.name));

        // The front-end always binds array sampler units sequentially.
        const gl::SamplerBinding &samplerBinding = samplerBindings[textureIndex];
        uint32_t arraySize = static_cast<uint32_t>(samplerBinding.boundTextureUnits.size());
        arraySize *= samplerUniform.getOuterArraySizeProduct();

        const gl::ShaderType firstShaderType    = samplerUniform.getFirstActiveShaderType();
        const ShaderInterfaceVariableInfo &info = mVariableInfoMap.getVariableById(
            firstShaderType, samplerUniform.getId(firstShaderType));

        const VkShaderStageFlags activeStages = gl_vk::GetShaderStageFlags(info.activeStages);

        // TODO: https://issuetracker.google.com/issues/158215272: how do we handle array of
        // immutable samplers?
        GLuint textureUnit = samplerBinding.boundTextureUnits[0];
        if (activeTextures != nullptr &&
            (*activeTextures)[textureUnit]->getImage().hasImmutableSampler())
        {
            ASSERT(samplerBinding.boundTextureUnits.size() == 1);

            // In the case of samplerExternal2DY2YEXT, we need
            // samplerYcbcrConversion object with IDENTITY conversion model
            bool isSamplerExternalY2Y =
                samplerBinding.samplerType == GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT;

            // Always take the texture's sampler, that's only way to get to yuv conversion for
            // externalFormat
            const TextureVk *textureVk          = (*activeTextures)[textureUnit];
            const vk::Sampler &immutableSampler = textureVk->getSampler(isSamplerExternalY2Y).get();
            descOut->update(info.binding, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, arraySize,
                            activeStages, &immutableSampler);
            const vk::ImageHelper &image = textureVk->getImage();
            const vk::YcbcrConversionDesc ycbcrConversionDesc =
                isSamplerExternalY2Y ? image.getY2YConversionDesc()
                                     : image.getYcbcrConversionDesc();
            // Remember which sampler index uses this conversion so descriptor updates can find it.
            mImmutableSamplerIndexMap[ycbcrConversionDesc] = textureIndex;
            // The Vulkan spec has the following note -
            // All descriptors in a binding use the same maximum
            // combinedImageSamplerDescriptorCount descriptors to allow implementations to use a
            // uniform stride for dynamic indexing of the descriptors in the binding.
            uint64_t externalFormat        = image.getExternalFormat();
            uint32_t formatDescriptorCount = 0;

            RendererVk *renderer = contextVk->getRenderer();

            // Query the combined-image-sampler descriptor count either by external format
            // (Android external formats) or by the image's actual VkFormat.
            if (externalFormat != 0)
            {
                ANGLE_TRY(renderer->getFormatDescriptorCountForExternalFormat(
                    contextVk, externalFormat, &formatDescriptorCount));
            }
            else
            {
                VkFormat vkFormat = image.getActualVkFormat();
                ASSERT(vkFormat != 0);
                ANGLE_TRY(renderer->getFormatDescriptorCountForVkFormat(contextVk, vkFormat,
                                                                        &formatDescriptorCount));
            }

            ASSERT(formatDescriptorCount > 0);
            // Track the worst case so descriptor pools can be sized accordingly.
            mImmutableSamplersMaxDescriptorCount =
                std::max(mImmutableSamplersMaxDescriptorCount, formatDescriptorCount);
        }
        else
        {
            const VkDescriptorType descType = samplerBinding.textureType == gl::TextureType::Buffer
                                                  ? VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
                                                  : VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
            descOut->update(info.binding, descType, arraySize, activeStages, nullptr);
        }
    }

    return angle::Result::Continue;
}
1078 
// Pre-computes the write-descriptor description builders for each descriptor set used by
// this executable (shader resources, textures, default uniforms, and default-uniforms+XFB),
// so per-draw descriptor updates don't need to recompute the layout of the writes.
void ProgramExecutableVk::initializeWriteDescriptorDesc(ContextVk *contextVk,
                                                        const gl::ProgramExecutable &glExecutable)
{
    const gl::ShaderBitSet &linkedShaderStages = glExecutable.getLinkedShaderStages();

    // Update mShaderResourceWriteDescriptorDescBuilder
    mShaderResourceWriteDescriptorDescs.reset();
    mShaderResourceWriteDescriptorDescs.updateShaderBuffers(
        mVariableInfoMap, glExecutable.getUniformBlocks(), getUniformBufferDescriptorType());
    mShaderResourceWriteDescriptorDescs.updateShaderBuffers(
        mVariableInfoMap, glExecutable.getShaderStorageBlocks(), getStorageBufferDescriptorType());
    mShaderResourceWriteDescriptorDescs.updateAtomicCounters(
        mVariableInfoMap, glExecutable.getAtomicCounterBuffers());
    mShaderResourceWriteDescriptorDescs.updateImages(glExecutable, mVariableInfoMap);
    mShaderResourceWriteDescriptorDescs.updateDynamicDescriptorsCount();

    // Update mTextureWriteDescriptors
    mTextureWriteDescriptorDescs.reset();
    mTextureWriteDescriptorDescs.updateExecutableActiveTextures(mVariableInfoMap, glExecutable);
    mTextureWriteDescriptorDescs.updateDynamicDescriptorsCount();

    // Update mDefaultUniformWriteDescriptors
    mDefaultUniformWriteDescriptorDescs.reset();
    mDefaultUniformWriteDescriptorDescs.updateDefaultUniform(linkedShaderStages, mVariableInfoMap,
                                                             glExecutable);
    mDefaultUniformWriteDescriptorDescs.updateDynamicDescriptorsCount();

    mDefaultUniformAndXfbWriteDescriptorDescs.reset();
    if (glExecutable.hasTransformFeedbackOutput() &&
        contextVk->getRenderer()->getFeatures().emulateTransformFeedback.enabled)
    {
        // Update mDefaultUniformAndXfbWriteDescriptorDescs for the emulation code path.
        // XFB emulation only writes the transform feedback buffers from the vertex stage.
        mDefaultUniformAndXfbWriteDescriptorDescs.updateDefaultUniform(
            linkedShaderStages, mVariableInfoMap, glExecutable);
        if (linkedShaderStages[gl::ShaderType::Vertex])
        {
            mDefaultUniformAndXfbWriteDescriptorDescs.updateTransformFeedbackWrite(mVariableInfoMap,
                                                                                   glExecutable);
        }
        mDefaultUniformAndXfbWriteDescriptorDescs.updateDynamicDescriptorsCount();
    }
    else
    {
        // Otherwise it will be the same as default uniform
        mDefaultUniformAndXfbWriteDescriptorDescs = mDefaultUniformWriteDescriptorDescs;
    }
}
1126 
getTransformOptions(ContextVk * contextVk,const vk::GraphicsPipelineDesc & desc,const gl::ProgramExecutable & glExecutable)1127 ProgramTransformOptions ProgramExecutableVk::getTransformOptions(
1128     ContextVk *contextVk,
1129     const vk::GraphicsPipelineDesc &desc,
1130     const gl::ProgramExecutable &glExecutable)
1131 {
1132     ProgramTransformOptions transformOptions = {};
1133 
1134     transformOptions.surfaceRotation = desc.getSurfaceRotation();
1135     transformOptions.removeTransformFeedbackEmulation =
1136         contextVk->getFeatures().emulateTransformFeedback.enabled &&
1137         !contextVk->getState().isTransformFeedbackActiveUnpaused();
1138     FramebufferVk *drawFrameBuffer = vk::GetImpl(contextVk->getState().getDrawFramebuffer());
1139     const bool hasFramebufferFetch = glExecutable.usesFramebufferFetch();
1140     const bool isMultisampled      = drawFrameBuffer->getSamples() > 1;
1141     transformOptions.multiSampleFramebufferFetch = hasFramebufferFetch && isMultisampled;
1142     transformOptions.enableSampleShading =
1143         contextVk->getState().isSampleShadingEnabled() && isMultisampled;
1144 
1145     return transformOptions;
1146 }
1147 
initGraphicsShaderPrograms(ContextVk * contextVk,ProgramTransformOptions transformOptions,const gl::ProgramExecutable & glExecutable,vk::ShaderProgramHelper ** shaderProgramOut)1148 angle::Result ProgramExecutableVk::initGraphicsShaderPrograms(
1149     ContextVk *contextVk,
1150     ProgramTransformOptions transformOptions,
1151     const gl::ProgramExecutable &glExecutable,
1152     vk::ShaderProgramHelper **shaderProgramOut)
1153 {
1154     ASSERT(glExecutable.hasLinkedShaderStage(gl::ShaderType::Vertex));
1155 
1156     const uint8_t programIndex                = GetGraphicsProgramIndex(transformOptions);
1157     ProgramInfo &programInfo                  = mGraphicsProgramInfos[programIndex];
1158     const gl::ShaderBitSet linkedShaderStages = glExecutable.getLinkedShaderStages();
1159     gl::ShaderType lastPreFragmentStage       = gl::GetLastPreFragmentStage(linkedShaderStages);
1160 
1161     const bool isTransformFeedbackProgram =
1162         !glExecutable.getLinkedTransformFeedbackVaryings().empty();
1163 
1164     for (gl::ShaderType shaderType : linkedShaderStages)
1165     {
1166         ANGLE_TRY(initGraphicsShaderProgram(
1167             contextVk, shaderType, shaderType == lastPreFragmentStage, isTransformFeedbackProgram,
1168             transformOptions, &programInfo, mVariableInfoMap));
1169     }
1170 
1171     *shaderProgramOut = programInfo.getShaderProgram();
1172     ASSERT(*shaderProgramOut);
1173 
1174     return angle::Result::Continue;
1175 }
1176 
// Creates a graphics pipeline (either the complete pipeline or only the shaders subset) for
// the given desc and transform options, caching it in the matching per-program pipeline
// cache.  On success, |*descPtrOut| points at the cached desc and |*pipelineOut| at the
// cached pipeline.
angle::Result ProgramExecutableVk::createGraphicsPipelineImpl(
    ContextVk *contextVk,
    ProgramTransformOptions transformOptions,
    vk::GraphicsPipelineSubset pipelineSubset,
    vk::PipelineCacheAccess *pipelineCache,
    PipelineSource source,
    const vk::GraphicsPipelineDesc &desc,
    const gl::ProgramExecutable &glExecutable,
    const vk::GraphicsPipelineDesc **descPtrOut,
    vk::PipelineHelper **pipelineOut)
{
    vk::ShaderProgramHelper *shaderProgram = nullptr;
    ANGLE_TRY(
        initGraphicsShaderPrograms(contextVk, transformOptions, glExecutable, &shaderProgram));

    const uint8_t programIndex = GetGraphicsProgramIndex(transformOptions);

    // Set specialization constants.  These are also a part of GraphicsPipelineDesc, so that a
    // change in specialization constants also results in a new pipeline.
    vk::SpecializationConstants specConsts = MakeSpecConsts(transformOptions, desc);

    // Pull in a compatible RenderPass.
    const vk::RenderPass *compatibleRenderPass = nullptr;
    ANGLE_TRY(contextVk->getRenderPassCache().getCompatibleRenderPass(
        contextVk, desc.getRenderPassDesc(), &compatibleRenderPass));

    // The two branches differ only in which (differently-typed) pipeline cache is used.
    if (pipelineSubset == vk::GraphicsPipelineSubset::Complete)
    {
        CompleteGraphicsPipelineCache &pipelines = mCompleteGraphicsPipelines[programIndex];
        return shaderProgram->createGraphicsPipeline(
            contextVk, &pipelines, pipelineCache, *compatibleRenderPass, getPipelineLayout(),
            source, desc, specConsts, descPtrOut, pipelineOut);
    }
    else
    {
        // Vertex input and fragment output subsets are independent of shaders, and are not created
        // through the program executable.
        ASSERT(pipelineSubset == vk::GraphicsPipelineSubset::Shaders);

        ShadersGraphicsPipelineCache &pipelines = mShadersGraphicsPipelines[programIndex];
        return shaderProgram->createGraphicsPipeline(
            contextVk, &pipelines, pipelineCache, *compatibleRenderPass, getPipelineLayout(),
            source, desc, specConsts, descPtrOut, pipelineOut);
    }
}
1222 
getGraphicsPipeline(ContextVk * contextVk,vk::GraphicsPipelineSubset pipelineSubset,const vk::GraphicsPipelineDesc & desc,const gl::ProgramExecutable & glExecutable,const vk::GraphicsPipelineDesc ** descPtrOut,vk::PipelineHelper ** pipelineOut)1223 angle::Result ProgramExecutableVk::getGraphicsPipeline(ContextVk *contextVk,
1224                                                        vk::GraphicsPipelineSubset pipelineSubset,
1225                                                        const vk::GraphicsPipelineDesc &desc,
1226                                                        const gl::ProgramExecutable &glExecutable,
1227                                                        const vk::GraphicsPipelineDesc **descPtrOut,
1228                                                        vk::PipelineHelper **pipelineOut)
1229 {
1230     ProgramTransformOptions transformOptions = getTransformOptions(contextVk, desc, glExecutable);
1231 
1232     vk::ShaderProgramHelper *shaderProgram = nullptr;
1233     ANGLE_TRY(
1234         initGraphicsShaderPrograms(contextVk, transformOptions, glExecutable, &shaderProgram));
1235 
1236     const uint8_t programIndex = GetGraphicsProgramIndex(transformOptions);
1237 
1238     *descPtrOut  = nullptr;
1239     *pipelineOut = nullptr;
1240 
1241     if (pipelineSubset == vk::GraphicsPipelineSubset::Complete)
1242     {
1243         mCompleteGraphicsPipelines[programIndex].getPipeline(desc, descPtrOut, pipelineOut);
1244     }
1245     else
1246     {
1247         // Vertex input and fragment output subsets are independent of shaders, and are not created
1248         // through the program executable.
1249         ASSERT(pipelineSubset == vk::GraphicsPipelineSubset::Shaders);
1250 
1251         mShadersGraphicsPipelines[programIndex].getPipeline(desc, descPtrOut, pipelineOut);
1252     }
1253 
1254     return angle::Result::Continue;
1255 }
1256 
createGraphicsPipeline(ContextVk * contextVk,vk::GraphicsPipelineSubset pipelineSubset,vk::PipelineCacheAccess * pipelineCache,PipelineSource source,const vk::GraphicsPipelineDesc & desc,const gl::ProgramExecutable & glExecutable,const vk::GraphicsPipelineDesc ** descPtrOut,vk::PipelineHelper ** pipelineOut)1257 angle::Result ProgramExecutableVk::createGraphicsPipeline(
1258     ContextVk *contextVk,
1259     vk::GraphicsPipelineSubset pipelineSubset,
1260     vk::PipelineCacheAccess *pipelineCache,
1261     PipelineSource source,
1262     const vk::GraphicsPipelineDesc &desc,
1263     const gl::ProgramExecutable &glExecutable,
1264     const vk::GraphicsPipelineDesc **descPtrOut,
1265     vk::PipelineHelper **pipelineOut)
1266 {
1267     ProgramTransformOptions transformOptions = getTransformOptions(contextVk, desc, glExecutable);
1268 
1269     // When creating monolithic pipelines, the renderer's pipeline cache is used as passed in.
1270     // When creating the shaders subset of pipelines, the program's own pipeline cache is used.
1271     vk::PipelineCacheAccess perProgramPipelineCache;
1272     const bool useProgramPipelineCache = pipelineSubset == vk::GraphicsPipelineSubset::Shaders;
1273     if (useProgramPipelineCache)
1274     {
1275         ANGLE_TRY(ensurePipelineCacheInitialized(contextVk));
1276 
1277         perProgramPipelineCache.init(&mPipelineCache, nullptr);
1278         pipelineCache = &perProgramPipelineCache;
1279     }
1280 
1281     ANGLE_TRY(createGraphicsPipelineImpl(contextVk, transformOptions, pipelineSubset, pipelineCache,
1282                                          source, desc, glExecutable, descPtrOut, pipelineOut));
1283 
1284     if (useProgramPipelineCache &&
1285         contextVk->getFeatures().mergeProgramPipelineCachesToGlobalCache.enabled)
1286     {
1287         ANGLE_TRY(contextVk->getRenderer()->mergeIntoPipelineCache(mPipelineCache));
1288     }
1289 
1290     return angle::Result::Continue;
1291 }
1292 
// Links the vertex-input, shaders, and fragment-output pipeline libraries into a complete
// pipeline (VK_EXT_graphics_pipeline_library) and caches it.  If monolithic pipelines are
// preferred, a task is additionally queued to create one asynchronously and later replace
// the linked pipeline.
angle::Result ProgramExecutableVk::linkGraphicsPipelineLibraries(
    ContextVk *contextVk,
    vk::PipelineCacheAccess *pipelineCache,
    const vk::GraphicsPipelineDesc &desc,
    const gl::ProgramExecutable &glExecutable,
    vk::PipelineHelper *vertexInputPipeline,
    vk::PipelineHelper *shadersPipeline,
    vk::PipelineHelper *fragmentOutputPipeline,
    const vk::GraphicsPipelineDesc **descPtrOut,
    vk::PipelineHelper **pipelineOut)
{
    ProgramTransformOptions transformOptions = getTransformOptions(contextVk, desc, glExecutable);
    const uint8_t programIndex               = GetGraphicsProgramIndex(transformOptions);

    ANGLE_TRY(mCompleteGraphicsPipelines[programIndex].linkLibraries(
        contextVk, pipelineCache, desc, getPipelineLayout(), vertexInputPipeline, shadersPipeline,
        fragmentOutputPipeline, descPtrOut, pipelineOut));

    // If monolithic pipelines are preferred over libraries, create a task so that it can be created
    // asynchronously.
    if (contextVk->getFeatures().preferMonolithicPipelinesOverLibraries.enabled)
    {
        vk::SpecializationConstants specConsts = MakeSpecConsts(transformOptions, desc);

        mGraphicsProgramInfos[programIndex]
            .getShaderProgram()
            ->createMonolithicPipelineCreationTask(contextVk, pipelineCache, desc,
                                                   getPipelineLayout(), specConsts, *pipelineOut);
    }

    return angle::Result::Continue;
}
1325 
getOrCreateComputePipeline(ContextVk * contextVk,vk::PipelineCacheAccess * pipelineCache,PipelineSource source,const gl::ProgramExecutable & glExecutable,vk::PipelineHelper ** pipelineOut)1326 angle::Result ProgramExecutableVk::getOrCreateComputePipeline(
1327     ContextVk *contextVk,
1328     vk::PipelineCacheAccess *pipelineCache,
1329     PipelineSource source,
1330     const gl::ProgramExecutable &glExecutable,
1331     vk::PipelineHelper **pipelineOut)
1332 {
1333     ASSERT(glExecutable.hasLinkedShaderStage(gl::ShaderType::Compute));
1334 
1335     ANGLE_TRY(initComputeProgram(contextVk, &mComputeProgramInfo, mVariableInfoMap));
1336 
1337     vk::ShaderProgramHelper *shaderProgram = mComputeProgramInfo.getShaderProgram();
1338     ASSERT(shaderProgram);
1339     return shaderProgram->getOrCreateComputePipeline(
1340         contextVk, &mComputePipelines, pipelineCache, getPipelineLayout(),
1341         contextVk->getComputePipelineFlags(), source, pipelineOut);
1342 }
1343 
createPipelineLayout(ContextVk * contextVk,const gl::ProgramExecutable & glExecutable,gl::ActiveTextureArray<TextureVk * > * activeTextures)1344 angle::Result ProgramExecutableVk::createPipelineLayout(
1345     ContextVk *contextVk,
1346     const gl::ProgramExecutable &glExecutable,
1347     gl::ActiveTextureArray<TextureVk *> *activeTextures)
1348 {
1349     gl::TransformFeedback *transformFeedback = contextVk->getState().getCurrentTransformFeedback();
1350     const gl::ShaderBitSet &linkedShaderStages = glExecutable.getLinkedShaderStages();
1351 
1352     resetLayout(contextVk);
1353 
1354     // Store a reference to the pipeline and descriptor set layouts. This will create them if they
1355     // don't already exist in the cache.
1356 
1357     // Default uniforms and transform feedback:
1358     vk::DescriptorSetLayoutDesc uniformsAndXfbSetDesc;
1359     mNumDefaultUniformDescriptors = 0;
1360     for (gl::ShaderType shaderType : linkedShaderStages)
1361     {
1362         const ShaderInterfaceVariableInfo &info =
1363             mVariableInfoMap.getDefaultUniformInfo(shaderType);
1364         // Note that currently the default uniform block is added unconditionally.
1365         ASSERT(info.activeStages[shaderType]);
1366 
1367         uniformsAndXfbSetDesc.update(info.binding, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1,
1368                                      gl_vk::kShaderStageMap[shaderType], nullptr);
1369         mNumDefaultUniformDescriptors++;
1370     }
1371 
1372     gl::ShaderType linkedTransformFeedbackStage = glExecutable.getLinkedTransformFeedbackStage();
1373     bool hasXfbVaryings = linkedTransformFeedbackStage != gl::ShaderType::InvalidEnum &&
1374                           !glExecutable.getLinkedTransformFeedbackVaryings().empty();
1375     if (transformFeedback && hasXfbVaryings)
1376     {
1377         size_t xfbBufferCount                    = glExecutable.getTransformFeedbackBufferCount();
1378         TransformFeedbackVk *transformFeedbackVk = vk::GetImpl(transformFeedback);
1379         transformFeedbackVk->updateDescriptorSetLayout(contextVk, mVariableInfoMap, xfbBufferCount,
1380                                                        &uniformsAndXfbSetDesc);
1381     }
1382 
1383     ANGLE_TRY(contextVk->getDescriptorSetLayoutCache().getDescriptorSetLayout(
1384         contextVk, uniformsAndXfbSetDesc,
1385         &mDescriptorSetLayouts[DescriptorSetIndex::UniformsAndXfb]));
1386 
1387     // Uniform and storage buffers, atomic counter buffers and images:
1388     vk::DescriptorSetLayoutDesc resourcesSetDesc;
1389 
1390     // Count the number of active uniform buffer descriptors.
1391     uint32_t numActiveUniformBufferDescriptors    = 0;
1392     const std::vector<gl::InterfaceBlock> &blocks = glExecutable.getUniformBlocks();
1393     for (uint32_t bufferIndex = 0; bufferIndex < blocks.size();)
1394     {
1395         const gl::InterfaceBlock &block = blocks[bufferIndex];
1396         const uint32_t arraySize        = GetInterfaceBlockArraySize(blocks, bufferIndex);
1397         bufferIndex += arraySize;
1398 
1399         if (block.activeShaders().any())
1400         {
1401             numActiveUniformBufferDescriptors += arraySize;
1402         }
1403     }
1404 
1405     // Decide if we should use dynamic or fixed descriptor types.
1406     VkPhysicalDeviceLimits limits = contextVk->getRenderer()->getPhysicalDeviceProperties().limits;
1407     uint32_t totalDynamicUniformBufferCount =
1408         numActiveUniformBufferDescriptors + mNumDefaultUniformDescriptors;
1409     if (totalDynamicUniformBufferCount <= limits.maxDescriptorSetUniformBuffersDynamic)
1410     {
1411         mUniformBufferDescriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
1412     }
1413     else
1414     {
1415         mUniformBufferDescriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
1416     }
1417 
1418     addInterfaceBlockDescriptorSetDesc(glExecutable.getUniformBlocks(), linkedShaderStages,
1419                                        mUniformBufferDescriptorType, &resourcesSetDesc);
1420     addInterfaceBlockDescriptorSetDesc(glExecutable.getShaderStorageBlocks(), linkedShaderStages,
1421                                        vk::kStorageBufferDescriptorType, &resourcesSetDesc);
1422     addAtomicCounterBufferDescriptorSetDesc(glExecutable.getAtomicCounterBuffers(),
1423                                             &resourcesSetDesc);
1424     addImageDescriptorSetDesc(glExecutable, &resourcesSetDesc);
1425     addInputAttachmentDescriptorSetDesc(glExecutable, &resourcesSetDesc);
1426 
1427     ANGLE_TRY(contextVk->getDescriptorSetLayoutCache().getDescriptorSetLayout(
1428         contextVk, resourcesSetDesc, &mDescriptorSetLayouts[DescriptorSetIndex::ShaderResource]));
1429 
1430     // Textures:
1431     vk::DescriptorSetLayoutDesc texturesSetDesc;
1432     ANGLE_TRY(
1433         addTextureDescriptorSetDesc(contextVk, glExecutable, activeTextures, &texturesSetDesc));
1434 
1435     ANGLE_TRY(contextVk->getDescriptorSetLayoutCache().getDescriptorSetLayout(
1436         contextVk, texturesSetDesc, &mDescriptorSetLayouts[DescriptorSetIndex::Texture]));
1437 
1438     // Create pipeline layout with these 3 descriptor sets.
1439     vk::PipelineLayoutDesc pipelineLayoutDesc;
1440     pipelineLayoutDesc.updateDescriptorSetLayout(DescriptorSetIndex::UniformsAndXfb,
1441                                                  uniformsAndXfbSetDesc);
1442     pipelineLayoutDesc.updateDescriptorSetLayout(DescriptorSetIndex::ShaderResource,
1443                                                  resourcesSetDesc);
1444     pipelineLayoutDesc.updateDescriptorSetLayout(DescriptorSetIndex::Texture, texturesSetDesc);
1445 
1446     // Set up driver uniforms as push constants. The size is set for a graphics pipeline, as there
1447     // are more driver uniforms for a graphics pipeline than there are for a compute pipeline. As
1448     // for the shader stages, both graphics and compute stages are used.
1449     VkShaderStageFlags pushConstantShaderStageFlags =
1450         contextVk->getRenderer()->getSupportedVulkanShaderStageMask();
1451 
1452     uint32_t pushConstantSize = contextVk->getDriverUniformSize(PipelineType::Graphics);
1453     pipelineLayoutDesc.updatePushConstantRange(pushConstantShaderStageFlags, 0, pushConstantSize);
1454 
1455     ANGLE_TRY(contextVk->getPipelineLayoutCache().getPipelineLayout(
1456         contextVk, pipelineLayoutDesc, mDescriptorSetLayouts, &mPipelineLayout));
1457 
1458     // Initialize descriptor pools.
1459     ANGLE_TRY(contextVk->bindCachedDescriptorPool(
1460         DescriptorSetIndex::UniformsAndXfb, uniformsAndXfbSetDesc, 1,
1461         &mDescriptorPools[DescriptorSetIndex::UniformsAndXfb]));
1462     ANGLE_TRY(contextVk->bindCachedDescriptorPool(DescriptorSetIndex::Texture, texturesSetDesc,
1463                                                   mImmutableSamplersMaxDescriptorCount,
1464                                                   &mDescriptorPools[DescriptorSetIndex::Texture]));
1465     ANGLE_TRY(
1466         contextVk->bindCachedDescriptorPool(DescriptorSetIndex::ShaderResource, resourcesSetDesc, 1,
1467                                             &mDescriptorPools[DescriptorSetIndex::ShaderResource]));
1468 
1469     mDynamicUniformDescriptorOffsets.clear();
1470     mDynamicUniformDescriptorOffsets.resize(glExecutable.getLinkedShaderStageCount(), 0);
1471 
1472     // If the program uses framebuffer fetch and this is the first time this happens, switch the
1473     // context to "framebuffer fetch mode".  In this mode, all render passes assume framebuffer
1474     // fetch may be used, so they are prepared to accept a program that uses input attachments.
1475     // This is done only when a program with framebuffer fetch is created to avoid potential
1476     // performance impact on applications that don't use this extension.  If other contexts in the
1477     // share group use this program, they will lazily switch to this mode.
1478     if (contextVk->getFeatures().permanentlySwitchToFramebufferFetchMode.enabled &&
1479         glExecutable.usesFramebufferFetch())
1480     {
1481         ANGLE_TRY(contextVk->switchToFramebufferFetchMode(true));
1482     }
1483 
1484     initializeWriteDescriptorDesc(contextVk, glExecutable);
1485 
1486     return angle::Result::Continue;
1487 }
1488 
resolvePrecisionMismatch(const gl::ProgramMergedVaryings & mergedVaryings)1489 void ProgramExecutableVk::resolvePrecisionMismatch(const gl::ProgramMergedVaryings &mergedVaryings)
1490 {
1491     for (const gl::ProgramVaryingRef &mergedVarying : mergedVaryings)
1492     {
1493         if (!mergedVarying.frontShader || !mergedVarying.backShader)
1494         {
1495             continue;
1496         }
1497 
1498         GLenum frontPrecision = mergedVarying.frontShader->precision;
1499         GLenum backPrecision  = mergedVarying.backShader->precision;
1500         if (frontPrecision == backPrecision)
1501         {
1502             continue;
1503         }
1504 
1505         ASSERT(frontPrecision >= GL_LOW_FLOAT && frontPrecision <= GL_HIGH_INT);
1506         ASSERT(backPrecision >= GL_LOW_FLOAT && backPrecision <= GL_HIGH_INT);
1507 
1508         if (frontPrecision > backPrecision)
1509         {
1510             // The output is higher precision than the input
1511             ShaderInterfaceVariableInfo &info = mVariableInfoMap.getMutable(
1512                 mergedVarying.frontShaderStage, mergedVarying.frontShader->id);
1513             info.varyingIsOutput     = true;
1514             info.useRelaxedPrecision = true;
1515         }
1516         else
1517         {
1518             // The output is lower precision than the input, adjust the input
1519             ASSERT(backPrecision > frontPrecision);
1520             ShaderInterfaceVariableInfo &info = mVariableInfoMap.getMutable(
1521                 mergedVarying.backShaderStage, mergedVarying.backShader->id);
1522             info.varyingIsInput      = true;
1523             info.useRelaxedPrecision = true;
1524         }
1525     }
1526 }
1527 
getOrAllocateDescriptorSet(vk::Context * context,UpdateDescriptorSetsBuilder * updateBuilder,vk::CommandBufferHelperCommon * commandBufferHelper,const vk::DescriptorSetDescBuilder & descriptorSetDesc,const vk::WriteDescriptorDescs & writeDescriptorDescs,DescriptorSetIndex setIndex,vk::SharedDescriptorSetCacheKey * newSharedCacheKeyOut)1528 angle::Result ProgramExecutableVk::getOrAllocateDescriptorSet(
1529     vk::Context *context,
1530     UpdateDescriptorSetsBuilder *updateBuilder,
1531     vk::CommandBufferHelperCommon *commandBufferHelper,
1532     const vk::DescriptorSetDescBuilder &descriptorSetDesc,
1533     const vk::WriteDescriptorDescs &writeDescriptorDescs,
1534     DescriptorSetIndex setIndex,
1535     vk::SharedDescriptorSetCacheKey *newSharedCacheKeyOut)
1536 {
1537     ANGLE_TRY(mDescriptorPools[setIndex].get().getOrAllocateDescriptorSet(
1538         context, commandBufferHelper, descriptorSetDesc.getDesc(),
1539         mDescriptorSetLayouts[setIndex].get(), &mDescriptorPoolBindings[setIndex],
1540         &mDescriptorSets[setIndex], newSharedCacheKeyOut));
1541     ASSERT(mDescriptorSets[setIndex] != VK_NULL_HANDLE);
1542 
1543     if (*newSharedCacheKeyOut != nullptr)
1544     {
1545         // Cache miss. A new cache entry has been created.
1546         descriptorSetDesc.updateDescriptorSet(context, writeDescriptorDescs, updateBuilder,
1547                                               mDescriptorSets[setIndex]);
1548     }
1549     else
1550     {
1551         commandBufferHelper->retainResource(&mDescriptorPoolBindings[setIndex].get());
1552     }
1553 
1554     return angle::Result::Continue;
1555 }
1556 
updateShaderResourcesDescriptorSet(vk::Context * context,UpdateDescriptorSetsBuilder * updateBuilder,const vk::WriteDescriptorDescs & writeDescriptorDescs,vk::CommandBufferHelperCommon * commandBufferHelper,const vk::DescriptorSetDescBuilder & shaderResourcesDesc,vk::SharedDescriptorSetCacheKey * newSharedCacheKeyOut)1557 angle::Result ProgramExecutableVk::updateShaderResourcesDescriptorSet(
1558     vk::Context *context,
1559     UpdateDescriptorSetsBuilder *updateBuilder,
1560     const vk::WriteDescriptorDescs &writeDescriptorDescs,
1561     vk::CommandBufferHelperCommon *commandBufferHelper,
1562     const vk::DescriptorSetDescBuilder &shaderResourcesDesc,
1563     vk::SharedDescriptorSetCacheKey *newSharedCacheKeyOut)
1564 {
1565     if (!mDescriptorPools[DescriptorSetIndex::ShaderResource].get().valid())
1566     {
1567         *newSharedCacheKeyOut = nullptr;
1568         return angle::Result::Continue;
1569     }
1570 
1571     ANGLE_TRY(getOrAllocateDescriptorSet(context, updateBuilder, commandBufferHelper,
1572                                          shaderResourcesDesc, writeDescriptorDescs,
1573                                          DescriptorSetIndex::ShaderResource, newSharedCacheKeyOut));
1574 
1575     size_t numOffsets = writeDescriptorDescs.getDynamicDescriptorSetCount();
1576     mDynamicShaderResourceDescriptorOffsets.resize(numOffsets);
1577     if (numOffsets > 0)
1578     {
1579         memcpy(mDynamicShaderResourceDescriptorOffsets.data(),
1580                shaderResourcesDesc.getDynamicOffsets(), numOffsets * sizeof(uint32_t));
1581     }
1582 
1583     return angle::Result::Continue;
1584 }
1585 
updateUniformsAndXfbDescriptorSet(vk::Context * context,UpdateDescriptorSetsBuilder * updateBuilder,const vk::WriteDescriptorDescs & writeDescriptorDescs,vk::CommandBufferHelperCommon * commandBufferHelper,vk::BufferHelper * defaultUniformBuffer,vk::DescriptorSetDescBuilder * uniformsAndXfbDesc,vk::SharedDescriptorSetCacheKey * sharedCacheKeyOut)1586 angle::Result ProgramExecutableVk::updateUniformsAndXfbDescriptorSet(
1587     vk::Context *context,
1588     UpdateDescriptorSetsBuilder *updateBuilder,
1589     const vk::WriteDescriptorDescs &writeDescriptorDescs,
1590     vk::CommandBufferHelperCommon *commandBufferHelper,
1591     vk::BufferHelper *defaultUniformBuffer,
1592     vk::DescriptorSetDescBuilder *uniformsAndXfbDesc,
1593     vk::SharedDescriptorSetCacheKey *sharedCacheKeyOut)
1594 {
1595     mCurrentDefaultUniformBufferSerial =
1596         defaultUniformBuffer ? defaultUniformBuffer->getBufferSerial() : vk::kInvalidBufferSerial;
1597 
1598     return getOrAllocateDescriptorSet(context, updateBuilder, commandBufferHelper,
1599                                       *uniformsAndXfbDesc, writeDescriptorDescs,
1600                                       DescriptorSetIndex::UniformsAndXfb, sharedCacheKeyOut);
1601 }
1602 
// Allocates (or retrieves from cache) the texture descriptor set and, on a cache miss, fills it
// with the currently bound textures and samplers.
angle::Result ProgramExecutableVk::updateTexturesDescriptorSet(
    vk::Context *context,
    const gl::ProgramExecutable &executable,
    const gl::ActiveTextureArray<TextureVk *> &textures,
    const gl::SamplerBindingVector &samplers,
    bool emulateSeamfulCubeMapSampling,
    PipelineType pipelineType,
    UpdateDescriptorSetsBuilder *updateBuilder,
    vk::CommandBufferHelperCommon *commandBufferHelper,
    const vk::DescriptorSetDesc &texturesDesc)
{
    // Look the set up by its description; a non-null key signals that a new set was allocated.
    vk::SharedDescriptorSetCacheKey newSharedCacheKey;
    ANGLE_TRY(mDescriptorPools[DescriptorSetIndex::Texture].get().getOrAllocateDescriptorSet(
        context, commandBufferHelper, texturesDesc,
        mDescriptorSetLayouts[DescriptorSetIndex::Texture].get(),
        &mDescriptorPoolBindings[DescriptorSetIndex::Texture],
        &mDescriptorSets[DescriptorSetIndex::Texture], &newSharedCacheKey));
    ASSERT(mDescriptorSets[DescriptorSetIndex::Texture] != VK_NULL_HANDLE);

    if (newSharedCacheKey != nullptr)
    {
        vk::DescriptorSetDescBuilder fullDesc(
            mTextureWriteDescriptorDescs.getTotalDescriptorCount());
        // Cache miss. A new cache entry has been created.
        ANGLE_TRY(fullDesc.updateFullActiveTextures(
            context, mVariableInfoMap, mTextureWriteDescriptorDescs, executable, textures, samplers,
            emulateSeamfulCubeMapSampling, pipelineType, newSharedCacheKey));
        fullDesc.updateDescriptorSet(context, mTextureWriteDescriptorDescs, updateBuilder,
                                     mDescriptorSets[DescriptorSetIndex::Texture]);
    }
    else
    {
        // Cache hit: the set contents are already valid; keep the pool binding alive for the
        // duration of this command buffer.
        commandBufferHelper->retainResource(
            &mDescriptorPoolBindings[DescriptorSetIndex::Texture].get());
    }

    return angle::Result::Continue;
}
1641 
1642 template <typename CommandBufferT>
bindDescriptorSets(vk::Context * context,vk::CommandBufferHelperCommon * commandBufferHelper,CommandBufferT * commandBuffer,PipelineType pipelineType)1643 angle::Result ProgramExecutableVk::bindDescriptorSets(
1644     vk::Context *context,
1645     vk::CommandBufferHelperCommon *commandBufferHelper,
1646     CommandBufferT *commandBuffer,
1647     PipelineType pipelineType)
1648 {
1649     // Can probably use better dirty bits here.
1650 
1651     // Find the maximum non-null descriptor set.  This is used in conjunction with a driver
1652     // workaround to bind empty descriptor sets only for gaps in between 0 and max and avoid
1653     // binding unnecessary empty descriptor sets for the sets beyond max.
1654     DescriptorSetIndex lastNonNullDescriptorSetIndex = DescriptorSetIndex::InvalidEnum;
1655     for (DescriptorSetIndex descriptorSetIndex : angle::AllEnums<DescriptorSetIndex>())
1656     {
1657         if (mDescriptorSets[descriptorSetIndex] != VK_NULL_HANDLE)
1658         {
1659             lastNonNullDescriptorSetIndex = descriptorSetIndex;
1660         }
1661     }
1662 
1663     const VkPipelineBindPoint pipelineBindPoint = pipelineType == PipelineType::Compute
1664                                                       ? VK_PIPELINE_BIND_POINT_COMPUTE
1665                                                       : VK_PIPELINE_BIND_POINT_GRAPHICS;
1666 
1667     for (DescriptorSetIndex descriptorSetIndex : angle::AllEnums<DescriptorSetIndex>())
1668     {
1669         if (ToUnderlying(descriptorSetIndex) > ToUnderlying(lastNonNullDescriptorSetIndex))
1670         {
1671             continue;
1672         }
1673 
1674         VkDescriptorSet descSet = mDescriptorSets[descriptorSetIndex];
1675         if (descSet == VK_NULL_HANDLE)
1676         {
1677             continue;
1678         }
1679 
1680         // Default uniforms are encompassed in a block per shader stage, and they are assigned
1681         // through dynamic uniform buffers (requiring dynamic offsets).  No other descriptor
1682         // requires a dynamic offset.
1683         if (descriptorSetIndex == DescriptorSetIndex::UniformsAndXfb)
1684         {
1685             commandBuffer->bindDescriptorSets(
1686                 getPipelineLayout(), pipelineBindPoint, descriptorSetIndex, 1, &descSet,
1687                 static_cast<uint32_t>(mDynamicUniformDescriptorOffsets.size()),
1688                 mDynamicUniformDescriptorOffsets.data());
1689         }
1690         else if (descriptorSetIndex == DescriptorSetIndex::ShaderResource)
1691         {
1692             commandBuffer->bindDescriptorSets(
1693                 getPipelineLayout(), pipelineBindPoint, descriptorSetIndex, 1, &descSet,
1694                 static_cast<uint32_t>(mDynamicShaderResourceDescriptorOffsets.size()),
1695                 mDynamicShaderResourceDescriptorOffsets.data());
1696         }
1697         else
1698         {
1699             commandBuffer->bindDescriptorSets(getPipelineLayout(), pipelineBindPoint,
1700                                               descriptorSetIndex, 1, &descSet, 0, nullptr);
1701         }
1702     }
1703 
1704     return angle::Result::Continue;
1705 }
1706 
1707 template angle::Result ProgramExecutableVk::bindDescriptorSets<vk::priv::SecondaryCommandBuffer>(
1708     vk::Context *context,
1709     vk::CommandBufferHelperCommon *commandBufferHelper,
1710     vk::priv::SecondaryCommandBuffer *commandBuffer,
1711     PipelineType pipelineType);
1712 template angle::Result ProgramExecutableVk::bindDescriptorSets<vk::VulkanSecondaryCommandBuffer>(
1713     vk::Context *context,
1714     vk::CommandBufferHelperCommon *commandBufferHelper,
1715     vk::VulkanSecondaryCommandBuffer *commandBuffer,
1716     PipelineType pipelineType);
1717 
setAllDefaultUniformsDirty(const gl::ProgramExecutable & executable)1718 void ProgramExecutableVk::setAllDefaultUniformsDirty(const gl::ProgramExecutable &executable)
1719 {
1720     mDefaultUniformBlocksDirty.reset();
1721     for (gl::ShaderType shaderType : executable.getLinkedShaderStages())
1722     {
1723         if (!mDefaultUniformBlocks[shaderType]->uniformData.empty())
1724         {
1725             mDefaultUniformBlocksDirty.set(shaderType);
1726         }
1727     }
1728 }
1729 
// Uploads dirty default-uniform data into the shared dynamic uniform buffer and, when the backing
// buffer changed, re-acquires the uniforms-and-xfb descriptor set.
angle::Result ProgramExecutableVk::updateUniforms(
    vk::Context *context,
    UpdateDescriptorSetsBuilder *updateBuilder,
    vk::CommandBufferHelperCommon *commandBufferHelper,
    vk::BufferHelper *emptyBuffer,
    const gl::ProgramExecutable &glExecutable,
    vk::DynamicBuffer *defaultUniformStorage,
    bool isTransformFeedbackActiveUnpaused,
    TransformFeedbackVk *transformFeedbackVk)
{
    ASSERT(hasDirtyUniforms());

    vk::BufferHelper *defaultUniformBuffer;
    bool anyNewBufferAllocated          = false;
    gl::ShaderMap<VkDeviceSize> offsets = {};  // offset to the beginning of bufferData
    uint32_t offsetIndex                = 0;
    size_t requiredSpace;

    // We usually only update uniform data for shader stages that are actually dirty. But when the
    // buffer for uniform data have switched, because all shader stages are using the same buffer,
    // we then must update uniform data for all shader stages to keep all shader stages' uniform
    // data in the same buffer.
    requiredSpace = calcUniformUpdateRequiredSpace(context, glExecutable, &offsets);
    ASSERT(requiredSpace > 0);

    // Allocate space from dynamicBuffer. Always try to allocate from the current buffer first.
    // If that failed, we deal with fall out and try again.
    if (!defaultUniformStorage->allocateFromCurrentBuffer(requiredSpace, &defaultUniformBuffer))
    {
        // The buffer switched: mark every stage dirty and recompute the (now larger) required
        // space and per-stage offsets before allocating from a fresh buffer.
        setAllDefaultUniformsDirty(glExecutable);

        requiredSpace = calcUniformUpdateRequiredSpace(context, glExecutable, &offsets);
        ANGLE_TRY(defaultUniformStorage->allocate(context, requiredSpace, &defaultUniformBuffer,
                                                  &anyNewBufferAllocated));
    }

    ASSERT(defaultUniformBuffer);

    // Copy each dirty stage's uniform data into the mapped buffer, record its dynamic offset, and
    // clear the stage's dirty bit.  offsetIndex advances for every linked stage so the dynamic
    // offsets stay aligned with the descriptor order.
    uint8_t *bufferData       = defaultUniformBuffer->getMappedMemory();
    VkDeviceSize bufferOffset = defaultUniformBuffer->getOffset();
    for (gl::ShaderType shaderType : glExecutable.getLinkedShaderStages())
    {
        if (mDefaultUniformBlocksDirty[shaderType])
        {
            const angle::MemoryBuffer &uniformData = mDefaultUniformBlocks[shaderType]->uniformData;
            memcpy(&bufferData[offsets[shaderType]], uniformData.data(), uniformData.size());
            mDynamicUniformDescriptorOffsets[offsetIndex] =
                static_cast<uint32_t>(bufferOffset + offsets[shaderType]);
            mDefaultUniformBlocksDirty.reset(shaderType);
        }
        ++offsetIndex;
    }
    ANGLE_TRY(defaultUniformBuffer->flush(context->getRenderer()));

    // Because the uniform buffers are per context, we can't rely on dynamicBuffer's allocate
    // function to tell us if you have got a new buffer or not. Other program's use of the buffer
    // might already pushed dynamicBuffer to a new buffer. We record which buffer (represented by
    // the unique BufferSerial number) we were using with the current descriptor set and then we
    // use that recorded BufferSerial compare to the current uniform buffer to quickly detect if
    // there is a buffer switch or not. We need to retrieve from the descriptor set cache or
    // allocate a new descriptor set whenever there is uniform buffer switch.
    if (mCurrentDefaultUniformBufferSerial != defaultUniformBuffer->getBufferSerial())
    {
        // We need to reinitialize the descriptor sets if we newly allocated buffers since we can't
        // modify the descriptor sets once initialized.
        const vk::WriteDescriptorDescs &writeDescriptorDescs =
            getDefaultUniformWriteDescriptorDescs(transformFeedbackVk);

        vk::DescriptorSetDescBuilder uniformsAndXfbDesc(
            writeDescriptorDescs.getTotalDescriptorCount());
        uniformsAndXfbDesc.updateUniformsAndXfb(
            context, glExecutable, *this, writeDescriptorDescs, defaultUniformBuffer, *emptyBuffer,
            isTransformFeedbackActiveUnpaused,
            glExecutable.hasTransformFeedbackOutput() ? transformFeedbackVk : nullptr);

        // On a descriptor-set cache miss, notify the buffer block (and emulated transform
        // feedback, if any) so they can invalidate the cached set when they are released.
        vk::SharedDescriptorSetCacheKey newSharedCacheKey;
        ANGLE_TRY(updateUniformsAndXfbDescriptorSet(context, updateBuilder, writeDescriptorDescs,
                                                    commandBufferHelper, defaultUniformBuffer,
                                                    &uniformsAndXfbDesc, &newSharedCacheKey));
        if (newSharedCacheKey)
        {
            defaultUniformBuffer->getBufferBlock()->onNewDescriptorSet(newSharedCacheKey);
            if (glExecutable.hasTransformFeedbackOutput() &&
                context->getFeatures().emulateTransformFeedback.enabled)
            {
                transformFeedbackVk->onNewDescriptorSet(glExecutable, newSharedCacheKey);
            }
        }
    }

    return angle::Result::Continue;
}
1822 
calcUniformUpdateRequiredSpace(vk::Context * context,const gl::ProgramExecutable & glExecutable,gl::ShaderMap<VkDeviceSize> * uniformOffsets) const1823 size_t ProgramExecutableVk::calcUniformUpdateRequiredSpace(
1824     vk::Context *context,
1825     const gl::ProgramExecutable &glExecutable,
1826     gl::ShaderMap<VkDeviceSize> *uniformOffsets) const
1827 {
1828     size_t requiredSpace = 0;
1829     for (gl::ShaderType shaderType : glExecutable.getLinkedShaderStages())
1830     {
1831         if (mDefaultUniformBlocksDirty[shaderType])
1832         {
1833             (*uniformOffsets)[shaderType] = requiredSpace;
1834             requiredSpace += getDefaultUniformAlignedSize(context, shaderType);
1835         }
1836     }
1837     return requiredSpace;
1838 }
1839 
onProgramBind(const gl::ProgramExecutable & glExecutable)1840 void ProgramExecutableVk::onProgramBind(const gl::ProgramExecutable &glExecutable)
1841 {
1842     // Because all programs share default uniform buffers, when we switch programs, we have to
1843     // re-update all uniform data. We could do more tracking to avoid update if the context's
1844     // current uniform buffer is still the same buffer we last time used and buffer has not been
1845     // recycled. But statistics gathered on gfxbench shows that app always update uniform data on
1846     // program bind anyway, so not really worth it to add more tracking logic here.
1847     setAllDefaultUniformsDirty(glExecutable);
1848 }
1849 
resizeUniformBlockMemory(ContextVk * contextVk,const gl::ProgramExecutable & glExecutable,const gl::ShaderMap<size_t> & requiredBufferSize)1850 angle::Result ProgramExecutableVk::resizeUniformBlockMemory(
1851     ContextVk *contextVk,
1852     const gl::ProgramExecutable &glExecutable,
1853     const gl::ShaderMap<size_t> &requiredBufferSize)
1854 {
1855     for (gl::ShaderType shaderType : glExecutable.getLinkedShaderStages())
1856     {
1857         if (requiredBufferSize[shaderType] > 0)
1858         {
1859             if (!mDefaultUniformBlocks[shaderType]->uniformData.resize(
1860                     requiredBufferSize[shaderType]))
1861             {
1862                 ANGLE_VK_CHECK(contextVk, false, VK_ERROR_OUT_OF_HOST_MEMORY);
1863             }
1864 
1865             // Initialize uniform buffer memory to zero by default.
1866             mDefaultUniformBlocks[shaderType]->uniformData.fill(0);
1867             mDefaultUniformBlocksDirty.set(shaderType);
1868         }
1869     }
1870 
1871     return angle::Result::Continue;
1872 }
1873 }  // namespace rx
1874