//
// Copyright 2016 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// ProgramVk.cpp:
//    Implements the class methods for ProgramVk.
//

#include "libANGLE/renderer/vulkan/ProgramVk.h"

#include "common/debug.h"
#include "common/utilities.h"
#include "libANGLE/Context.h"
#include "libANGLE/ProgramLinkedResources.h"
#include "libANGLE/renderer/renderer_utils.h"
#include "libANGLE/renderer/vulkan/BufferVk.h"
#include "libANGLE/renderer/vulkan/TextureVk.h"

namespace rx
{

namespace
{
// Identical to Std140 encoder in all aspects, except it ignores opaque uniform types.
class VulkanDefaultBlockEncoder : public sh::Std140BlockEncoder
{
  public:
    void advanceOffset(GLenum type,
                       const std::vector<unsigned int> &arraySizes,
                       bool isRowMajorMatrix,
                       int arrayStride,
                       int matrixStride) override
    {
        if (gl::IsOpaqueType(type))
        {
            return;
        }

        sh::Std140BlockEncoder::advanceOffset(type, arraySizes, isRowMajorMatrix, arrayStride,
                                              matrixStride);
    }
};

class Std140BlockLayoutEncoderFactory : public gl::CustomBlockLayoutEncoderFactory
{
  public:
    sh::BlockLayoutEncoder *makeEncoder() override { return new sh::Std140BlockEncoder(); }
};

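// Performs the Vulkan-specific part of program linking.  The task may run on a worker thread;
// Vulkan errors are recorded through handleError() and forwarded to the context in getResult().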
class LinkTaskVk final : public vk::Context, public LinkTask
{
  public:
    LinkTaskVk(vk::Renderer *renderer,
               PipelineLayoutCache &pipelineLayoutCache,
               DescriptorSetLayoutCache &descriptorSetLayoutCache,
               const gl::ProgramState &state,
               bool isGLES1,
               vk::PipelineRobustness pipelineRobustness,
               vk::PipelineProtectedAccess pipelineProtectedAccess)
        : vk::Context(renderer),
          mState(state),
          mExecutable(&mState.getExecutable()),
          mIsGLES1(isGLES1),
          mPipelineRobustness(pipelineRobustness),
          mPipelineProtectedAccess(pipelineProtectedAccess),
          mPipelineLayoutCache(pipelineLayoutCache),
          mDescriptorSetLayoutCache(descriptorSetLayoutCache)
    {}
    ~LinkTaskVk() override = default;

    void link(const gl::ProgramLinkedResources &resources,
              const gl::ProgramMergedVaryings &mergedVaryings,
              std::vector<std::shared_ptr<LinkSubTask>> *linkSubTasksOut,
              std::vector<std::shared_ptr<LinkSubTask>> *postLinkSubTasksOut) override
    {
        ASSERT(linkSubTasksOut && linkSubTasksOut->empty());
        ASSERT(postLinkSubTasksOut && postLinkSubTasksOut->empty());

        // In the Vulkan backend, the only subtasks are pipeline warm-up tasks, which are not
        // required for link.  By running them as post-link tasks, the expensive warm-up happens
        // on a thread without holding up the link results.
        angle::Result result = linkImpl(resources, mergedVaryings, postLinkSubTasksOut);
        ASSERT((result == angle::Result::Continue) == (mErrorCode == VK_SUCCESS));
    }

    void handleError(VkResult result,
                     const char *file,
                     const char *function,
                     unsigned int line) override
    {
        mErrorCode     = result;
        mErrorFile     = file;
        mErrorFunction = function;
        mErrorLine     = line;
    }

    angle::Result getResult(const gl::Context *context, gl::InfoLog &infoLog) override
    {
        ContextVk *contextVk              = vk::GetImpl(context);
        ProgramExecutableVk *executableVk = vk::GetImpl(mExecutable);

        ANGLE_TRY(executableVk->initializeDescriptorPools(contextVk,
                                                          &contextVk->getDescriptorSetLayoutCache(),
                                                          &contextVk->getMetaDescriptorPools()));

        // If the program uses framebuffer fetch and this is the first time this happens, switch the
        // context to "framebuffer fetch mode".  In this mode, all render passes assume framebuffer
        // fetch may be used, so they are prepared to accept a program that uses input attachments.
        // This is done only when a program with framebuffer fetch is created to avoid potential
        // performance impact on applications that don't use this extension.  If other contexts in
        // the share group use this program, they will lazily switch to this mode.
        //
        // This is purely an optimization (to avoid creating and later releasing
        // non-framebuffer-fetch render passes).
        if (contextVk->getFeatures().permanentlySwitchToFramebufferFetchMode.enabled &&
            mExecutable->usesFramebufferFetch())
        {
            ANGLE_TRY(contextVk->switchToFramebufferFetchMode(true));
        }

        // Forward any errors
        if (mErrorCode != VK_SUCCESS)
        {
            contextVk->handleError(mErrorCode, mErrorFile, mErrorFunction, mErrorLine);
            return angle::Result::Stop;
        }

        // Accumulate relevant perf counters
        const angle::VulkanPerfCounters &from = getPerfCounters();
        angle::VulkanPerfCounters &to         = contextVk->getPerfCounters();

        to.pipelineCreationCacheHits += from.pipelineCreationCacheHits;
        to.pipelineCreationCacheMisses += from.pipelineCreationCacheMisses;
        to.pipelineCreationTotalCacheHitsDurationNs +=
            from.pipelineCreationTotalCacheHitsDurationNs;
        to.pipelineCreationTotalCacheMissesDurationNs +=
            from.pipelineCreationTotalCacheMissesDurationNs;

        return angle::Result::Continue;
    }

  private:
    angle::Result linkImpl(const gl::ProgramLinkedResources &resources,
                           const gl::ProgramMergedVaryings &mergedVaryings,
                           std::vector<std::shared_ptr<LinkSubTask>> *postLinkSubTasksOut);

    void linkResources(const gl::ProgramLinkedResources &resources);
    angle::Result initDefaultUniformBlocks();
    void generateUniformLayoutMapping(gl::ShaderMap<sh::BlockLayoutMap> *layoutMapOut,
                                      gl::ShaderMap<size_t> *requiredBufferSizeOut);
    void initDefaultUniformLayoutMapping(gl::ShaderMap<sh::BlockLayoutMap> *layoutMapOut);

    // The front-end ensures that the program is not accessed while linking, so it is safe to
    // directly access the state from a potentially parallel job.
    const gl::ProgramState &mState;
    const gl::ProgramExecutable *mExecutable;
    const bool mIsGLES1;
    const vk::PipelineRobustness mPipelineRobustness;
    const vk::PipelineProtectedAccess mPipelineProtectedAccess;

    // Helpers that are internally thread-safe
    PipelineLayoutCache &mPipelineLayoutCache;
    DescriptorSetLayoutCache &mDescriptorSetLayoutCache;

    // Error handling
    VkResult mErrorCode        = VK_SUCCESS;
    const char *mErrorFile     = nullptr;
    const char *mErrorFunction = nullptr;
    unsigned int mErrorLine    = 0;
};

angle::Result LinkTaskVk::linkImpl(const gl::ProgramLinkedResources &resources,
                                   const gl::ProgramMergedVaryings &mergedVaryings,
                                   std::vector<std::shared_ptr<LinkSubTask>> *postLinkSubTasksOut)
{
    ANGLE_TRACE_EVENT0("gpu.angle", "LinkTaskVk::linkImpl");
    ProgramExecutableVk *executableVk = vk::GetImpl(mExecutable);

    // Link resources before calling GetShaderSource to make sure they are ready for the set/binding
    // assignment done in that function.
    linkResources(resources);

    executableVk->clearVariableInfoMap();

    // Gather variable info and compiled SPIR-V binaries.
    executableVk->assignAllSpvLocations(this, mState, resources);

    gl::ShaderMap<const angle::spirv::Blob *> spirvBlobs;
    SpvGetShaderSpirvCode(mState, &spirvBlobs);

    if (getFeatures().varyingsRequireMatchingPrecisionInSpirv.enabled &&
        getFeatures().enablePrecisionQualifiers.enabled)
    {
        executableVk->resolvePrecisionMismatch(mergedVaryings);
    }

    // Compile the shaders.
    ANGLE_TRY(executableVk->initShaders(this, mExecutable->getLinkedShaderStages(), spirvBlobs,
                                        mIsGLES1));

    ANGLE_TRY(initDefaultUniformBlocks());

    ANGLE_TRY(executableVk->createPipelineLayout(this, &mPipelineLayoutCache,
                                                 &mDescriptorSetLayoutCache, nullptr));

    // Warm up the pipeline cache by creating a few placeholder pipelines.  This is not done for
    // separable programs, and is deferred to when the program pipeline is finalized.
    //
    // The cache warm up is skipped for GLES1 for two reasons:
    //
    // - Since GLES1 shaders are limited, the individual programs don't necessarily add new
    //   pipelines, but rather it's draw time state that controls that.  Since the programs are
    //   generated at draw time, it's just as well to let the pipelines be created using the
    //   renderer's shared cache.
    // - Individual GLES1 tests are long, and this adds a considerable overhead to those tests
    if (!mState.isSeparable() && !mIsGLES1 && getFeatures().warmUpPipelineCacheAtLink.enabled)
    {
        // Only build the shaders subset of the pipeline if VK_EXT_graphics_pipeline_library is
        // supported.
        const vk::GraphicsPipelineSubset subset =
            getFeatures().supportsGraphicsPipelineLibrary.enabled
                ? vk::GraphicsPipelineSubset::Shaders
                : vk::GraphicsPipelineSubset::Complete;

        ANGLE_TRY(executableVk->getPipelineCacheWarmUpTasks(
            mRenderer, mPipelineRobustness, mPipelineProtectedAccess, subset, postLinkSubTasksOut));
    }

    return angle::Result::Continue;
}

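// Links the program's interface blocks and other resources, using a std140 layout encoder for
// uniform blocks.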
void LinkTaskVk::linkResources(const gl::ProgramLinkedResources &resources)
{
    Std140BlockLayoutEncoderFactory std140EncoderFactory;
    gl::ProgramLinkedResourcesLinker linker(&std140EncoderFactory);

    linker.linkResources(mState, resources);
}

angle::Result LinkTaskVk::initDefaultUniformBlocks()
{
    ProgramExecutableVk *executableVk = vk::GetImpl(mExecutable);

    // Process vertex and fragment uniforms into std140 packing.
    gl::ShaderMap<sh::BlockLayoutMap> layoutMap;
    gl::ShaderMap<size_t> requiredBufferSize;
    requiredBufferSize.fill(0);

    generateUniformLayoutMapping(&layoutMap, &requiredBufferSize);
    initDefaultUniformLayoutMapping(&layoutMap);

    // All uniform initializations are complete, now resize the buffers accordingly and return
    return executableVk->resizeUniformBlockMemory(this, requiredBufferSize);
}

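// Computes the std140 layout of one stage's default uniform block (skipping opaque types such as
// samplers and images) and reports the total buffer size the block requires.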
void InitDefaultUniformBlock(const std::vector<sh::ShaderVariable> &uniforms,
                             sh::BlockLayoutMap *blockLayoutMapOut,
                             size_t *blockSizeOut)
{
    if (uniforms.empty())
    {
        *blockSizeOut = 0;
        return;
    }

    VulkanDefaultBlockEncoder blockEncoder;
    sh::GetActiveUniformBlockInfo(uniforms, "", &blockEncoder, blockLayoutMapOut);

    size_t blockSize = blockEncoder.getCurrentOffset();

    // TODO(jmadill): I think we still need a valid block for the pipeline even if zero sized.
    if (blockSize == 0)
    {
        *blockSizeOut = 0;
        return;
    }

    *blockSizeOut = blockSize;
    return;
}

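// Builds the default uniform block layout for every linked shader stage and records how much
// buffer memory each stage's block requires.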
void LinkTaskVk::generateUniformLayoutMapping(gl::ShaderMap<sh::BlockLayoutMap> *layoutMapOut,
                                              gl::ShaderMap<size_t> *requiredBufferSizeOut)
{
    for (const gl::ShaderType shaderType : mExecutable->getLinkedShaderStages())
    {
        const gl::SharedCompiledShaderState &shader = mState.getAttachedShader(shaderType);

        if (shader)
        {
            const std::vector<sh::ShaderVariable> &uniforms = shader->uniforms;
            InitDefaultUniformBlock(uniforms, &(*layoutMapOut)[shaderType],
                                    &(*requiredBufferSizeOut)[shaderType]);
        }
    }
}

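// Copies the per-stage layout info into the executable's shared default uniform blocks, one entry
// per uniform location.  Opaque uniforms and unused locations get a default (empty) entry.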
void LinkTaskVk::initDefaultUniformLayoutMapping(gl::ShaderMap<sh::BlockLayoutMap> *layoutMapOut)
{
    // Init the default block layout info.
    ProgramExecutableVk *executableVk = vk::GetImpl(mExecutable);
    const auto &uniforms              = mExecutable->getUniforms();

    for (const gl::VariableLocation &location : mExecutable->getUniformLocations())
    {
        gl::ShaderMap<sh::BlockMemberInfo> layoutInfo;

        if (location.used() && !location.ignored)
        {
            const auto &uniform = uniforms[location.index];
            if (uniform.isInDefaultBlock() && !uniform.isSampler() && !uniform.isImage() &&
                !uniform.isFragmentInOut())
            {
                std::string uniformName = mExecutable->getUniformNameByIndex(location.index);
                if (uniform.isArray())
                {
                    // Gets the uniform name without the [0] at the end.
                    uniformName = gl::StripLastArrayIndex(uniformName);
                    ASSERT(uniformName.size() !=
                           mExecutable->getUniformNameByIndex(location.index).size());
                }

                bool found = false;

                for (const gl::ShaderType shaderType : mExecutable->getLinkedShaderStages())
                {
                    auto it = (*layoutMapOut)[shaderType].find(uniformName);
                    if (it != (*layoutMapOut)[shaderType].end())
                    {
                        found                  = true;
                        layoutInfo[shaderType] = it->second;
                    }
                }

                ASSERT(found);
            }
        }

        for (const gl::ShaderType shaderType : mExecutable->getLinkedShaderStages())
        {
            executableVk->getSharedDefaultUniformBlock(shaderType)
                ->uniformLayout.push_back(layoutInfo[shaderType]);
        }
    }
}
}  // anonymous namespace

// ProgramVk implementation.
ProgramVk::ProgramVk(const gl::ProgramState &state) : ProgramImpl(state) {}

ProgramVk::~ProgramVk() = default;

void ProgramVk::destroy(const gl::Context *context)
{
    ContextVk *contextVk = vk::GetImpl(context);
    getExecutable()->reset(contextVk);
}

angle::Result ProgramVk::load(const gl::Context *context,
                              gl::BinaryInputStream *stream,
                              std::shared_ptr<LinkTask> *loadTaskOut,
                              egl::CacheGetResult *resultOut)
{
    ContextVk *contextVk = vk::GetImpl(context);

    // TODO: parallelize program load.  http://anglebug.com/8297
    *loadTaskOut = {};

    return getExecutable()->load(contextVk, mState.isSeparable(), stream, resultOut);
}

void ProgramVk::save(const gl::Context *context, gl::BinaryOutputStream *stream)
{
    ContextVk *contextVk = vk::GetImpl(context);
    getExecutable()->save(contextVk, mState.isSeparable(), stream);
}

void ProgramVk::setBinaryRetrievableHint(bool retrievable)
{
    // Nothing to do here yet.
}

void ProgramVk::setSeparable(bool separable)
{
    // Nothing to do here yet.
}

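// Linking is delegated to LinkTaskVk; the front-end may run the returned task on a worker thread.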
angle::Result ProgramVk::link(const gl::Context *context, std::shared_ptr<LinkTask> *linkTaskOut)
{
    ContextVk *contextVk = vk::GetImpl(context);

    *linkTaskOut = std::shared_ptr<LinkTask>(new LinkTaskVk(
        contextVk->getRenderer(), contextVk->getPipelineLayoutCache(),
        contextVk->getDescriptorSetLayoutCache(), mState, context->getState().isGLES1(),
        contextVk->pipelineRobustness(), contextVk->pipelineProtectedAccess()));

    return angle::Result::Continue;
}

GLboolean ProgramVk::validate(const gl::Caps &caps)
{
    // No-op. The spec is very vague about the behavior of validation.
    return GL_TRUE;
}

}  // namespace rx