//
// Copyright 2016 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// ProgramVk.cpp:
//    Implements the class methods for ProgramVk.
//

#include "libANGLE/renderer/vulkan/ProgramVk.h"

#include "common/debug.h"
#include "common/utilities.h"
#include "libANGLE/Context.h"
#include "libANGLE/ProgramLinkedResources.h"
#include "libANGLE/renderer/renderer_utils.h"
#include "libANGLE/renderer/vulkan/BufferVk.h"
#include "libANGLE/renderer/vulkan/TextureVk.h"

namespace rx
{

namespace
{
// Identical to Std140 encoder in all aspects, except it ignores opaque uniform types.
class VulkanDefaultBlockEncoder : public sh::Std140BlockEncoder
{
  public:
    void advanceOffset(GLenum type,
                       const std::vector<unsigned int> &arraySizes,
                       bool isRowMajorMatrix,
                       int arrayStride,
                       int matrixStride) override
    {
        if (gl::IsOpaqueType(type))
        {
            return;
        }

        sh::Std140BlockEncoder::advanceOffset(type, arraySizes, isRowMajorMatrix, arrayStride,
                                              matrixStride);
    }
};

class Std140BlockLayoutEncoderFactory : public gl::CustomBlockLayoutEncoderFactory
{
  public:
    sh::BlockLayoutEncoder *makeEncoder() override { return new sh::Std140BlockEncoder(); }
};

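// Runs the Vulkan-specific part of program link, potentially in parallel with other link tasks:
// it assigns SPIR-V locations, initializes the shaders, lays out the default uniform blocks,
// creates the pipeline layout, and may queue pipeline cache warm up as post-link subtasks.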
class LinkTaskVk final : public vk::ErrorContext, public LinkTask
{
  public:
    LinkTaskVk(vk::Renderer *renderer,
               PipelineLayoutCache &pipelineLayoutCache,
               DescriptorSetLayoutCache &descriptorSetLayoutCache,
               const gl::ProgramState &state,
               bool isGLES1,
               vk::PipelineRobustness pipelineRobustness,
               vk::PipelineProtectedAccess pipelineProtectedAccess)
        : vk::ErrorContext(renderer),
          mState(state),
          mExecutable(&mState.getExecutable()),
          mIsGLES1(isGLES1),
          mPipelineRobustness(pipelineRobustness),
          mPipelineProtectedAccess(pipelineProtectedAccess),
          mPipelineLayoutCache(pipelineLayoutCache),
          mDescriptorSetLayoutCache(descriptorSetLayoutCache)
    {}
    ~LinkTaskVk() override = default;

    void link(const gl::ProgramLinkedResources &resources,
              const gl::ProgramMergedVaryings &mergedVaryings,
              std::vector<std::shared_ptr<LinkSubTask>> *linkSubTasksOut,
              std::vector<std::shared_ptr<LinkSubTask>> *postLinkSubTasksOut) override
    {
        ASSERT(linkSubTasksOut && linkSubTasksOut->empty());
        ASSERT(postLinkSubTasksOut && postLinkSubTasksOut->empty());

        // In the Vulkan backend, the only subtasks are pipeline warm up, which is not required
        // for link.  By running the warm up as a post-link task, that expensive work happens on
        // a worker thread without holding up the link results.
        angle::Result result = linkImpl(resources, mergedVaryings, postLinkSubTasksOut);
        ASSERT((result == angle::Result::Continue) == (mErrorCode == VK_SUCCESS));
    }

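    // Records a Vulkan error encountered while the task runs; getResult() later forwards it to
    // the context on the thread that collects the link result.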
    void handleError(VkResult result,
                     const char *file,
                     const char *function,
                     unsigned int line) override
    {
        mErrorCode = result;
        mErrorFile = file;
        mErrorFunction = function;
        mErrorLine = line;
    }

    angle::Result getResult(const gl::Context *context, gl::InfoLog &infoLog) override
    {
        ContextVk *contextVk = vk::GetImpl(context);
        ProgramExecutableVk *executableVk = vk::GetImpl(mExecutable);

        ANGLE_TRY(executableVk->initializeDescriptorPools(contextVk,
                                                          &contextVk->getDescriptorSetLayoutCache(),
                                                          &contextVk->getMetaDescriptorPools()));

        // If the program uses framebuffer fetch and this is the first time this happens, switch
        // the context to "framebuffer fetch mode".  In this mode, all render passes assume
        // framebuffer fetch may be used, so they are prepared to accept a program that uses
        // input attachments.  This is done only when a program with framebuffer fetch is created
        // to avoid potential performance impact on applications that don't use this extension.
        // If other contexts in the share group use this program, they will lazily switch to this
        // mode.
        //
        // This is purely an optimization (to avoid creating and later releasing non-framebuffer
        // fetch render passes).  The optimization is unnecessary for, and does not apply to,
        // dynamic rendering.
        if (!contextVk->getFeatures().preferDynamicRendering.enabled &&
            contextVk->getFeatures().permanentlySwitchToFramebufferFetchMode.enabled &&
            mExecutable->usesColorFramebufferFetch())
        {
            ANGLE_TRY(contextVk->switchToColorFramebufferFetchMode(true));
        }

        // Forward any errors
        if (mErrorCode != VK_SUCCESS)
        {
            contextVk->handleError(mErrorCode, mErrorFile, mErrorFunction, mErrorLine);
            return angle::Result::Stop;
        }

        return angle::Result::Continue;
    }

  private:
    angle::Result linkImpl(const gl::ProgramLinkedResources &resources,
                           const gl::ProgramMergedVaryings &mergedVaryings,
                           std::vector<std::shared_ptr<LinkSubTask>> *postLinkSubTasksOut);

    void linkResources(const gl::ProgramLinkedResources &resources);
    angle::Result initDefaultUniformBlocks();
    void generateUniformLayoutMapping(gl::ShaderMap<sh::BlockLayoutMap> *layoutMapOut,
                                      gl::ShaderMap<size_t> *requiredBufferSizeOut);
    void initDefaultUniformLayoutMapping(gl::ShaderMap<sh::BlockLayoutMap> *layoutMapOut);

    // The front-end ensures that the program is not accessed while linking, so it is safe to
    // directly access the state from a potentially parallel job.
    const gl::ProgramState &mState;
    const gl::ProgramExecutable *mExecutable;
    const bool mIsGLES1;
    const vk::PipelineRobustness mPipelineRobustness;
    const vk::PipelineProtectedAccess mPipelineProtectedAccess;

    // Helpers that are internally thread-safe
    PipelineLayoutCache &mPipelineLayoutCache;
    DescriptorSetLayoutCache &mDescriptorSetLayoutCache;

    // Error handling
    VkResult mErrorCode = VK_SUCCESS;
    const char *mErrorFile = nullptr;
    const char *mErrorFunction = nullptr;
    unsigned int mErrorLine = 0;
};

angle::Result LinkTaskVk::linkImpl(const gl::ProgramLinkedResources &resources,
                                   const gl::ProgramMergedVaryings &mergedVaryings,
                                   std::vector<std::shared_ptr<LinkSubTask>> *postLinkSubTasksOut)
{
    ANGLE_TRACE_EVENT0("gpu.angle", "LinkTaskVk::linkImpl");
    ProgramExecutableVk *executableVk = vk::GetImpl(mExecutable);

    // Link resources before assigning SPIR-V locations, to make sure they are ready for the
    // set/binding assignment done there.
    linkResources(resources);

    executableVk->clearVariableInfoMap();

    // Gather variable info and compiled SPIR-V binaries.
    executableVk->assignAllSpvLocations(this, mState, resources);

    gl::ShaderMap<const angle::spirv::Blob *> spirvBlobs;
    SpvGetShaderSpirvCode(mState, &spirvBlobs);

    if (getFeatures().varyingsRequireMatchingPrecisionInSpirv.enabled &&
        getFeatures().enablePrecisionQualifiers.enabled)
    {
        executableVk->resolvePrecisionMismatch(mergedVaryings);
    }

    // Compile the shaders.
    ANGLE_TRY(executableVk->initShaders(this, mExecutable->getLinkedShaderStages(), spirvBlobs,
                                        mIsGLES1));

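    // Lay out the default uniform block of each linked stage (std140) and size its backing
    // storage.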
    ANGLE_TRY(initDefaultUniformBlocks());

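    // Create the descriptor set layouts and the pipeline layout now that resource bindings are
    // known.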
    ANGLE_TRY(executableVk->createPipelineLayout(this, &mPipelineLayoutCache,
                                                 &mDescriptorSetLayoutCache, nullptr));

    // Warm up the pipeline cache by creating a few placeholder pipelines.  This is not done for
    // separable programs, where it is deferred to when the program pipeline is finalized.
    //
    // The cache warm up is skipped for GLES1 for two reasons:
    //
    // - Since GLES1 shaders are limited, the individual programs don't necessarily add new
    //   pipelines; rather, it's draw-time state that controls that.  Since the programs are
    //   generated at draw time, it's just as well to let the pipelines be created using the
    //   renderer's shared cache.
    // - Individual GLES1 tests are long, and this warm up adds considerable overhead to those
    //   tests.
    if (!mState.isSeparable() && !mIsGLES1 && getFeatures().warmUpPipelineCacheAtLink.enabled)
    {
        ANGLE_TRY(executableVk->getPipelineCacheWarmUpTasks(
            mRenderer, mPipelineRobustness, mPipelineProtectedAccess, postLinkSubTasksOut));
    }

    return angle::Result::Continue;
}

void LinkTaskVk::linkResources(const gl::ProgramLinkedResources &resources)
{
    Std140BlockLayoutEncoderFactory std140EncoderFactory;
    gl::ProgramLinkedResourcesLinker linker(&std140EncoderFactory);

    linker.linkResources(mState, resources);
}

angle::Result LinkTaskVk::initDefaultUniformBlocks()
{
    ProgramExecutableVk *executableVk = vk::GetImpl(mExecutable);

    // Process the uniforms of every linked shader stage into std140 packing.
    gl::ShaderMap<sh::BlockLayoutMap> layoutMap;
    gl::ShaderMap<size_t> requiredBufferSize;
    requiredBufferSize.fill(0);

    generateUniformLayoutMapping(&layoutMap, &requiredBufferSize);
    initDefaultUniformLayoutMapping(&layoutMap);

    // All uniform initializations are complete; now resize the buffers accordingly and return.
    return executableVk->resizeUniformBlockMemory(this, requiredBufferSize);
}

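// Computes the std140 layout of a shader stage's default-block uniforms, and the total buffer
// size needed to back them.  Opaque types (samplers, images, etc) do not advance the offset,
// courtesy of VulkanDefaultBlockEncoder.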
void InitDefaultUniformBlock(const std::vector<sh::ShaderVariable> &uniforms,
                             sh::BlockLayoutMap *blockLayoutMapOut,
                             size_t *blockSizeOut)
{
    if (uniforms.empty())
    {
        *blockSizeOut = 0;
        return;
    }

    VulkanDefaultBlockEncoder blockEncoder;
    sh::GetActiveUniformBlockInfo(uniforms, "", &blockEncoder, blockLayoutMapOut);

    *blockSizeOut = blockEncoder.getCurrentOffset();
    return;
}

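// Builds the default uniform block layout map and required buffer size for every linked shader
// stage.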
void LinkTaskVk::generateUniformLayoutMapping(gl::ShaderMap<sh::BlockLayoutMap> *layoutMapOut,
                                              gl::ShaderMap<size_t> *requiredBufferSizeOut)
{
    for (const gl::ShaderType shaderType : mExecutable->getLinkedShaderStages())
    {
        const gl::SharedCompiledShaderState &shader = mState.getAttachedShader(shaderType);

        if (shader)
        {
            const std::vector<sh::ShaderVariable> &uniforms = shader->uniforms;
            InitDefaultUniformBlock(uniforms, &(*layoutMapOut)[shaderType],
                                    &(*requiredBufferSizeOut)[shaderType]);
        }
    }
}

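// Records, for every uniform location, the BlockMemberInfo of the uniform in each stage's
// default uniform block.  Locations that are unused or refer to opaque/inout uniforms get a
// default (empty) entry so that indices line up with uniform locations.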
void LinkTaskVk::initDefaultUniformLayoutMapping(gl::ShaderMap<sh::BlockLayoutMap> *layoutMapOut)
{
    // Init the default block layout info.
    ProgramExecutableVk *executableVk = vk::GetImpl(mExecutable);
    const auto &uniforms = mExecutable->getUniforms();

    // Reserve enough storage in each stage's uniformLayout for one entry per uniform location.
    for (const gl::ShaderType shaderType : mExecutable->getLinkedShaderStages())
    {
        executableVk->getSharedDefaultUniformBlock(shaderType)
            ->uniformLayout.reserve(mExecutable->getUniformLocations().size());
    }

    for (const gl::VariableLocation &location : mExecutable->getUniformLocations())
    {
        gl::ShaderMap<sh::BlockMemberInfo> layoutInfo;

        if (location.used() && !location.ignored)
        {
            const auto &uniform = uniforms[location.index];
            if (uniform.isInDefaultBlock() && !uniform.isSampler() && !uniform.isImage() &&
                !uniform.isFragmentInOut())
            {
                std::string uniformName = mExecutable->getUniformNameByIndex(location.index);
                if (uniform.isArray())
                {
                    // Gets the uniform name without the [0] at the end.
                    uniformName = gl::StripLastArrayIndex(uniformName);
                    ASSERT(uniformName.size() !=
                           mExecutable->getUniformNameByIndex(location.index).size());
                }

                bool found = false;

                for (const gl::ShaderType shaderType : mExecutable->getLinkedShaderStages())
                {
                    auto it = (*layoutMapOut)[shaderType].find(uniformName);
                    if (it != (*layoutMapOut)[shaderType].end())
                    {
                        found = true;
                        layoutInfo[shaderType] = it->second;
                    }
                }
                ASSERT(found);
            }
        }

        for (const gl::ShaderType shaderType : mExecutable->getLinkedShaderStages())
        {
            executableVk->getSharedDefaultUniformBlock(shaderType)
                ->uniformLayout.push_back(layoutInfo[shaderType]);
        }
    }
}
}  // anonymous namespace

// ProgramVk implementation.
ProgramVk::ProgramVk(const gl::ProgramState &state) : ProgramImpl(state) {}

ProgramVk::~ProgramVk() = default;

void ProgramVk::destroy(const gl::Context *context)
{
    ContextVk *contextVk = vk::GetImpl(context);
    getExecutable()->reset(contextVk);
}

angle::Result ProgramVk::load(const gl::Context *context,
                              gl::BinaryInputStream *stream,
                              std::shared_ptr<LinkTask> *loadTaskOut,
                              egl::CacheGetResult *resultOut)
{
    ContextVk *contextVk = vk::GetImpl(context);

    // TODO: parallelize program load. http://anglebug.com/41488637
    *loadTaskOut = {};

    return getExecutable()->load(contextVk, mState.isSeparable(), stream, resultOut);
}

void ProgramVk::save(const gl::Context *context, gl::BinaryOutputStream *stream)
{
    ContextVk *contextVk = vk::GetImpl(context);
    getExecutable()->save(contextVk, mState.isSeparable(), stream);
}

void ProgramVk::setBinaryRetrievableHint(bool retrievable)
{
    // Nothing to do here yet.
}

void ProgramVk::setSeparable(bool separable)
{
    // Nothing to do here yet.
}

angle::Result ProgramVk::link(const gl::Context *context, std::shared_ptr<LinkTask> *linkTaskOut)
{
    ContextVk *contextVk = vk::GetImpl(context);

    *linkTaskOut = std::shared_ptr<LinkTask>(new LinkTaskVk(
        contextVk->getRenderer(), contextVk->getPipelineLayoutCache(),
        contextVk->getDescriptorSetLayoutCache(), mState, context->getState().isGLES1(),
        contextVk->pipelineRobustness(), contextVk->pipelineProtectedAccess()));

    return angle::Result::Continue;
}

GLboolean ProgramVk::validate(const gl::Caps &caps)
{
    // No-op. The spec is very vague about the behavior of validation.
    return GL_TRUE;
}

}  // namespace rx