/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkPipelineState.h"

#include "src/core/SkMipmap.h"
#include "src/gpu/ganesh/GrFragmentProcessor.h"
#include "src/gpu/ganesh/GrGeometryProcessor.h"
#include "src/gpu/ganesh/GrPipeline.h"
#include "src/gpu/ganesh/GrRenderTarget.h"
#include "src/gpu/ganesh/GrTexture.h"
#include "src/gpu/ganesh/GrXferProcessor.h"
#include "src/gpu/ganesh/effects/GrTextureEffect.h"
#include "src/gpu/ganesh/vk/GrVkBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
#include "src/gpu/ganesh/vk/GrVkDescriptorPool.h"
#include "src/gpu/ganesh/vk/GrVkDescriptorSet.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkImageView.h"
#include "src/gpu/ganesh/vk/GrVkPipeline.h"
#include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
#include "src/gpu/ganesh/vk/GrVkSampler.h"
#include "src/gpu/ganesh/vk/GrVkTexture.h"

using namespace skia_private;

GrVkPipelineState(GrVkGpu * gpu,sk_sp<const GrVkPipeline> pipeline,const GrVkDescriptorSetManager::Handle & samplerDSHandle,const GrGLSLBuiltinUniformHandles & builtinUniformHandles,const UniformInfoArray & uniforms,uint32_t uniformSize,bool usePushConstants,const UniformInfoArray & samplers,std::unique_ptr<GrGeometryProcessor::ProgramImpl> gpImpl,std::unique_ptr<GrXferProcessor::ProgramImpl> xpImpl,std::vector<std::unique_ptr<GrFragmentProcessor::ProgramImpl>> fpImpls)31 GrVkPipelineState::GrVkPipelineState(
32 GrVkGpu* gpu,
33 sk_sp<const GrVkPipeline> pipeline,
34 const GrVkDescriptorSetManager::Handle& samplerDSHandle,
35 const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
36 const UniformInfoArray& uniforms,
37 uint32_t uniformSize,
38 bool usePushConstants,
39 const UniformInfoArray& samplers,
40 std::unique_ptr<GrGeometryProcessor::ProgramImpl> gpImpl,
41 std::unique_ptr<GrXferProcessor::ProgramImpl> xpImpl,
42 std::vector<std::unique_ptr<GrFragmentProcessor::ProgramImpl>> fpImpls)
43 : fPipeline(std::move(pipeline))
44 , fSamplerDSHandle(samplerDSHandle)
45 , fBuiltinUniformHandles(builtinUniformHandles)
46 , fGPImpl(std::move(gpImpl))
47 , fXPImpl(std::move(xpImpl))
48 , fFPImpls(std::move(fpImpls))
49 , fDataManager(uniforms, uniformSize, usePushConstants) {
50 fNumSamplers = samplers.count();
51 for (const auto& sampler : samplers.items()) {
52 // We store the immutable samplers here and take a ref on the sampler. Once we switch to
53 // using sk_sps here we should just move the immutable samplers to save the extra ref/unref.
54 if (sampler.fImmutableSampler) {
55 sampler.fImmutableSampler->ref();
56 }
57 fImmutableSamplers.push_back(sampler.fImmutableSampler);
58 }
59 }
60
~GrVkPipelineState()61 GrVkPipelineState::~GrVkPipelineState() {
62 // Must have freed all GPU resources before this is destroyed
63 SkASSERT(!fPipeline);
64 }
65
freeGPUResources(GrVkGpu * gpu)66 void GrVkPipelineState::freeGPUResources(GrVkGpu* gpu) {
67 fPipeline.reset();
68 fDataManager.releaseData();
69 for (int i = 0; i < fImmutableSamplers.size(); ++i) {
70 if (fImmutableSamplers[i]) {
71 fImmutableSamplers[i]->unref();
72 fImmutableSamplers[i] = nullptr;
73 }
74 }
75 }
76
setAndBindUniforms(GrVkGpu * gpu,SkISize colorAttachmentDimensions,const GrProgramInfo & programInfo,GrVkCommandBuffer * commandBuffer)77 bool GrVkPipelineState::setAndBindUniforms(GrVkGpu* gpu,
78 SkISize colorAttachmentDimensions,
79 const GrProgramInfo& programInfo,
80 GrVkCommandBuffer* commandBuffer) {
81 this->setRenderTargetState(colorAttachmentDimensions, programInfo.origin());
82
83 fGPImpl->setData(fDataManager, *gpu->caps()->shaderCaps(), programInfo.geomProc());
84
85 for (int i = 0; i < programInfo.pipeline().numFragmentProcessors(); ++i) {
86 const auto& fp = programInfo.pipeline().getFragmentProcessor(i);
87 fp.visitWithImpls([&](const GrFragmentProcessor& fp,
88 GrFragmentProcessor::ProgramImpl& impl) {
89 impl.setData(fDataManager, fp);
90 }, *fFPImpls[i]);
91 }
92
93 programInfo.pipeline().setDstTextureUniforms(fDataManager, &fBuiltinUniformHandles);
94 fXPImpl->setData(fDataManager, programInfo.pipeline().getXferProcessor());
95
96 // Upload uniform data and bind descriptor set.
97 auto [uniformBuffer, success] = fDataManager.uploadUniforms(gpu, fPipeline->layout(),
98 commandBuffer);
99 if (!success) {
100 return false;
101 }
102 if (uniformBuffer) {
103 const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(uniformBuffer.get());
104 static const int kUniformDSIdx = GrVkUniformHandler::kUniformBufferDescSet;
105 commandBuffer->bindDescriptorSets(gpu, fPipeline->layout(), kUniformDSIdx, /*setCount=*/1,
106 vkBuffer->uniformDescriptorSet(),
107 /*dynamicOffsetCount=*/0, /*dynamicOffsets=*/nullptr);
108 commandBuffer->addGrBuffer(std::move(uniformBuffer));
109 }
110 return true;
111 }
112
setAndBindTextures(GrVkGpu * gpu,const GrGeometryProcessor & geomProc,const GrPipeline & pipeline,const GrSurfaceProxy * const geomProcTextures[],GrVkCommandBuffer * commandBuffer)113 bool GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
114 const GrGeometryProcessor& geomProc,
115 const GrPipeline& pipeline,
116 const GrSurfaceProxy* const geomProcTextures[],
117 GrVkCommandBuffer* commandBuffer) {
118 SkASSERT(geomProcTextures || !geomProc.numTextureSamplers());
119 if (!fNumSamplers) {
120 return true;
121 }
122 struct SamplerBindings {
123 GrSamplerState fState;
124 GrVkTexture* fTexture;
125 };
126 AutoSTArray<8, SamplerBindings> samplerBindings(fNumSamplers);
127 int currTextureBinding = 0;
128
129 for (int i = 0; i < geomProc.numTextureSamplers(); ++i) {
130 SkASSERT(geomProcTextures[i]->asTextureProxy());
131 const auto& sampler = geomProc.textureSampler(i);
132 auto texture = static_cast<GrVkTexture*>(geomProcTextures[i]->peekTexture());
133 samplerBindings[currTextureBinding++] = {sampler.samplerState(), texture};
134 }
135
136
137 if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
138 samplerBindings[currTextureBinding++] = {GrSamplerState::Filter::kNearest,
139 static_cast<GrVkTexture*>(dstTexture)};
140 }
141
142 pipeline.visitTextureEffects([&](const GrTextureEffect& te) {
143 GrSamplerState samplerState = te.samplerState();
144 auto* texture = static_cast<GrVkTexture*>(te.texture());
145 samplerBindings[currTextureBinding++] = {samplerState, texture};
146 });
147
148 // Get new descriptor set
149 SkASSERT(fNumSamplers == currTextureBinding);
150 static const int kSamplerDSIdx = GrVkUniformHandler::kSamplerDescSet;
151
152 if (fNumSamplers == 1) {
153 auto texture = samplerBindings[0].fTexture;
154 auto texAttachment = texture->textureImage();
155 const auto& samplerState = samplerBindings[0].fState;
156 const GrVkDescriptorSet* descriptorSet = texture->cachedSingleDescSet(samplerState);
157 if (descriptorSet) {
158 commandBuffer->addGrSurface(sk_ref_sp<const GrSurface>(texture));
159 commandBuffer->addResource(texAttachment->textureView());
160 commandBuffer->addResource(texAttachment->resource());
161 commandBuffer->addRecycledResource(descriptorSet);
162 commandBuffer->bindDescriptorSets(gpu, fPipeline->layout(), kSamplerDSIdx,
163 /*setCount=*/1, descriptorSet->descriptorSet(),
164 /*dynamicOffsetCount=*/0,
165 /*dynamicOffsets=*/nullptr);
166 return true;
167 }
168 }
169
170 const GrVkDescriptorSet* descriptorSet =
171 gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);
172 if (!descriptorSet) {
173 return false;
174 }
175
176 for (int i = 0; i < fNumSamplers; ++i) {
177 GrSamplerState state = samplerBindings[i].fState;
178 GrVkTexture* texture = samplerBindings[i].fTexture;
179 auto texAttachment = texture->textureImage();
180
181 const GrVkImageView* textureView = texAttachment->textureView();
182 const GrVkSampler* sampler = nullptr;
183 if (fImmutableSamplers[i]) {
184 sampler = fImmutableSamplers[i];
185 } else {
186 sampler = gpu->resourceProvider().findOrCreateCompatibleSampler(
187 state, texAttachment->ycbcrConversionInfo());
188 if (!sampler) {
189 descriptorSet->recycle();
190 return false;
191 }
192 }
193 SkASSERT(sampler);
194
195 VkDescriptorImageInfo imageInfo;
196 memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
197 imageInfo.sampler = fImmutableSamplers[i] ? VK_NULL_HANDLE : sampler->sampler();
198 imageInfo.imageView = textureView->imageView();
199 imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
200
201 VkWriteDescriptorSet writeInfo;
202 memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
203 writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
204 writeInfo.pNext = nullptr;
205 writeInfo.dstSet = *descriptorSet->descriptorSet();
206 writeInfo.dstBinding = i;
207 writeInfo.dstArrayElement = 0;
208 writeInfo.descriptorCount = 1;
209 writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
210 writeInfo.pImageInfo = &imageInfo;
211 writeInfo.pBufferInfo = nullptr;
212 writeInfo.pTexelBufferView = nullptr;
213
214 GR_VK_CALL(gpu->vkInterface(),
215 UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
216 commandBuffer->addResource(sampler);
217 if (!fImmutableSamplers[i]) {
218 sampler->unref();
219 }
220 commandBuffer->addResource(textureView);
221 commandBuffer->addResource(texAttachment->resource());
222 }
223 if (fNumSamplers == 1) {
224 GrSamplerState state = samplerBindings[0].fState;
225 GrVkTexture* texture = samplerBindings[0].fTexture;
226 texture->addDescriptorSetToCache(descriptorSet, state);
227 }
228
229 commandBuffer->bindDescriptorSets(gpu, fPipeline->layout(), kSamplerDSIdx, /*setCount=*/1,
230 descriptorSet->descriptorSet(),
231 /*dynamicOffsetCount=*/0, /*dynamicOffsets=*/nullptr);
232 commandBuffer->addRecycledResource(descriptorSet);
233 descriptorSet->recycle();
234 return true;
235 }
236
setAndBindInputAttachment(GrVkGpu * gpu,gr_rp<const GrVkDescriptorSet> inputDescSet,GrVkCommandBuffer * commandBuffer)237 bool GrVkPipelineState::setAndBindInputAttachment(GrVkGpu* gpu,
238 gr_rp<const GrVkDescriptorSet> inputDescSet,
239 GrVkCommandBuffer* commandBuffer) {
240 SkASSERT(inputDescSet);
241 commandBuffer->bindDescriptorSets(gpu, fPipeline->layout(), GrVkUniformHandler::kInputDescSet,
242 /*setCount=*/1, inputDescSet->descriptorSet(),
243 /*dynamicOffsetCount=*/0, /*dynamicOffsets=*/nullptr);
244 // We don't add the input resource to the command buffer to track since the input will be
245 // the same as the color attachment which is already tracked on the command buffer.
246 commandBuffer->addRecycledResource(std::move(inputDescSet));
247 return true;
248 }
249
setRenderTargetState(SkISize colorAttachmentDimensions,GrSurfaceOrigin origin)250 void GrVkPipelineState::setRenderTargetState(SkISize colorAttachmentDimensions,
251 GrSurfaceOrigin origin) {
252 // Set RT adjustment and RT flip
253 SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
254 if (fRenderTargetState.fRenderTargetOrigin != origin ||
255 fRenderTargetState.fRenderTargetSize != colorAttachmentDimensions) {
256 fRenderTargetState.fRenderTargetSize = colorAttachmentDimensions;
257 fRenderTargetState.fRenderTargetOrigin = origin;
258
259 // The client will mark a swap buffer as kTopLeft when making a SkSurface because
260 // Vulkan's framebuffer space has (0, 0) at the top left. This agrees with Skia's device
261 // coords and with Vulkan's NDC that has (-1, -1) in the top left. So a flip is needed when
262 // surface origin is kBottomLeft rather than kTopLeft.
263 bool flip = (origin == kBottomLeft_GrSurfaceOrigin);
264 std::array<float, 4> v = SkSL::Compiler::GetRTAdjustVector(colorAttachmentDimensions, flip);
265 fDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, v.data());
266 if (fBuiltinUniformHandles.fRTFlipUni.isValid()) {
267 std::array<float, 2> d =
268 SkSL::Compiler::GetRTFlipVector(colorAttachmentDimensions.height(), flip);
269 fDataManager.set2fv(fBuiltinUniformHandles.fRTFlipUni, 1, d.data());
270 }
271 }
272 }
273
bindPipeline(const GrVkGpu * gpu,GrVkCommandBuffer * commandBuffer)274 void GrVkPipelineState::bindPipeline(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer) {
275 commandBuffer->bindPipeline(gpu, fPipeline);
276 }
277