/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/gpu/GrContext.h"
#include "src/core/SkMipMap.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
#include "src/gpu/glsl/GrGLSLXferProcessor.h"
#include "src/gpu/vk/GrVkBufferView.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkDescriptorPool.h"
#include "src/gpu/vk/GrVkDescriptorSet.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImageView.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkPipelineState.h"
#include "src/gpu/vk/GrVkSampler.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkUniformBuffer.h"

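// GrVkPipelineState bundles everything needed to issue a draw for one
// GrPipeline/GrPrimitiveProcessor combination: the compiled GrVkPipeline, the
// uniform buffer, and the uniform/sampler descriptor sets. A rough sketch of how
// a caller is expected to drive it (the real call sites live in the GrVk command
// buffer code; the names and ordering below are illustrative only):
//
//     pipelineState->bindPipeline(gpu, cmdBuffer);
//     pipelineState->setAndBindUniforms(gpu, rt, origin, primProc, pipeline, cmdBuffer);
//     pipelineState->setAndBindTextures(gpu, primProc, pipeline, primProcTextures, cmdBuffer);
//     // ... record the draw on cmdBuffer ...
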
GrVkPipelineState::GrVkPipelineState(
        GrVkGpu* gpu,
        GrVkPipeline* pipeline,
        const GrVkDescriptorSetManager::Handle& samplerDSHandle,
        const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
        const UniformInfoArray& uniforms,
        uint32_t uniformSize,
        const UniformInfoArray& samplers,
        std::unique_ptr<GrGLSLPrimitiveProcessor> geometryProcessor,
        std::unique_ptr<GrGLSLXferProcessor> xferProcessor,
        std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors,
        int fragmentProcessorCnt)
        : fPipeline(pipeline)
        , fUniformDescriptorSet(nullptr)
        , fSamplerDescriptorSet(nullptr)
        , fSamplerDSHandle(samplerDSHandle)
        , fBuiltinUniformHandles(builtinUniformHandles)
        , fGeometryProcessor(std::move(geometryProcessor))
        , fXferProcessor(std::move(xferProcessor))
        , fFragmentProcessors(std::move(fragmentProcessors))
        , fFragmentProcessorCnt(fragmentProcessorCnt)
        , fDataManager(uniforms, uniformSize) {
    fDescriptorSets[0] = VK_NULL_HANDLE;
    fDescriptorSets[1] = VK_NULL_HANDLE;
    fDescriptorSets[2] = VK_NULL_HANDLE;

    fUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, uniformSize));

    fNumSamplers = samplers.count();

    for (int i = 0; i < fNumSamplers; ++i) {
        // We store the immutable samplers here and take ownership of the ref from the
        // GrVkUniformHandler.
        fImmutableSamplers.push_back(samplers[i].fImmutableSampler);
    }
}

GrVkPipelineState::~GrVkPipelineState() {
    // Must have freed all GPU resources before this is destroyed
    SkASSERT(!fPipeline);
}

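// Releases this state's Vulkan resources while the GrVkGpu is still alive: the
// pipeline and uniform buffer give up their refs, and any held descriptor sets
// are recycled back to the resource provider for reuse.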
void GrVkPipelineState::freeGPUResources(GrVkGpu* gpu) {
    if (fPipeline) {
        fPipeline->unref(gpu);
        fPipeline = nullptr;
    }

    if (fUniformBuffer) {
        fUniformBuffer->release(gpu);
        fUniformBuffer.reset();
    }

    if (fUniformDescriptorSet) {
        fUniformDescriptorSet->recycle(const_cast<GrVkGpu*>(gpu));
        fUniformDescriptorSet = nullptr;
    }

    if (fSamplerDescriptorSet) {
        fSamplerDescriptorSet->recycle(const_cast<GrVkGpu*>(gpu));
        fSamplerDescriptorSet = nullptr;
    }
}

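// Called when the context has been abandoned: the underlying device may already
// be gone, so we drop our refs without making any Vulkan calls to return the
// resources.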
void GrVkPipelineState::abandonGPUResources() {
    if (fPipeline) {
        fPipeline->unrefAndAbandon();
        fPipeline = nullptr;
    }

    if (fUniformBuffer) {
        fUniformBuffer->abandon();
        fUniformBuffer.reset();
    }

    if (fUniformDescriptorSet) {
        fUniformDescriptorSet->unrefAndAbandon();
        fUniformDescriptorSet = nullptr;
    }

    if (fSamplerDescriptorSet) {
        fSamplerDescriptorSet->unrefAndAbandon();
        fSamplerDescriptorSet = nullptr;
    }
}

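// Pushes the current uniform values from the primitive, fragment, and xfer
// processors into fDataManager, uploads them to the uniform buffer if anything
// changed, and binds the uniform descriptor set on the command buffer. A fresh
// descriptor set is fetched whenever the buffer contents are rewritten.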
void GrVkPipelineState::setAndBindUniforms(GrVkGpu* gpu,
                                           const GrRenderTarget* renderTarget,
                                           GrSurfaceOrigin origin,
                                           const GrPrimitiveProcessor& primProc,
                                           const GrPipeline& pipeline,
                                           GrVkCommandBuffer* commandBuffer) {
    this->setRenderTargetState(renderTarget, origin);

    fGeometryProcessor->setData(fDataManager, primProc,
                                GrFragmentProcessor::CoordTransformIter(pipeline));
    GrFragmentProcessor::Iter iter(pipeline);
    GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
    const GrFragmentProcessor* fp = iter.next();
    GrGLSLFragmentProcessor* glslFP = glslIter.next();
    while (fp && glslFP) {
        glslFP->setData(fDataManager, *fp);
        fp = iter.next();
        glslFP = glslIter.next();
    }
    SkASSERT(!fp && !glslFP);

    {
        SkIPoint offset;
        GrTexture* dstTexture = pipeline.peekDstTexture(&offset);

        fXferProcessor->setData(fDataManager, pipeline.getXferProcessor(), dstTexture, offset);
    }

    // Get new descriptor set
    if (fUniformBuffer) {
        int uniformDSIdx = GrVkUniformHandler::kUniformBufferDescSet;
        if (fDataManager.uploadUniformBuffers(gpu, fUniformBuffer.get()) ||
            !fUniformDescriptorSet) {
            if (fUniformDescriptorSet) {
                fUniformDescriptorSet->recycle(gpu);
            }
            fUniformDescriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
            fDescriptorSets[uniformDSIdx] = fUniformDescriptorSet->descriptorSet();
            this->writeUniformBuffers(gpu);
        }
        commandBuffer->bindDescriptorSets(gpu, this, fPipeline->layout(), uniformDSIdx, 1,
                                          &fDescriptorSets[uniformDSIdx], 0, nullptr);
        if (fUniformDescriptorSet) {
            commandBuffer->addRecycledResource(fUniformDescriptorSet);
        }
        if (fUniformBuffer) {
            commandBuffer->addRecycledResource(fUniformBuffer->resource());
        }
    }
}

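// Collects every texture/sampler pair the draw will reference (primitive
// processor samplers, then fragment processor samplers, then the dst-copy
// texture), writes them into a freshly allocated sampler descriptor set, and
// binds that set on the command buffer. The collection order here is expected to
// match the binding indices the uniform handler assigned when the shaders were
// built.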
void GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
                                           const GrPrimitiveProcessor& primProc,
                                           const GrPipeline& pipeline,
                                           const GrTextureProxy* const primProcTextures[],
                                           GrVkCommandBuffer* commandBuffer) {
    SkASSERT(primProcTextures || !primProc.numTextureSamplers());

    struct SamplerBindings {
        GrSamplerState fState;
        GrVkTexture* fTexture;
    };
    SkAutoSTMalloc<8, SamplerBindings> samplerBindings(fNumSamplers);
    int currTextureBinding = 0;

    fGeometryProcessor->setData(fDataManager, primProc,
                                GrFragmentProcessor::CoordTransformIter(pipeline));
    for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
        const auto& sampler = primProc.textureSampler(i);
        auto texture = static_cast<GrVkTexture*>(primProcTextures[i]->peekTexture());
        samplerBindings[currTextureBinding++] = {sampler.samplerState(), texture};
    }

    GrFragmentProcessor::Iter iter(pipeline);
    GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
    const GrFragmentProcessor* fp = iter.next();
    GrGLSLFragmentProcessor* glslFP = glslIter.next();
    while (fp && glslFP) {
        for (int i = 0; i < fp->numTextureSamplers(); ++i) {
            const auto& sampler = fp->textureSampler(i);
            samplerBindings[currTextureBinding++] =
                    {sampler.samplerState(), static_cast<GrVkTexture*>(sampler.peekTexture())};
        }
        fp = iter.next();
        glslFP = glslIter.next();
    }
    SkASSERT(!fp && !glslFP);

    if (GrTextureProxy* dstTextureProxy = pipeline.dstTextureProxy()) {
        samplerBindings[currTextureBinding++] = {
                GrSamplerState::ClampNearest(),
                static_cast<GrVkTexture*>(dstTextureProxy->peekTexture())};
    }

    // Get new descriptor set
    SkASSERT(fNumSamplers == currTextureBinding);
    if (fNumSamplers) {
        if (fSamplerDescriptorSet) {
            fSamplerDescriptorSet->recycle(gpu);
        }
        fSamplerDescriptorSet = gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);
        int samplerDSIdx = GrVkUniformHandler::kSamplerDescSet;
        fDescriptorSets[samplerDSIdx] = fSamplerDescriptorSet->descriptorSet();
        for (int i = 0; i < fNumSamplers; ++i) {
            const GrSamplerState& state = samplerBindings[i].fState;
            GrVkTexture* texture = samplerBindings[i].fTexture;

            const GrVkImageView* textureView = texture->textureView();
            const GrVkSampler* sampler = nullptr;
            if (fImmutableSamplers[i]) {
                sampler = fImmutableSamplers[i];
            } else {
                sampler = gpu->resourceProvider().findOrCreateCompatibleSampler(
                        state, texture->ycbcrConversionInfo());
            }
            SkASSERT(sampler);

            VkDescriptorImageInfo imageInfo;
            memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
            imageInfo.sampler = sampler->sampler();
            imageInfo.imageView = textureView->imageView();
            imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

            VkWriteDescriptorSet writeInfo;
            memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
            writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
            writeInfo.pNext = nullptr;
            writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
            writeInfo.dstBinding = i;
            writeInfo.dstArrayElement = 0;
            writeInfo.descriptorCount = 1;
            writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
            writeInfo.pImageInfo = &imageInfo;
            writeInfo.pBufferInfo = nullptr;
            writeInfo.pTexelBufferView = nullptr;

            GR_VK_CALL(gpu->vkInterface(),
                       UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
            commandBuffer->addResource(sampler);
            if (!fImmutableSamplers[i]) {
                sampler->unref(gpu);
            }
            commandBuffer->addResource(samplerBindings[i].fTexture->textureView());
            commandBuffer->addResource(samplerBindings[i].fTexture->resource());
        }

        commandBuffer->bindDescriptorSets(gpu, this, fPipeline->layout(), samplerDSIdx, 1,
                                          &fDescriptorSets[samplerDSIdx], 0, nullptr);
        commandBuffer->addRecycledResource(fSamplerDescriptorSet);
    }
}

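// Helper that fills out a VkDescriptorBufferInfo/VkWriteDescriptorSet pair
// pointing the uniform-buffer binding of the given descriptor set at the given
// GrVkUniformBuffer.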
void set_uniform_descriptor_writes(VkWriteDescriptorSet* descriptorWrite,
                                   VkDescriptorBufferInfo* bufferInfo,
                                   const GrVkUniformBuffer* buffer,
                                   VkDescriptorSet descriptorSet) {

    memset(bufferInfo, 0, sizeof(VkDescriptorBufferInfo));
    bufferInfo->buffer = buffer->buffer();
    bufferInfo->offset = buffer->offset();
    bufferInfo->range = buffer->size();

    memset(descriptorWrite, 0, sizeof(VkWriteDescriptorSet));
    descriptorWrite->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptorWrite->pNext = nullptr;
    descriptorWrite->dstSet = descriptorSet;
    descriptorWrite->dstBinding = GrVkUniformHandler::kUniformBinding;
    descriptorWrite->dstArrayElement = 0;
    descriptorWrite->descriptorCount = 1;
    descriptorWrite->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptorWrite->pImageInfo = nullptr;
    descriptorWrite->pBufferInfo = bufferInfo;
    descriptorWrite->pTexelBufferView = nullptr;
}

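// Writes the uniform-buffer descriptor into the currently held uniform
// descriptor set. The arrays below hold up to three writes, but only the single
// uniform buffer write is issued here.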
void GrVkPipelineState::writeUniformBuffers(const GrVkGpu* gpu) {
    VkWriteDescriptorSet descriptorWrites[3];
    VkDescriptorBufferInfo bufferInfos[3];

    uint32_t writeCount = 0;

    if (fUniformBuffer.get()) {
        set_uniform_descriptor_writes(&descriptorWrites[writeCount],
                                      &bufferInfos[writeCount],
                                      fUniformBuffer.get(),
                                      fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet]);
        ++writeCount;
    }

    if (writeCount) {
        GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
                                                            writeCount,
                                                            descriptorWrites,
                                                            0, nullptr));
    }
}

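// Updates the built-in render-target uniforms (RT height for y-flipping
// gl_FragCoord, and the RT adjustment vector used to map to normalized device
// coordinates) whenever the render target's size or origin changes.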
void GrVkPipelineState::setRenderTargetState(const GrRenderTarget* rt, GrSurfaceOrigin origin) {

    // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
    if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
        fRenderTargetState.fRenderTargetSize.fHeight != rt->height()) {
        fDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, SkIntToScalar(rt->height()));
    }

    // set RT adjustment
    SkISize size;
    size.set(rt->width(), rt->height());
    SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
    if (fRenderTargetState.fRenderTargetOrigin != origin ||
        fRenderTargetState.fRenderTargetSize != size) {
        fRenderTargetState.fRenderTargetSize = size;
        fRenderTargetState.fRenderTargetOrigin = origin;

        float rtAdjustmentVec[4];
        fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
        fDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
    }
}

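// Binds the cached GrVkPipeline on the command buffer; descriptor sets are bound
// separately by setAndBindUniforms() and setAndBindTextures().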
void GrVkPipelineState::bindPipeline(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer) {
    commandBuffer->bindPipeline(gpu, fPipeline);
}