1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "GrVkGpuCommandBuffer.h"
9
10 #include "GrFixedClip.h"
11 #include "GrMesh.h"
12 #include "GrOpFlushState.h"
13 #include "GrPipeline.h"
14 #include "GrRenderTargetPriv.h"
15 #include "GrTexturePriv.h"
16 #include "GrVkCommandBuffer.h"
17 #include "GrVkGpu.h"
18 #include "GrVkPipeline.h"
19 #include "GrVkRenderPass.h"
20 #include "GrVkRenderTarget.h"
21 #include "GrVkResourceProvider.h"
22 #include "GrVkTexture.h"
23 #include "SkRect.h"
24
get_vk_load_store_ops(const GrGpuCommandBuffer::LoadAndStoreInfo & info,VkAttachmentLoadOp * loadOp,VkAttachmentStoreOp * storeOp)25 void get_vk_load_store_ops(const GrGpuCommandBuffer::LoadAndStoreInfo& info,
26 VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
27 switch (info.fLoadOp) {
28 case GrGpuCommandBuffer::LoadOp::kLoad:
29 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
30 break;
31 case GrGpuCommandBuffer::LoadOp::kClear:
32 *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
33 break;
34 case GrGpuCommandBuffer::LoadOp::kDiscard:
35 *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
36 break;
37 default:
38 SK_ABORT("Invalid LoadOp");
39 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
40 }
41
42 switch (info.fStoreOp) {
43 case GrGpuCommandBuffer::StoreOp::kStore:
44 *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
45 break;
46 case GrGpuCommandBuffer::StoreOp::kDiscard:
47 *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
48 break;
49 default:
50 SK_ABORT("Invalid StoreOp");
51 *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
52 }
53 }
54
GrVkGpuCommandBuffer(GrVkGpu * gpu,const LoadAndStoreInfo & colorInfo,const LoadAndStoreInfo & stencilInfo)55 GrVkGpuCommandBuffer::GrVkGpuCommandBuffer(GrVkGpu* gpu,
56 const LoadAndStoreInfo& colorInfo,
57 const LoadAndStoreInfo& stencilInfo)
58 : fGpu(gpu)
59 , fRenderTarget(nullptr)
60 , fClearColor(GrColor4f::FromGrColor(colorInfo.fClearColor))
61 , fLastPipelineState(nullptr) {
62
63 get_vk_load_store_ops(colorInfo, &fVkColorLoadOp, &fVkColorStoreOp);
64
65 get_vk_load_store_ops(stencilInfo, &fVkStencilLoadOp, &fVkStencilStoreOp);
66
67 fCurrentCmdInfo = -1;
68 }
69
init(GrVkRenderTarget * target)70 void GrVkGpuCommandBuffer::init(GrVkRenderTarget* target) {
71 SkASSERT(!fRenderTarget);
72 fRenderTarget = target;
73
74 GrVkRenderPass::LoadStoreOps vkColorOps(fVkColorLoadOp, fVkColorStoreOp);
75 GrVkRenderPass::LoadStoreOps vkStencilOps(fVkStencilLoadOp, fVkStencilStoreOp);
76
77 CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
78 SkASSERT(fCommandBufferInfos.count() == 1);
79 fCurrentCmdInfo = 0;
80
81 const GrVkResourceProvider::CompatibleRPHandle& rpHandle = target->compatibleRenderPassHandle();
82 if (rpHandle.isValid()) {
83 cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
84 vkColorOps,
85 vkStencilOps);
86 } else {
87 cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*target,
88 vkColorOps,
89 vkStencilOps);
90 }
91
92 cbInfo.fColorClearValue.color.float32[0] = fClearColor.fRGBA[0];
93 cbInfo.fColorClearValue.color.float32[1] = fClearColor.fRGBA[1];
94 cbInfo.fColorClearValue.color.float32[2] = fClearColor.fRGBA[2];
95 cbInfo.fColorClearValue.color.float32[3] = fClearColor.fRGBA[3];
96
97 cbInfo.fBounds.setEmpty();
98 cbInfo.fIsEmpty = true;
99 cbInfo.fStartsWithClear = false;
100
101 cbInfo.fCommandBuffers.push_back(fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer());
102 cbInfo.currentCmdBuf()->begin(fGpu, target->framebuffer(), cbInfo.fRenderPass);
103 }
104
105
~GrVkGpuCommandBuffer()106 GrVkGpuCommandBuffer::~GrVkGpuCommandBuffer() {
107 for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
108 CommandBufferInfo& cbInfo = fCommandBufferInfos[i];
109 for (int j = 0; j < cbInfo.fCommandBuffers.count(); ++j) {
110 cbInfo.fCommandBuffers[j]->unref(fGpu);
111 }
112 cbInfo.fRenderPass->unref(fGpu);
113 }
114 }
115
gpu()116 GrGpu* GrVkGpuCommandBuffer::gpu() { return fGpu; }
renderTarget()117 GrRenderTarget* GrVkGpuCommandBuffer::renderTarget() { return fRenderTarget; }
118
end()119 void GrVkGpuCommandBuffer::end() {
120 if (fCurrentCmdInfo >= 0) {
121 fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);
122 }
123 }
124
// Flushes everything recorded into this GrVkGpuCommandBuffer: transitions
// the target image (and any stencil attachment) into attachment layouts,
// performs deferred inline uploads, then submits each CommandBufferInfo's
// secondary buffers inside its render pass.
void GrVkGpuCommandBuffer::onSubmit() {
    // init() never ran, so nothing was ever recorded.
    if (!fRenderTarget) {
        return;
    }
    // Change layout of our render target so it can be used as the color attachment. Currently
    // we don't attach the resolve to the framebuffer so no need to change its layout.
    // With MSAA the render pass writes to the msaa image, so that is the image we transition.
    GrVkImage* targetImage = fRenderTarget->msaaImage() ? fRenderTarget->msaaImage()
                                                        : fRenderTarget;

    // Change layout of our render target so it can be used as the color attachment
    targetImage->setImageLayout(fGpu,
                                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                                false);

    // If we are using a stencil attachment we also need to update its layout
    if (GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment()) {
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        vkStencil->setImageLayout(fGpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                  VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                                  false);
    }

    for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
        CommandBufferInfo& cbInfo = fCommandBufferInfos[i];

        // Run any uploads that were deferred to just before this render pass
        // (see inlineUpload()).
        for (int j = 0; j < cbInfo.fPreDrawUploads.count(); ++j) {
            InlineUploadInfo& iuInfo = cbInfo.fPreDrawUploads[j];
            iuInfo.fFlushState->doUpload(iuInfo.fUpload);
        }

        // TODO: We can't add this optimization yet since many things create a scratch texture which
        // adds the discard immediately, but then don't draw to it right away. This causes the
        // discard to be ignored and we get yelled at for loading uninitialized data. However, once
        // MDP lands, the discard will get reordered with the rest of the draw commands and we can
        // re-enable this.
#if 0
        if (cbInfo.fIsEmpty && !cbInfo.fStartsWithClear) {
            // We have submitted no actual draw commands to the command buffer and we are not using
            // the render pass to do a clear so there is no need to submit anything.
            continue;
        }
#endif
        // Clip the accumulated draw bounds to the render target; an empty
        // intersection means nothing visible was recorded, so skip the submit.
        if (cbInfo.fBounds.intersect(0, 0,
                                     SkIntToScalar(fRenderTarget->width()),
                                     SkIntToScalar(fRenderTarget->height()))) {
            SkIRect iBounds;
            cbInfo.fBounds.roundOut(&iBounds);

            fGpu->submitSecondaryCommandBuffer(cbInfo.fCommandBuffers, cbInfo.fRenderPass,
                                               &cbInfo.fColorClearValue, fRenderTarget, iBounds);
        }
    }
}
183
// Handles a discard of the render target's contents. When nothing has been
// recorded yet we can realize the discard for free by swapping the render
// pass load ops to DONT_CARE; otherwise the discard is dropped (see the TODO
// in onSubmit about reordering discards).
void GrVkGpuCommandBuffer::discard(GrRenderTarget* rt) {
    GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(rt);
    if (!fRenderTarget) {
        this->init(target);
    }
    SkASSERT(target == fRenderTarget);

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    if (cbInfo.fIsEmpty) {
        // Change the render pass to do a don't-care load of both attachments instead
        GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                                VK_ATTACHMENT_STORE_OP_STORE);
        GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                                  VK_ATTACHMENT_STORE_OP_STORE);

        const GrVkRenderPass* oldRP = cbInfo.fRenderPass;

        const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fRenderTarget->compatibleRenderPassHandle();
        if (rpHandle.isValid()) {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        } else {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*fRenderTarget,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        }

        // Only the load/store ops changed, so the new pass should remain
        // compatible with the one the secondary buffers were recorded against.
        SkASSERT(cbInfo.fRenderPass->isCompatible(*oldRP));
        oldRP->unref(fGpu);
        // Join the full target bounds so onSubmit does not cull this render
        // pass as empty.
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
        cbInfo.fStartsWithClear = false;
    }
}
219
// Clears the stencil-clip state over the clip region via vkCmdClearAttachments.
// 'insideStencilMask' selects whether the clip bit is set or zeroed.
void GrVkGpuCommandBuffer::onClearStencilClip(GrRenderTarget* rt, const GrFixedClip& clip,
                                              bool insideStencilMask) {
    SkASSERT(!clip.hasWindowRectangles());

    GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(rt);
    if (!fRenderTarget) {
        this->init(target);
    }
    SkASSERT(target == fRenderTarget);

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    GrStencilAttachment* sb = fRenderTarget->renderTargetPriv().getStencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideStencilMask) {
        // Set only the top stencil bit (used as the clip marker); lower bits stay zero.
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fRenderTarget->origin()) {
        vkRect = clip.scissorRect();
    } else {
        // Vulkan's coordinate system is top-left origin, so mirror a
        // bottom-left-origin scissor rect in y.
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    // stencilIndex is only used to assert the render pass really has a
    // stencil attachment.
    uint32_t stencilIndex;
    SkAssertResult(cbInfo.fRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    cbInfo.currentCmdBuf()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    cbInfo.fIsEmpty = false;

    // Update command buffer bounds
    if (!clip.scissorEnabled()) {
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
    } else {
        cbInfo.fBounds.join(SkRect::Make(clip.scissorRect()));
    }
}
286
// Clears 'color' over the clip region. When nothing has been recorded yet
// and the clear is unscissored, the clear is folded into the render pass
// load op; otherwise an explicit vkCmdClearAttachments is recorded.
void GrVkGpuCommandBuffer::onClear(GrRenderTarget* rt, const GrFixedClip& clip, GrColor color) {
    SkASSERT(!clip.hasWindowRectangles());

    // Lazily bind to the target on first use.
    GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(rt);
    if (!fRenderTarget) {
        this->init(target);
    }
    SkASSERT(target == fRenderTarget);

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    if (cbInfo.fIsEmpty && !clip.scissorEnabled()) {
        // We will change the render pass to do a clear load instead
        GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_CLEAR,
                                                VK_ATTACHMENT_STORE_OP_STORE);
        GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                                  VK_ATTACHMENT_STORE_OP_STORE);

        const GrVkRenderPass* oldRP = cbInfo.fRenderPass;

        const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fRenderTarget->compatibleRenderPassHandle();
        if (rpHandle.isValid()) {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        } else {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*fRenderTarget,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        }

        // Only the load/store ops changed, so the new pass should remain
        // compatible with the one the secondary buffers were recorded against.
        SkASSERT(cbInfo.fRenderPass->isCompatible(*oldRP));
        oldRP->unref(fGpu);

        GrColorToRGBAFloat(color, cbInfo.fColorClearValue.color.float32);
        cbInfo.fStartsWithClear = true;

        // Update command buffer bounds
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
        return;
    }

    // We always do a sub rect clear with clearAttachments since we are inside a render pass
    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fRenderTarget->origin()) {
        vkRect = clip.scissorRect();
    } else {
        // Vulkan's coordinate system is top-left origin, so mirror a
        // bottom-left-origin scissor rect in y.
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(cbInfo.fRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    cbInfo.currentCmdBuf()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    cbInfo.fIsEmpty = false;

    // Update command buffer bounds
    if (!clip.scissorEnabled()) {
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
    } else {
        cbInfo.fBounds.join(SkRect::Make(clip.scissorRect()));
    }
    return;
}
371
addAdditionalCommandBuffer()372 void GrVkGpuCommandBuffer::addAdditionalCommandBuffer() {
373 CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
374 cbInfo.currentCmdBuf()->end(fGpu);
375 cbInfo.fCommandBuffers.push_back(fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer());
376 cbInfo.currentCmdBuf()->begin(fGpu, fRenderTarget->framebuffer(), cbInfo.fRenderPass);
377 }
378
addAdditionalRenderPass()379 void GrVkGpuCommandBuffer::addAdditionalRenderPass() {
380 fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);
381
382 CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
383 fCurrentCmdInfo++;
384
385 GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
386 VK_ATTACHMENT_STORE_OP_STORE);
387 GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
388 VK_ATTACHMENT_STORE_OP_STORE);
389
390 const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
391 fRenderTarget->compatibleRenderPassHandle();
392 if (rpHandle.isValid()) {
393 cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
394 vkColorOps,
395 vkStencilOps);
396 } else {
397 cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*fRenderTarget,
398 vkColorOps,
399 vkStencilOps);
400 }
401
402 cbInfo.fCommandBuffers.push_back(fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer());
403 // It shouldn't matter what we set the clear color to here since we will assume loading of the
404 // attachment.
405 memset(&cbInfo.fColorClearValue, 0, sizeof(VkClearValue));
406 cbInfo.fBounds.setEmpty();
407 cbInfo.fIsEmpty = true;
408 cbInfo.fStartsWithClear = false;
409
410 cbInfo.currentCmdBuf()->begin(fGpu, fRenderTarget->framebuffer(), cbInfo.fRenderPass);
411 }
412
inlineUpload(GrOpFlushState * state,GrDrawOp::DeferredUploadFn & upload,GrRenderTarget * rt)413 void GrVkGpuCommandBuffer::inlineUpload(GrOpFlushState* state, GrDrawOp::DeferredUploadFn& upload,
414 GrRenderTarget* rt) {
415 GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(rt);
416 if (!fRenderTarget) {
417 this->init(target);
418 }
419 if (!fCommandBufferInfos[fCurrentCmdInfo].fIsEmpty) {
420 this->addAdditionalRenderPass();
421 }
422 fCommandBufferInfos[fCurrentCmdInfo].fPreDrawUploads.emplace_back(state, upload);
423 }
424
425 ////////////////////////////////////////////////////////////////////////////////
426
bindGeometry(const GrPrimitiveProcessor & primProc,const GrNonInstancedMesh & mesh)427 void GrVkGpuCommandBuffer::bindGeometry(const GrPrimitiveProcessor& primProc,
428 const GrNonInstancedMesh& mesh) {
429 CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
430 // There is no need to put any memory barriers to make sure host writes have finished here.
431 // When a command buffer is submitted to a queue, there is an implicit memory barrier that
432 // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
433 // an active RenderPass.
434 SkASSERT(!mesh.vertexBuffer()->isCPUBacked());
435 GrVkVertexBuffer* vbuf;
436 vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
437 SkASSERT(vbuf);
438 SkASSERT(!vbuf->isMapped());
439
440 cbInfo.currentCmdBuf()->bindVertexBuffer(fGpu, vbuf);
441
442 if (mesh.isIndexed()) {
443 SkASSERT(!mesh.indexBuffer()->isCPUBacked());
444 GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer();
445 SkASSERT(ibuf);
446 SkASSERT(!ibuf->isMapped());
447
448 cbInfo.currentCmdBuf()->bindIndexBuffer(fGpu, ibuf);
449 }
450 }
451
// Finds or creates a GrVkPipelineState compatible with the pipeline,
// primitive processor, primitive type, and current render pass; uploads its
// data and binds it (plus dynamic state) to the current secondary command
// buffer. Returns null when no compatible pipeline state could be made;
// callers must skip the draw in that case.
sk_sp<GrVkPipelineState> GrVkGpuCommandBuffer::prepareDrawState(
        const GrPipeline& pipeline,
        const GrPrimitiveProcessor& primProc,
        GrPrimitiveType primitiveType) {
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    SkASSERT(cbInfo.fRenderPass);

    sk_sp<GrVkPipelineState> pipelineState =
        fGpu->resourceProvider().findOrCreateCompatiblePipelineState(pipeline,
                                                                     primProc,
                                                                     primitiveType,
                                                                     *cbInfo.fRenderPass);
    if (!pipelineState) {
        return pipelineState;
    }

    // When the caps request it, start a fresh secondary command buffer on a
    // pipeline change (only needed once something has been recorded).
    if (!cbInfo.fIsEmpty &&
        fLastPipelineState && fLastPipelineState != pipelineState.get() &&
        fGpu->vkCaps().newSecondaryCBOnPipelineChange()) {
        this->addAdditionalCommandBuffer();
    }
    fLastPipelineState = pipelineState.get();

    pipelineState->setData(fGpu, primProc, pipeline);

    pipelineState->bind(fGpu, cbInfo.currentCmdBuf());

    GrVkPipeline::SetDynamicState(fGpu, cbInfo.currentCmdBuf(), pipeline);

    return pipelineState;
}
483
prepare_sampled_images(const GrProcessor & processor,GrVkGpu * gpu)484 static void prepare_sampled_images(const GrProcessor& processor, GrVkGpu* gpu) {
485 for (int i = 0; i < processor.numTextureSamplers(); ++i) {
486 const GrProcessor::TextureSampler& sampler = processor.textureSampler(i);
487 GrVkTexture* vkTexture = static_cast<GrVkTexture*>(sampler.texture());
488 SkASSERT(vkTexture);
489
490 // We may need to resolve the texture first if it is also a render target
491 GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(vkTexture->asRenderTarget());
492 if (texRT) {
493 gpu->onResolveRenderTarget(texRT);
494 }
495
496 const GrSamplerParams& params = sampler.params();
497 // Check if we need to regenerate any mip maps
498 if (GrSamplerParams::kMipMap_FilterMode == params.filterMode()) {
499 if (vkTexture->texturePriv().mipMapsAreDirty()) {
500 gpu->generateMipmap(vkTexture);
501 vkTexture->texturePriv().dirtyMipMaps(false);
502 }
503 }
504
505 // TODO: If we ever decide to create the secondary command buffers ahead of time before we
506 // are actually going to submit them, we will need to track the sampled images and delay
507 // adding the layout change/barrier until we are ready to submit.
508 vkTexture->setImageLayout(gpu,
509 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
510 VK_ACCESS_SHADER_READ_BIT,
511 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
512 false);
513 }
514 }
515
// Records draw commands for all 'meshCount' meshes. Lazily binds to the
// pipeline's render target, prepares every sampled texture, and rebuilds the
// pipeline state whenever the primitive type changes between sub-meshes.
void GrVkGpuCommandBuffer::onDraw(const GrPipeline& pipeline,
                                  const GrPrimitiveProcessor& primProc,
                                  const GrMesh* meshes,
                                  int meshCount,
                                  const SkRect& bounds) {
    GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(pipeline.getRenderTarget());
    if (!fRenderTarget) {
        this->init(target);
    }
    SkASSERT(target == fRenderTarget);

    if (!meshCount) {
        return;
    }
    // Resolve/mipmap/transition every texture sampled by the primitive
    // processor, all fragment processors, and the xfer processor.
    prepare_sampled_images(primProc, fGpu);
    GrFragmentProcessor::Iter iter(pipeline);
    while (const GrFragmentProcessor* fp = iter.next()) {
        prepare_sampled_images(*fp, fGpu);
    }
    prepare_sampled_images(pipeline.getXferProcessor(), fGpu);

    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline,
                                                                    primProc,
                                                                    primitiveType);
    if (!pipelineState) {
        return;
    }

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    for (int i = 0; i < meshCount; ++i) {
        const GrMesh& mesh = meshes[i];
        // NOTE: this mesh iterator shadows the fragment-processor 'iter' above.
        GrMesh::Iterator iter;
        const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
        do {
            // A primitive-type change requires a different pipeline state.
            if (nonIdxMesh->primitiveType() != primitiveType) {
                // Technically we don't have to call this here (since there is a safety check in
                // pipelineState:setData but this will allow for quicker freeing of resources if the
                // pipelineState sits in a cache for a while.
                pipelineState->freeTempResources(fGpu);
                SkDEBUGCODE(pipelineState = nullptr);
                primitiveType = nonIdxMesh->primitiveType();
                pipelineState = this->prepareDrawState(pipeline,
                                                       primProc,
                                                       primitiveType);
                if (!pipelineState) {
                    return;
                }
            }
            SkASSERT(pipelineState);
            this->bindGeometry(primProc, *nonIdxMesh);

            if (nonIdxMesh->isIndexed()) {
                cbInfo.currentCmdBuf()->drawIndexed(fGpu,
                                                    nonIdxMesh->indexCount(),
                                                    1,
                                                    nonIdxMesh->startIndex(),
                                                    nonIdxMesh->startVertex(),
                                                    0);
            } else {
                cbInfo.currentCmdBuf()->draw(fGpu,
                                             nonIdxMesh->vertexCount(),
                                             1,
                                             nonIdxMesh->startVertex(),
                                             0);
            }
            cbInfo.fIsEmpty = false;

            fGpu->stats()->incNumDraws();
        } while ((nonIdxMesh = iter.next()));
    }

    // Update command buffer bounds
    cbInfo.fBounds.join(bounds);

    // Technically we don't have to call this here (since there is a safety check in
    // pipelineState:setData but this will allow for quicker freeing of resources if the
    // pipelineState sits in a cache for a while.
    pipelineState->freeTempResources(fGpu);
}
597
598