/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkOpsRenderPass.h"

#include "include/core/SkDrawable.h"
#include "include/core/SkRect.h"
#include "include/gpu/GrBackendDrawableInfo.h"
#include "include/gpu/GrDirectContext.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrOpFlushState.h"
#ifdef SKIA_OHOS
#include "src/gpu/GrPerfMonitorReporter.h"
#endif
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/effects/GrTextureEffect.h"
#include "src/gpu/vk/GrVkBuffer.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkFramebuffer.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkImageView.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkTexture.h"

/////////////////////////////////////////////////////////////////////////////

void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
                           VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
    switch (loadOpIn) {
        case GrLoadOp::kLoad:
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
            break;
        case GrLoadOp::kClear:
            *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
            break;
        case GrLoadOp::kDiscard:
            *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid LoadOp");
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
    }

    switch (storeOpIn) {
        case GrStoreOp::kStore:
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
            break;
        case GrStoreOp::kDiscard:
            *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid StoreOp");
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    }
}

GrVkOpsRenderPass::GrVkOpsRenderPass(GrVkGpu* gpu) : fGpu(gpu) {}

void GrVkOpsRenderPass::setAttachmentLayouts(LoadFromResolve loadFromResolve) {
    bool withStencil = fCurrentRenderPass->hasStencilAttachment();
    bool withResolve = fCurrentRenderPass->hasResolveAttachment();

    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We need to use the GENERAL layout in this case since we'll be using texture barriers
        // with an input attachment.
        VkAccessFlags dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu, VK_IMAGE_LAYOUT_GENERAL, dstAccess, dstStages, false);
    } else {
        // Change layout of our render target so it can be used as the color attachment.
        // TODO: If we know that we will never be blending or loading the attachment we could drop
        // the VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu,
                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                false);
    }

    if (withResolve) {
        GrVkImage* resolveAttachment = fFramebuffer->resolveAttachment();
        SkASSERT(resolveAttachment);
        if (loadFromResolve == LoadFromResolve::kLoad) {
            resolveAttachment->setImageLayout(fGpu,
                                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                              VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,
                                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                              false);
        } else {
            resolveAttachment->setImageLayout(
                    fGpu,
                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                    VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                    false);
        }
    }

    // If we are using a stencil attachment we also need to update its layout
    if (withStencil) {
        auto* vkStencil = fFramebuffer->stencilAttachment();
        SkASSERT(vkStencil);

        // We need the write and read access bits since we may load and store the stencil.
        // The initial load happens in the VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT so we
        // wait there.
        vkStencil->setImageLayout(fGpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                  VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                  false);
    }
}

// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
// of the granularity. The width must also be a multiple of the granularity or equal to the width
// of the entire attachment. Similar requirements apply to the y and height components.
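// A minimal worked example (illustrative values only, not from any particular device): with a
// granularity of {32, 32}, srcBounds = {40, 8, 70, 50} and a 128x128 attachment, the left/top
// edges snap down and the right/bottom edges snap up to granularity multiples, giving
// dstBounds = {32, 0, 96, 64}.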
void adjust_bounds_to_granularity(SkIRect* dstBounds,
                                  const SkIRect& srcBounds,
                                  const VkExtent2D& granularity,
                                  int maxWidth,
                                  int maxHeight) {
    // Adjust Width
    if ((0 != granularity.width && 1 != granularity.width)) {
        // Start with the right side of rect so we know if we end up going past the maxWidth.
        int rightAdj = srcBounds.fRight % granularity.width;
        if (rightAdj != 0) {
            rightAdj = granularity.width - rightAdj;
        }
        dstBounds->fRight = srcBounds.fRight + rightAdj;
        if (dstBounds->fRight > maxWidth) {
            dstBounds->fRight = maxWidth;
            dstBounds->fLeft = 0;
        } else {
            dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
        }
    } else {
        dstBounds->fLeft = srcBounds.fLeft;
        dstBounds->fRight = srcBounds.fRight;
    }

    // Adjust height
    if ((0 != granularity.height && 1 != granularity.height)) {
        // Start with the bottom side of rect so we know if we end up going past the maxHeight.
        int bottomAdj = srcBounds.fBottom % granularity.height;
        if (bottomAdj != 0) {
            bottomAdj = granularity.height - bottomAdj;
        }
        dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
        if (dstBounds->fBottom > maxHeight) {
            dstBounds->fBottom = maxHeight;
            dstBounds->fTop = 0;
        } else {
            dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
        }
    } else {
        dstBounds->fTop = srcBounds.fTop;
        dstBounds->fBottom = srcBounds.fBottom;
    }
}

bool GrVkOpsRenderPass::beginRenderPass(const VkClearValue& clearColor,
                                        LoadFromResolve loadFromResolve) {
    this->setAttachmentLayouts(loadFromResolve);

    bool firstSubpassUsesSecondaryCB =
            loadFromResolve != LoadFromResolve::kLoad && SkToBool(fCurrentSecondaryCommandBuffer);

    bool useFullBounds = fCurrentRenderPass->hasResolveAttachment() &&
                         fGpu->vkCaps().mustLoadFullImageWithDiscardableMSAA();

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();

    auto nativeBounds = GrNativeRect::MakeIRectRelativeTo(
            fOrigin,
            dimensions.height(), useFullBounds ? SkIRect::MakeSize(dimensions) : fBounds);

    // The bounds we use for the render pass should be of the granularity supported
    // by the device.
    const VkExtent2D& granularity = fCurrentRenderPass->granularity();
    SkIRect adjustedBounds;
    if ((0 != granularity.width && 1 != granularity.width) ||
        (0 != granularity.height && 1 != granularity.height)) {
        adjust_bounds_to_granularity(&adjustedBounds,
                                     nativeBounds,
                                     granularity,
                                     dimensions.width(),
                                     dimensions.height());
    } else {
        adjustedBounds = nativeBounds;
    }

    if (!fGpu->beginRenderPass(fCurrentRenderPass, fFramebuffer, &clearColor, fRenderTarget,
                               adjustedBounds, firstSubpassUsesSecondaryCB)) {
        if (fCurrentSecondaryCommandBuffer) {
            fCurrentSecondaryCommandBuffer->end(fGpu);
        }
        fCurrentRenderPass = nullptr;
        return false;
    }

    if (loadFromResolve == LoadFromResolve::kLoad) {
        this->loadResolveIntoMSAA(adjustedBounds);
    }

    return true;
}

bool GrVkOpsRenderPass::init(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                             const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                             const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
    VkAttachmentLoadOp loadOp;
    VkAttachmentStoreOp storeOp;
    get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, storeOp);

    get_vk_load_store_ops(resolveInfo.fLoadOp, resolveInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkResolveOps(loadOp, storeOp);

    get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(loadOp, storeOp);

    GrVkResourceProvider::CompatibleRPHandle rpHandle = fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());
    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return false;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers()) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return false;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    vkClearColor.color.float32[0] = colorInfo.fClearColor[0];
    vkClearColor.color.float32[1] = colorInfo.fClearColor[1];
    vkClearColor.color.float32[2] = colorInfo.fClearColor[2];
    vkClearColor.color.float32[3] = colorInfo.fClearColor[3];

    return this->beginRenderPass(vkClearColor, fLoadFromResolve);
}

bool GrVkOpsRenderPass::initWrapped() {
    SkASSERT(fFramebuffer->isExternal());
    fCurrentRenderPass = fFramebuffer->externalRenderPass();
    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->ref();

    fCurrentSecondaryCommandBuffer = fFramebuffer->externalCommandBuffer();
    if (!fCurrentSecondaryCommandBuffer) {
        return false;
    }
    return true;
}

GrVkOpsRenderPass::~GrVkOpsRenderPass() {
    this->reset();
}

GrGpu* GrVkOpsRenderPass::gpu() { return fGpu; }

GrVkCommandBuffer* GrVkOpsRenderPass::currentCommandBuffer() {
    if (fCurrentSecondaryCommandBuffer) {
        return fCurrentSecondaryCommandBuffer.get();
    }
    // We checked this when we set up the GrVkOpsRenderPass, and it should not have changed while
    // we are still using this object.
    SkASSERT(fGpu->currentCommandBuffer());
    return fGpu->currentCommandBuffer();
}

void GrVkOpsRenderPass::loadResolveIntoMSAA(const SkIRect& nativeBounds) {
    fGpu->loadMSAAFromResolve(this->currentCommandBuffer(), *fCurrentRenderPass,
                              fFramebuffer->colorAttachment(), fFramebuffer->resolveAttachment(),
                              nativeBounds);
    fGpu->currentCommandBuffer()->nexSubpass(fGpu, SkToBool(fCurrentSecondaryCommandBuffer));

    // If we loaded the resolve attachment, then we would have set the image layout to be
    // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an input
    // attachment. However, when we switched to the main subpass it will transition the layout
    // internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our tracking
    // of the layout to match the new layout.
    SkASSERT(fFramebuffer->resolveAttachment());
    fFramebuffer->resolveAttachment()->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
}

void GrVkOpsRenderPass::submit() {
    if (!fRenderTarget) {
        return;
    }
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // We don't want to actually submit the secondary command buffer if it is wrapped.
    if (this->wrapsSecondaryCommandBuffer()) {
        // We pass ownership of the GrVkSecondaryCommandBuffer to the external framebuffer since
        // its lifetime matches the lifetime we need to keep the GrManagedResources on the
        // GrVkSecondaryCommandBuffer alive.
        fFramebuffer->returnExternalGrSecondaryCommandBuffer(
                std::move(fCurrentSecondaryCommandBuffer));
        return;
    }

    if (fCurrentSecondaryCommandBuffer) {
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
}

bool GrVkOpsRenderPass::set(GrRenderTarget* rt,
                            sk_sp<GrVkFramebuffer> framebuffer,
                            GrSurfaceOrigin origin,
                            const SkIRect& bounds,
                            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                            const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                            GrVkRenderPass::SelfDependencyFlags selfDepFlags,
                            GrVkRenderPass::LoadFromResolve loadFromResolve,
                            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
    SkASSERT(!fRenderTarget);
    SkASSERT(fGpu == rt->getContext()->priv().getGpu());

#ifdef SK_DEBUG
    fIsActive = true;
#endif

    // We check that the GrVkGpu has a valid current command buffer here instead of each time we
    // access it. If the command buffer is valid here, it should stay valid for the entire use of
    // the render pass since nothing should trigger a submit while this render pass is active.
    if (!fGpu->currentCommandBuffer()) {
        return false;
    }

    this->INHERITED::set(rt, origin);

    for (int i = 0; i < sampledProxies.count(); ++i) {
        if (sampledProxies[i]->isInstantiated()) {
            SkASSERT(sampledProxies[i]->asTextureProxy());
            GrVkTexture* vkTex = static_cast<GrVkTexture*>(sampledProxies[i]->peekTexture());
            SkASSERT(vkTex);
            GrVkImage* texture = vkTex->textureImage();
            SkASSERT(texture);
            texture->setImageLayout(
                    fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
        }
    }

    SkASSERT(framebuffer);
    fFramebuffer = std::move(framebuffer);

    SkASSERT(bounds.isEmpty() ||
             SkIRect::MakeSize(fFramebuffer->colorAttachment()->dimensions()).contains(bounds));
    fBounds = bounds;

    fSelfDependencyFlags = selfDepFlags;
    fLoadFromResolve = loadFromResolve;

    if (this->wrapsSecondaryCommandBuffer()) {
        return this->initWrapped();
    }

    return this->init(colorInfo, resolveInfo, stencilInfo);
}

void GrVkOpsRenderPass::reset() {
    if (fCurrentSecondaryCommandBuffer) {
        // The active GrVkCommandPool on the GrVkGpu should still be the same pool we got the
        // secondary command buffer from since we haven't submitted any work yet.
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer.release()->recycle(fGpu->cmdPool());
    }
    if (fCurrentRenderPass) {
        fCurrentRenderPass->unref();
        fCurrentRenderPass = nullptr;
    }
    fCurrentCBIsEmpty = true;

    fRenderTarget = nullptr;
    fFramebuffer.reset();

    fSelfDependencyFlags = GrVkRenderPass::SelfDependencyFlags::kNone;

    fLoadFromResolve = LoadFromResolve::kNo;
    fOverridePipelinesForResolveLoad = false;

#ifdef SK_DEBUG
    fIsActive = false;
#endif
}

bool GrVkOpsRenderPass::wrapsSecondaryCommandBuffer() const {
    return fFramebuffer->isExternal();
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    GrAttachment* sb = fFramebuffer->stencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideStencilMask) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setXYWH(0, 0, sb->width(), sb->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, sb->height() - scissor.rect().fBottom,
                       scissor.rect().fRight, sb->height() - scissor.rect().fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(fCurrentRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

#ifdef SK_ENABLE_STENCIL_CULLING_OHOS
void GrVkOpsRenderPass::onClearStencil(const GrScissorState& scissor, uint32_t stencilVal) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    GrAttachment* sb = fFramebuffer->stencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    vkStencilColor.stencil = stencilVal;

    VkClearRect clearRect;

    const SkIRect& vkRect = scissor.stencilRect();
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(fCurrentRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}
#endif

void GrVkOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkClearColorValue vkColor = {{color[0], color[1], color[2], color[3]}};

    // If we end up in a situation where we are calling clear without a scissor, then in general it
    // means we missed an opportunity higher up the stack to set the load op to be a clear. However,
    // there are situations where higher up we couldn't discard the previous ops and set a clear
    // load op (e.g. if we needed to execute a wait op). Thus we also have the empty check here.
    // TODO: Make the waitOp a RenderTask instead so we can clear out the OpsTask for a clear. We
    // can then reenable this assert assuming we can't get messed up by a waitOp.
    //SkASSERT(!fCurrentCBIsEmpty || scissor);

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();
    // We always do a sub rect clear with clearAttachments since we are inside a render pass
    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setSize(dimensions);
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, dimensions.height() - scissor.rect().fBottom,
                       scissor.rect().fRight, dimensions.height() - scissor.rect().fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
    return;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::addAdditionalRenderPass(bool mustUseSecondaryCommandBuffer) {
    SkASSERT(!this->wrapsSecondaryCommandBuffer());

    bool withResolve = fFramebuffer->resolveAttachment();
    bool withStencil = fFramebuffer->stencilAttachment();

    // If we have a resolve attachment we must do a resolve load in the new render pass since we
    // broke up the original one. GrProgramInfos were made without any knowledge that the render
    // pass may be split up. Thus they may try to make VkPipelines that only use one subpass. We
    // need to override that to make sure they are compatible with the extra load subpass.
    fOverridePipelinesForResolveLoad |=
            withResolve && fCurrentRenderPass->loadFromResolve() != LoadFromResolve::kLoad;

    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);
    LoadFromResolve loadFromResolve = LoadFromResolve::kNo;
    if (withResolve) {
        vkColorOps = {VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE};
        loadFromResolve = LoadFromResolve::kLoad;
    }
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->unref();
    fCurrentRenderPass = nullptr;

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    auto fb = vkRT->getFramebuffer(withResolve, withStencil, fSelfDependencyFlags, loadFromResolve);
    if (!fb) {
        return;
    }
    fFramebuffer = sk_ref_sp(fb);

    SkASSERT(fFramebuffer);
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());

    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers() ||
        mustUseSecondaryCommandBuffer) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    memset(&vkClearColor, 0, sizeof(VkClearValue));

    this->beginRenderPass(vkClearColor, loadFromResolve);
}

void GrVkOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    if (fCurrentSecondaryCommandBuffer) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);

    // We pass in true here to signal that after the upload we need to set the upload textures'
    // layout back to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
    state->doUpload(upload, true);

    this->addAdditionalRenderPass(false);
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onEnd() {
    if (fCurrentSecondaryCommandBuffer && !this->wrapsSecondaryCommandBuffer()) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
    }
}

bool GrVkOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return false;
    }

    SkRect rtRect = SkRect::Make(fBounds);
    if (rtRect.intersect(drawBounds)) {
        rtRect.roundOut(&fCurrentPipelineBounds);
    } else {
        fCurrentPipelineBounds.setEmpty();
    }

    GrVkCommandBuffer* currentCB = this->currentCommandBuffer();
    SkASSERT(fCurrentRenderPass);

    VkRenderPass compatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    fCurrentPipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            fRenderTarget, programInfo, compatibleRenderPass, fOverridePipelinesForResolveLoad);
    if (!fCurrentPipelineState) {
        return false;
    }

    fCurrentPipelineState->bindPipeline(fGpu, currentCB);

    // Both the 'programInfo' and this renderPass have an origin. Since they come from the
    // same place (i.e., the target renderTargetProxy) they had best agree.
    SkASSERT(programInfo.origin() == fOrigin);

    auto colorAttachment = fFramebuffer->colorAttachment();
    if (!fCurrentPipelineState->setAndBindUniforms(fGpu, colorAttachment->dimensions(), programInfo,
                                                   currentCB)) {
        return false;
    }

    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full pipeline bounds.
        GrVkPipeline::SetDynamicScissorRectState(
                fGpu, currentCB, colorAttachment->dimensions(), fOrigin, fCurrentPipelineBounds);
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, currentCB, colorAttachment->dimensions());
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, currentCB,
                                               programInfo.pipeline().writeSwizzle(),
                                               programInfo.pipeline().getXferProcessor());

    return true;
}

void GrVkOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
    SkIRect combinedScissorRect;
    if (!combinedScissorRect.intersect(fCurrentPipelineBounds, scissor)) {
        combinedScissorRect = SkIRect::MakeEmpty();
    }
    GrVkPipeline::SetDynamicScissorRectState(fGpu, this->currentCommandBuffer(),
                                             fFramebuffer->colorAttachment()->dimensions(),
                                             fOrigin, combinedScissorRect);
}

#ifdef SK_DEBUG
void check_sampled_texture(GrTexture* tex, GrAttachment* colorAttachment, GrVkGpu* gpu) {
    SkASSERT(!tex->isProtected() || (colorAttachment->isProtected() && gpu->protectedContext()));
    auto vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
    SkASSERT(vkTex->currentLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
#endif

bool GrVkOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
                                       const GrSurfaceProxy* const geomProcTextures[],
                                       const GrPipeline& pipeline) {
#ifdef SK_DEBUG
    SkASSERT(fCurrentPipelineState);
    auto colorAttachment = fFramebuffer->colorAttachment();
    for (int i = 0; i < geomProc.numTextureSamplers(); ++i) {
        check_sampled_texture(geomProcTextures[i]->peekTexture(), colorAttachment, fGpu);
    }
    pipeline.visitTextureEffects([&](const GrTextureEffect& te) {
        check_sampled_texture(te.texture(), colorAttachment, fGpu);
    });
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        check_sampled_texture(dstTexture, colorAttachment, fGpu);
    }
#endif
    if (!fCurrentPipelineState->setAndBindTextures(fGpu, geomProc, pipeline, geomProcTextures,
                                                   this->currentCommandBuffer())) {
        return false;
    }
    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We bind the color attachment as an input attachment
        auto ds = fFramebuffer->colorAttachment()->inputDescSetForBlending(fGpu);
        if (!ds) {
            return false;
        }
        return fCurrentPipelineState->setAndBindInputAttachment(fGpu, std::move(ds),
                                                                this->currentCommandBuffer());
    }
    return true;
}

void GrVkOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
                                      sk_sp<const GrBuffer> instanceBuffer,
                                      sk_sp<const GrBuffer> vertexBuffer,
                                      GrPrimitiveRestart primRestart) {
    SkASSERT(GrPrimitiveRestart::kNo == primRestart);
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    SkASSERT(!fGpu->caps()->usePrimitiveRestart());  // Ignore primitiveRestart parameter.

    GrVkCommandBuffer* currCmdBuf = this->currentCommandBuffer();
    SkASSERT(currCmdBuf);

    // There is no need to put any memory barriers to make sure host writes have finished here.
    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
    // an active RenderPass.

    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
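    // For example (a sketch of the binding assignment performed below, not an additional code
    // path): with both a vertex and an instance buffer present, the vertex buffer lands on
    // binding 0 and the instance buffer on binding 1; with only an instance buffer, it takes
    // binding 0.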
    uint32_t binding = 0;
    if (auto* gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer.get())) {
        SkASSERT(!gpuVertexBuffer->isCpuBuffer());
        SkASSERT(!gpuVertexBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(vertexBuffer));
    }
    if (auto* gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(instanceBuffer.get())) {
        SkASSERT(!gpuInstanceBuffer->isCpuBuffer());
        SkASSERT(!gpuInstanceBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(instanceBuffer));
    }
    if (auto* gpuIndexBuffer = static_cast<const GrGpuBuffer*>(indexBuffer.get())) {
        SkASSERT(!gpuIndexBuffer->isCpuBuffer());
        SkASSERT(!gpuIndexBuffer->isMapped());
        currCmdBuf->bindIndexBuffer(fGpu, std::move(indexBuffer));
    }
}

void GrVkOpsRenderPass::onDrawInstanced(int instanceCount,
                                        int baseInstance,
                                        int vertexCount, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndexedInstanced(int indexCount, int baseIndex, int instanceCount,
                                               int baseInstance, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->drawIndexed(fGpu, indexCount, instanceCount,
                                              baseIndex, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
                                       int drawCount) {
    SkASSERT(!drawIndirectBuffer->isCpuBuffer());
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    const GrVkCaps& caps = fGpu->vkCaps();
    SkASSERT(caps.nativeDrawIndirectSupport());
    SkASSERT(fCurrentPipelineState);

    const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
    uint32_t remainingDraws = drawCount;
    const size_t stride = sizeof(GrDrawIndirectCommand);
    while (remainingDraws >= 1) {
        uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
        this->currentCommandBuffer()->drawIndirect(
                fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
        remainingDraws -= currDrawCount;
        offset += stride * currDrawCount;
        fGpu->stats()->incNumDraws();
    }
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
                                              int drawCount) {
    SkASSERT(!drawIndirectBuffer->isCpuBuffer());
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    const GrVkCaps& caps = fGpu->vkCaps();
    SkASSERT(caps.nativeDrawIndirectSupport());
    SkASSERT(fCurrentPipelineState);
    const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
    uint32_t remainingDraws = drawCount;
    const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
    while (remainingDraws >= 1) {
        uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
        this->currentCommandBuffer()->drawIndexedIndirect(
                fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
        remainingDraws -= currDrawCount;
        offset += stride * currDrawCount;
        fGpu->stats()->incNumDraws();
    }
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkRect2D bounds;
    bounds.offset = { 0, 0 };
    bounds.extent = { 0, 0 };

    if (!fCurrentSecondaryCommandBuffer) {
        fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
        this->addAdditionalRenderPass(true);
        // We may have failed to start a new render pass
        if (!fCurrentRenderPass) {
            SkASSERT(fGpu->isDeviceLost());
            return;
        }
    }
    SkASSERT(fCurrentSecondaryCommandBuffer);

    GrVkDrawableInfo vkInfo;
    vkInfo.fSecondaryCommandBuffer = fCurrentSecondaryCommandBuffer->vkCommandBuffer();
    vkInfo.fCompatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
    vkInfo.fFormat = fFramebuffer->colorAttachment()->imageFormat();
    vkInfo.fDrawBounds = &bounds;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    vkInfo.fImage = fFramebuffer->colorAttachment()->image();
#else
    vkInfo.fImage = VK_NULL_HANDLE;
#endif //SK_BUILD_FOR_ANDROID_FRAMEWORK

    GrBackendDrawableInfo info(vkInfo);

    // After we draw into the command buffer via the drawable, cached state we have may be invalid.
    this->currentCommandBuffer()->invalidateState();
    // Also assume that the drawable produced output.
    fCurrentCBIsEmpty = false;

    drawable->draw(info);
    fGpu->addDrawable(std::move(drawable));
}


////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onDrawBlurImage(const GrSurfaceProxyView& proxyView,
                                        const SkBlurArg& blurArg) {
    if (!proxyView.proxy()) {
        return;
    }

    GrVkTexture* texture = static_cast<GrVkTexture*>(proxyView.proxy()->peekTexture());
    if (!texture) {
        return;
    }
    GrVkImage* image = texture->textureImage();
    if (!image) {
        return;
    }
#ifdef SKIA_OHOS
    // Record when drawBlurImage starts for the performance monitor.
    int64_t startTime = GrPerfMonitorReporter::getCurrentTime();
#endif

    HITRACE_OHOS_NAME_ALWAYS("DrawBlurImage");
    // The image is referenced by the texture op, so the resources' refcounts must be incremented.
    fGpu->currentCommandBuffer()->addResource(image->textureView());
    fGpu->currentCommandBuffer()->addResource(image->resource());
    // OH ISSUE: fix hps blur, add GrSurface reference protection
    fGpu->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(texture));
    SkOriginInfo originInfo {};
    originInfo.imageOrigin = proxyView.origin();
    originInfo.rtOrigin = fOrigin;
    fGpu->currentCommandBuffer()->drawBlurImage(fGpu, image, fFramebuffer->colorAttachment()->dimensions(),
                                                originInfo, blurArg);
#ifdef SKIA_OHOS
    // Record when drawBlurImage ends and report the blur timing.
    int64_t blurTime = GrPerfMonitorReporter::getCurrentTime() - startTime;
    GrGpuResourceTag tag = proxyView.proxy()->getGrProxyTag();
    if (tag.isGrTagValid()) {
        // Material (filter) type being reported.
        const uint16_t filterType = 2;
        GrPerfMonitorReporter::GetInstance().recordBlurNode(tag.fName, blurTime);
        GrPerfMonitorReporter::GetInstance().recordBlurPerfEvent(tag.fName, tag.fPid, filterType,
            static_cast<float>(blurArg.sigma), static_cast<int32_t>(proxyView.width()),
            static_cast<int32_t>(proxyView.height()), blurTime);
    }
#endif
    return;
}