/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCCDrawPathsOp.h"

#include "include/private/GrRecordingContext.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/ccpr/GrCCPathCache.h"
#include "src/gpu/ccpr/GrCCPerFlushResources.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/ccpr/GrOctoBounds.h"
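// Returns true if any fragment processor in the paint reads local coordinates.
// When none do, the op can substitute the identity view matrix, which lets more
// ops compare equal in onCombineIfPossible() and batch together.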
static bool has_coord_transforms(const GrPaint& paint) {
    GrFragmentProcessor::Iter iter(paint);
    while (const GrFragmentProcessor* fp = iter.next()) {
        if (!fp->coordTransforms().empty()) {
            return true;
        }
    }
    return false;
}
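// Public factory. Maps the shape's bounds into device space, accounts for any
// stroke inflation, and crops paths whose conservative bounds exceed
// kPathCropThreshold before handing off to InternalMake(). A hypothetical call
// site might look like the following (sketch only; the caller's locals are
// assumptions, not part of this file):
//
//     if (auto op = GrCCDrawPathsOp::Make(context, clipIBounds, viewMatrix,
//                                         shape, std::move(paint))) {
//         renderTargetContext->addDrawOp(clip, std::move(op));
//     }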
std::unique_ptr<GrCCDrawPathsOp> GrCCDrawPathsOp::Make(
        GrRecordingContext* context, const SkIRect& clipIBounds, const SkMatrix& m,
        const GrShape& shape, GrPaint&& paint) {
    SkRect conservativeDevBounds;
    m.mapRect(&conservativeDevBounds, shape.bounds());

    const SkStrokeRec& stroke = shape.style().strokeRec();
    float strokeDevWidth = 0;
    float conservativeInflationRadius = 0;
    if (!stroke.isFillStyle()) {
        strokeDevWidth = GrCoverageCountingPathRenderer::GetStrokeDevWidth(
                m, stroke, &conservativeInflationRadius);
        conservativeDevBounds.outset(conservativeInflationRadius, conservativeInflationRadius);
    }

    std::unique_ptr<GrCCDrawPathsOp> op;
    float conservativeSize = SkTMax(conservativeDevBounds.height(), conservativeDevBounds.width());
    if (conservativeSize > GrCoverageCountingPathRenderer::kPathCropThreshold) {
        // The path is too large. Crop it, or analytic AA can run out of fp32 precision.
        SkPath croppedDevPath;
        shape.asPath(&croppedDevPath);
        croppedDevPath.transform(m, &croppedDevPath);

        SkIRect cropBox = clipIBounds;
        GrShape croppedDevShape;
        if (stroke.isFillStyle()) {
            GrCoverageCountingPathRenderer::CropPath(croppedDevPath, cropBox, &croppedDevPath);
            croppedDevShape = GrShape(croppedDevPath);
            conservativeDevBounds = croppedDevShape.bounds();
        } else {
            int r = SkScalarCeilToInt(conservativeInflationRadius);
            cropBox.outset(r, r);
            GrCoverageCountingPathRenderer::CropPath(croppedDevPath, cropBox, &croppedDevPath);
            SkStrokeRec devStroke = stroke;
            devStroke.setStrokeStyle(strokeDevWidth);
            croppedDevShape = GrShape(croppedDevPath, GrStyle(devStroke, nullptr));
            conservativeDevBounds = croppedDevPath.getBounds();
            conservativeDevBounds.outset(conservativeInflationRadius, conservativeInflationRadius);
        }

        // FIXME: This breaks local coords: http://skbug.com/8003
        return InternalMake(context, clipIBounds, SkMatrix::I(), croppedDevShape, strokeDevWidth,
                            conservativeDevBounds, std::move(paint));
    }

    return InternalMake(context, clipIBounds, m, shape, strokeDevWidth, conservativeDevBounds,
                        std::move(paint));
}
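// Shared factory tail. Rounds the conservative device bounds out to integers,
// culls draws that fall entirely outside the clip (returning nullptr), and
// allocates the op from the recording context's op memory pool.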
std::unique_ptr<GrCCDrawPathsOp> GrCCDrawPathsOp::InternalMake(
        GrRecordingContext* context, const SkIRect& clipIBounds, const SkMatrix& m,
        const GrShape& shape, float strokeDevWidth, const SkRect& conservativeDevBounds,
        GrPaint&& paint) {
    // The path itself should have been cropped if larger than kPathCropThreshold. If it had a
    // stroke, that would have further inflated its draw bounds.
    SkASSERT(SkTMax(conservativeDevBounds.height(), conservativeDevBounds.width()) <
             GrCoverageCountingPathRenderer::kPathCropThreshold +
             GrCoverageCountingPathRenderer::kMaxBoundsInflationFromStroke*2 + 1);

    SkIRect shapeConservativeIBounds;
    conservativeDevBounds.roundOut(&shapeConservativeIBounds);

    SkIRect maskDevIBounds;
    if (!maskDevIBounds.intersect(clipIBounds, shapeConservativeIBounds)) {
        return nullptr;
    }

    GrOpMemoryPool* pool = context->priv().opMemoryPool();
    return pool->allocate<GrCCDrawPathsOp>(m, shape, strokeDevWidth, shapeConservativeIBounds,
                                           maskDevIBounds, conservativeDevBounds, std::move(paint));
}
GrCCDrawPathsOp::GrCCDrawPathsOp(const SkMatrix& m, const GrShape& shape, float strokeDevWidth,
                                 const SkIRect& shapeConservativeIBounds,
                                 const SkIRect& maskDevIBounds, const SkRect& conservativeDevBounds,
                                 GrPaint&& paint)
        : GrDrawOp(ClassID())
        , fViewMatrixIfUsingLocalCoords(has_coord_transforms(paint) ? m : SkMatrix::I())
        , fDraws(m, shape, strokeDevWidth, shapeConservativeIBounds, maskDevIBounds,
                 paint.getColor4f())
        , fProcessors(std::move(paint)) {  // Paint must be moved after fetching its color above.
    SkDEBUGCODE(fBaseInstance = -1);
    // If the path is clipped, CCPR will only draw the visible portion. This helps improve batching,
    // since it eliminates the need for scissor when drawing to the main canvas.
    // FIXME: We should parse the path right here. It will provide a tighter bounding box for us to
    // give the opList, as well as enabling threaded parsing when using DDL.
    SkRect clippedDrawBounds;
    if (!clippedDrawBounds.intersect(conservativeDevBounds, SkRect::Make(maskDevIBounds))) {
        clippedDrawBounds.setEmpty();
    }
    // We always have AA bloat, even in MSAA atlas mode. This is because by the time this Op comes
    // along and draws to the main canvas, the atlas has been resolved to analytic coverage.
    this->setBounds(clippedDrawBounds, GrOp::HasAABloat::kYes, GrOp::IsZeroArea::kNo);
}
GrCCDrawPathsOp::~GrCCDrawPathsOp() {
    if (fOwningPerOpListPaths) {
        // Remove the list's dangling pointer to this Op before deleting it.
        fOwningPerOpListPaths->fDrawOps.remove(this);
    }
}
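// A SingleDraw captures everything needed to render one path: its matrix,
// shape, device-space stroke width, conservative bounds, and color. Ops that
// merge in onCombineIfPossible() accumulate their SingleDraws in fDraws.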
GrCCDrawPathsOp::SingleDraw::SingleDraw(const SkMatrix& m, const GrShape& shape,
                                        float strokeDevWidth,
                                        const SkIRect& shapeConservativeIBounds,
                                        const SkIRect& maskDevIBounds, const SkPMColor4f& color)
        : fMatrix(m)
        , fShape(shape)
        , fStrokeDevWidth(strokeDevWidth)
        , fShapeConservativeIBounds(shapeConservativeIBounds)
        , fMaskDevIBounds(maskDevIBounds)
        , fColor(color) {
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    if (fShape.hasUnstyledKey()) {
        // On AOSP we round view matrix translates to integer values for cacheable paths. We do
        // this to match HWUI's cache hit ratio, which doesn't consider the matrix when caching
        // paths.
        fMatrix.setTranslateX(SkScalarRoundToScalar(fMatrix.getTranslateX()));
        fMatrix.setTranslateY(SkScalarRoundToScalar(fMatrix.getTranslateY()));
    }
#endif
}
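// Finalization runs before any op merging has taken place, so the op should
// still hold exactly one path draw; the analysis is delegated to it.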
GrProcessorSet::Analysis GrCCDrawPathsOp::finalize(
        const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
        GrClampType clampType) {
    SkASSERT(1 == fNumDraws);  // There should only be one single path draw in this Op right now.
    return fDraws.head().finalize(caps, clip, hasMixedSampledCoverage, clampType, &fProcessors);
}
GrProcessorSet::Analysis GrCCDrawPathsOp::SingleDraw::finalize(
        const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
        GrClampType clampType, GrProcessorSet* processors) {
    const GrProcessorSet::Analysis& analysis = processors->finalize(
            fColor, GrProcessorAnalysisCoverage::kSingleChannel, clip,
            &GrUserStencilSettings::kUnused, hasMixedSampledCoverage, caps, clampType, &fColor);

    // Lines start looking jagged when they get thinner than 1px. For thin strokes it looks better
    // if we can convert them to hairline (i.e., inflate the stroke width to 1px), and instead
    // reduce the opacity to create the illusion of thinness. This strategy also helps reduce
    // artifacts from coverage dilation when there are self-intersections.
    if (analysis.isCompatibleWithCoverageAsAlpha() &&
            !fShape.style().strokeRec().isFillStyle() && fStrokeDevWidth < 1) {
        // Modifying the shape affects its cache key. The draw can't have a cache entry yet or else
        // our next step would invalidate it.
        SkASSERT(!fCacheEntry);
        SkASSERT(SkStrokeRec::kStroke_Style == fShape.style().strokeRec().getStyle());

        SkPath path;
        fShape.asPath(&path);

        // Create a hairline version of our stroke.
        SkStrokeRec hairlineStroke = fShape.style().strokeRec();
        hairlineStroke.setStrokeStyle(0);

        // How transparent does a 1px stroke have to be in order to appear as thin as the real one?
        float coverage = fStrokeDevWidth;

        fShape = GrShape(path, GrStyle(hairlineStroke, nullptr));
        fStrokeDevWidth = 1;

        // fShapeConservativeIBounds already accounted for this possibility of inflating the stroke.
        fColor = fColor * coverage;
    }

    return analysis;
}
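// Two CCPR ops can merge only if they share the same processor set and the
// same local-coord matrix. On a merge, "that" op's draws are spliced into this
// op's list, and debug builds transfer the draw counts accordingly.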
GrOp::CombineResult GrCCDrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps&) {
    GrCCDrawPathsOp* that = op->cast<GrCCDrawPathsOp>();
    SkASSERT(fOwningPerOpListPaths);
    SkASSERT(fNumDraws);
    SkASSERT(!that->fOwningPerOpListPaths || that->fOwningPerOpListPaths == fOwningPerOpListPaths);
    SkASSERT(that->fNumDraws);

    if (fProcessors != that->fProcessors ||
        fViewMatrixIfUsingLocalCoords != that->fViewMatrixIfUsingLocalCoords) {
        return CombineResult::kCannotCombine;
    }

    fDraws.append(std::move(that->fDraws), &fOwningPerOpListPaths->fAllocator);

    SkDEBUGCODE(fNumDraws += that->fNumDraws);
    SkDEBUGCODE(that->fNumDraws = 0);
    return CombineResult::kMerged;
}
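// Registers this op with the GrCCPerOpListPaths that owns it. The destructor
// relies on this back-reference to unlink the op from fDrawOps on deletion.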
void GrCCDrawPathsOp::addToOwningPerOpListPaths(sk_sp<GrCCPerOpListPaths> owningPerOpListPaths) {
    SkASSERT(1 == fNumDraws);
    SkASSERT(!fOwningPerOpListPaths);
    fOwningPerOpListPaths = std::move(owningPerOpListPaths);
    fOwningPerOpListPaths->fDrawOps.addToTail(this);
}
void GrCCDrawPathsOp::accountForOwnPaths(GrCCPathCache* pathCache,
                                         GrOnFlushResourceProvider* onFlushRP,
                                         GrCCPerFlushResourceSpecs* specs) {
    for (SingleDraw& draw : fDraws) {
        draw.accountForOwnPath(pathCache, onFlushRP, specs);
    }
}
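// Tallies this draw into the per-flush resource specs. Three outcomes are
// possible: the path already lives in a cached atlas (possibly scheduling a
// copy into the A8 literal-coverage atlas), the mask is eligible for caching
// after this flush, or the path must be rendered into a new atlas.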
void GrCCDrawPathsOp::SingleDraw::accountForOwnPath(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
        GrCCPerFlushResourceSpecs* specs) {
    using CoverageType = GrCCAtlas::CoverageType;

    SkPath path;
    fShape.asPath(&path);

    SkASSERT(!fCacheEntry);

    if (pathCache) {
        fCacheEntry = pathCache->find(
                onFlushRP, fShape, fMaskDevIBounds, fMatrix, &fCachedMaskShift);
    }

    if (fCacheEntry) {
        if (const GrCCCachedAtlas* cachedAtlas = fCacheEntry->cachedAtlas()) {
            SkASSERT(cachedAtlas->getOnFlushProxy());
            if (CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
                ++specs->fNumCachedPaths;
            } else {
                // Suggest that this path be copied to a literal coverage atlas, to save memory.
                // (The client may decline this copy via DoCopiesToA8Coverage::kNo.)
                int idx = (fShape.style().strokeRec().isFillStyle())
                        ? GrCCPerFlushResourceSpecs::kFillIdx
                        : GrCCPerFlushResourceSpecs::kStrokeIdx;
                ++specs->fNumCopiedPaths[idx];
                specs->fCopyPathStats[idx].statPath(path);
                specs->fCopyAtlasSpecs.accountForSpace(fCacheEntry->width(), fCacheEntry->height());
                fDoCopyToA8Coverage = true;
            }
            return;
        }

        if (this->shouldCachePathMask(onFlushRP->caps()->maxRenderTargetSize())) {
            fDoCachePathMask = true;
            // We don't cache partial masks; ensure the bounds include the entire path.
            fMaskDevIBounds = fShapeConservativeIBounds;
        }
    }

    // Plan on rendering this path in a new atlas.
    int idx = (fShape.style().strokeRec().isFillStyle())
            ? GrCCPerFlushResourceSpecs::kFillIdx
            : GrCCPerFlushResourceSpecs::kStrokeIdx;
    ++specs->fNumRenderedPaths[idx];
    specs->fRenderedPathStats[idx].statPath(path);
    specs->fRenderedAtlasSpecs.accountForSpace(fMaskDevIBounds.width(), fMaskDevIBounds.height());
    SkDEBUGCODE(fWasCountedAsRender = true);
}
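// Heuristic for whether a rendered mask is worth keeping in the path cache:
// require at least a second hit, reject shapes too big for a render target,
// always cache small paths (under 100x100 px), and otherwise cache only when
// enough of the path has been visible both historically and in this draw.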
bool GrCCDrawPathsOp::SingleDraw::shouldCachePathMask(int maxRenderTargetSize) const {
    SkASSERT(fCacheEntry);
    SkASSERT(!fCacheEntry->cachedAtlas());
    if (fCacheEntry->hitCount() <= 1) {
        return false;  // Don't cache a path mask until at least its second hit.
    }

    int shapeMaxDimension = SkTMax(
            fShapeConservativeIBounds.height(), fShapeConservativeIBounds.width());
    if (shapeMaxDimension > maxRenderTargetSize) {
        return false;  // This path isn't cacheable.
    }

    int64_t shapeArea = sk_64_mul(
            fShapeConservativeIBounds.height(), fShapeConservativeIBounds.width());
    if (shapeArea < 100*100) {
        // If a path is small enough, we might as well try to render and cache the entire thing, no
        // matter how much of it is actually visible.
        return true;
    }

    // The hitRect should already be contained within the shape's bounds, but we still intersect it
    // because it's possible for edges very near pixel boundaries (e.g., 0.999999) to round out
    // inconsistently, depending on the integer translation values and fp32 precision.
    SkIRect hitRect = fCacheEntry->hitRect().makeOffset(fCachedMaskShift.x(), fCachedMaskShift.y());
    hitRect.intersect(fShapeConservativeIBounds);

    // Render and cache the entire path mask if we see enough of it to justify rendering all the
    // pixels. Our criterion for "enough" is that we must have seen at least 50% of the path in the
    // past, and in this particular draw we must see at least 10% of it.
    int64_t hitArea = sk_64_mul(hitRect.height(), hitRect.width());
    int64_t drawArea = sk_64_mul(fMaskDevIBounds.height(), fMaskDevIBounds.width());
    return hitArea*2 >= shapeArea && drawArea*10 >= shapeArea;
}
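// Writes out this op's instance data. fBaseInstance records where the op's
// instances begin in the per-flush buffer, and the final instance range is
// closed off once every draw has been appended.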
void GrCCDrawPathsOp::setupResources(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
        GrCCPerFlushResources* resources, DoCopiesToA8Coverage doCopies) {
    SkASSERT(fNumDraws > 0);
    SkASSERT(-1 == fBaseInstance);
    fBaseInstance = resources->nextPathInstanceIdx();

    for (SingleDraw& draw : fDraws) {
        draw.setupResources(pathCache, onFlushRP, resources, doCopies, this);
    }

    if (!fInstanceRanges.empty()) {
        fInstanceRanges.back().fEndInstanceIdx = resources->nextPathInstanceIdx();
    }
}
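// Emits the instance data for one draw: either reuse (and possibly upgrade)
// an existing cached atlas entry, or render the path into this flush's
// coverage count atlas, optionally caching the new mask for future flushes.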
void GrCCDrawPathsOp::SingleDraw::setupResources(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
        GrCCPerFlushResources* resources, DoCopiesToA8Coverage doCopies, GrCCDrawPathsOp* op) {
    SkPath path;
    fShape.asPath(&path);

    auto fillRule = (fShape.style().strokeRec().isFillStyle())
            ? GrFillRuleForSkPath(path)
            : GrFillRule::kNonzero;

    if (fCacheEntry) {
        // Does the path already exist in a cached atlas texture?
        if (fCacheEntry->cachedAtlas()) {
            SkASSERT(fCacheEntry->cachedAtlas()->getOnFlushProxy());
            if (DoCopiesToA8Coverage::kYes == doCopies && fDoCopyToA8Coverage) {
                resources->upgradeEntryToLiteralCoverageAtlas(
                        pathCache, onFlushRP, fCacheEntry.get(), fillRule);
                SkASSERT(fCacheEntry->cachedAtlas());
                SkASSERT(GrCCAtlas::CoverageType::kA8_LiteralCoverage
                                 == fCacheEntry->cachedAtlas()->coverageType());
                SkASSERT(fCacheEntry->cachedAtlas()->getOnFlushProxy());
            }
#if 0
            // Simple color manipulation to visualize cached paths.
            fColor = (GrCCAtlas::CoverageType::kA8_LiteralCoverage
                              == fCacheEntry->cachedAtlas()->coverageType())
                    ? SkPMColor4f{0,0,.25,.25} : SkPMColor4f{0,.25,0,.25};
#endif
            auto coverageMode = GrCCPathProcessor::GetCoverageMode(
                    fCacheEntry->cachedAtlas()->coverageType());
            op->recordInstance(coverageMode, fCacheEntry->cachedAtlas()->getOnFlushProxy(),
                               resources->nextPathInstanceIdx());
            resources->appendDrawPathInstance().set(
                    *fCacheEntry, fCachedMaskShift, SkPMColor4f_toFP16(fColor), fillRule);
#ifdef SK_DEBUG
            if (fWasCountedAsRender) {
                // A path mask didn't exist for this path at the beginning of flush, but we have one
                // now. What this means is that we've drawn the same path multiple times this flush.
                // Let the resources know that we reused one for their internal debug counters.
                resources->debugOnly_didReuseRenderedPath();
            }
#endif
            return;
        }
    }

    // Render the raw path into a coverage count atlas. renderShapeInAtlas() gives us two tight
    // bounding boxes: one in device space, as well as a second one rotated an additional 45
    // degrees. The path vertex shader uses these two bounding boxes to generate an octagon that
    // circumscribes the path.
    GrOctoBounds octoBounds;
    SkIRect devIBounds;
    SkIVector devToAtlasOffset;
    if (auto atlas = resources->renderShapeInAtlas(
                fMaskDevIBounds, fMatrix, fShape, fStrokeDevWidth, &octoBounds, &devIBounds,
                &devToAtlasOffset)) {
        auto coverageMode = GrCCPathProcessor::GetCoverageMode(
                resources->renderedPathCoverageType());
        op->recordInstance(coverageMode, atlas->textureProxy(), resources->nextPathInstanceIdx());
        resources->appendDrawPathInstance().set(
                octoBounds, devToAtlasOffset, SkPMColor4f_toFP16(fColor), fillRule);

        if (fDoCachePathMask) {
            SkASSERT(fCacheEntry);
            SkASSERT(!fCacheEntry->cachedAtlas());
            SkASSERT(fShapeConservativeIBounds == fMaskDevIBounds);
            fCacheEntry->setCoverageCountAtlas(
                    onFlushRP, atlas, devToAtlasOffset, octoBounds, devIBounds, fCachedMaskShift);
        }
    }
}
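// Instances are batched into ranges that share an atlas proxy and coverage
// mode. A new range opens whenever the atlas changes, closing the previous
// range at the current instance index. The last range is left open here and
// finalized by setupResources() once all draws have been recorded.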
inline void GrCCDrawPathsOp::recordInstance(
        GrCCPathProcessor::CoverageMode coverageMode, GrTextureProxy* atlasProxy, int instanceIdx) {
    if (fInstanceRanges.empty()) {
        fInstanceRanges.push_back({coverageMode, atlasProxy, instanceIdx});
    } else if (fInstanceRanges.back().fAtlasProxy != atlasProxy) {
        fInstanceRanges.back().fEndInstanceIdx = instanceIdx;
        fInstanceRanges.push_back({coverageMode, atlasProxy, instanceIdx});
    }
    SkASSERT(fInstanceRanges.back().fCoverageMode == coverageMode);
    SkASSERT(fInstanceRanges.back().fAtlasProxy == atlasProxy);
}
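// Execution: builds one pipeline for the whole op, then issues a
// GrCCPathProcessor draw per instance range, pointing the processor at each
// range's atlas texture in turn.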
void GrCCDrawPathsOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
    SkASSERT(fOwningPerOpListPaths);

    const GrCCPerFlushResources* resources = fOwningPerOpListPaths->fFlushResources.get();
    if (!resources) {
        return;  // Setup failed.
    }

    GrPipeline::InitArgs initArgs;
    initArgs.fCaps = &flushState->caps();
    initArgs.fDstProxy = flushState->drawOpArgs().fDstProxy;
    initArgs.fOutputSwizzle = flushState->drawOpArgs().fOutputSwizzle;
    auto clip = flushState->detachAppliedClip();
    GrPipeline::FixedDynamicState fixedDynamicState(clip.scissorState().rect());
    GrPipeline pipeline(initArgs, std::move(fProcessors), std::move(clip));

    int baseInstance = fBaseInstance;
    SkASSERT(baseInstance >= 0);  // Make sure setupResources() has been called.

    for (const InstanceRange& range : fInstanceRanges) {
        SkASSERT(range.fEndInstanceIdx > baseInstance);

        const GrTextureProxy* atlas = range.fAtlasProxy;
        SkASSERT(atlas->isInstantiated());

        GrCCPathProcessor pathProc(
                range.fCoverageMode, atlas->peekTexture(), atlas->textureSwizzle(), atlas->origin(),
                fViewMatrixIfUsingLocalCoords);
        GrTextureProxy* atlasProxy = range.fAtlasProxy;
        fixedDynamicState.fPrimitiveProcessorTextures = &atlasProxy;
        pathProc.drawPaths(flushState, pipeline, &fixedDynamicState, *resources, baseInstance,
                           range.fEndInstanceIdx, this->bounds());

        baseInstance = range.fEndInstanceIdx;
    }
}
452