/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrCCPerFlushResources.h"

#include "GrClip.h"
#include "GrMemoryPool.h"
#include "GrOnFlushResourceProvider.h"
#include "GrSurfaceContextPriv.h"
#include "GrRenderTargetContext.h"
#include "GrShape.h"
#include "SkMakeUnique.h"
#include "ccpr/GrCCPathCache.h"

using FillBatchID = GrCCFiller::BatchID;
using StrokeBatchID = GrCCStroker::BatchID;
using PathInstance = GrCCPathProcessor::Instance;

static constexpr int kFillIdx = GrCCPerFlushResourceSpecs::kFillIdx;
static constexpr int kStrokeIdx = GrCCPerFlushResourceSpecs::kStrokeIdx;

namespace {

// Base class for an Op that renders a CCPR atlas.
class AtlasOp : public GrDrawOp {
public:
    FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
    GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*) override {
        return GrProcessorSet::EmptySetAnalysis();
    }
    CombineResult onCombineIfPossible(GrOp* other, const GrCaps&) override {
        // We will only make multiple copy ops if they have different source proxies.
        // TODO: make use of texture chaining.
        return CombineResult::kCannotCombine;
    }
    void onPrepare(GrOpFlushState*) override {}

protected:
    AtlasOp(uint32_t classID, sk_sp<const GrCCPerFlushResources> resources,
            const SkISize& drawBounds)
            : GrDrawOp(classID)
            , fResources(std::move(resources)) {
        this->setBounds(SkRect::MakeIWH(drawBounds.width(), drawBounds.height()),
                        GrOp::HasAABloat::kNo, GrOp::IsZeroArea::kNo);
    }

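    // Holds a ref on the per-flush resources so they stay alive until this op executes.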
    const sk_sp<const GrCCPerFlushResources> fResources;
};

// Copies paths from a cached coverage count atlas into an 8-bit literal-coverage atlas.
class CopyAtlasOp : public AtlasOp {
public:
    DEFINE_OP_CLASS_ID

    static std::unique_ptr<GrDrawOp> Make(GrContext* context,
                                          sk_sp<const GrCCPerFlushResources> resources,
                                          sk_sp<GrTextureProxy> copyProxy, int baseInstance,
                                          int endInstance, const SkISize& drawBounds) {
        GrOpMemoryPool* pool = context->contextPriv().opMemoryPool();

        return pool->allocate<CopyAtlasOp>(std::move(resources), std::move(copyProxy),
                                           baseInstance, endInstance, drawBounds);
    }

    const char* name() const override { return "CopyAtlasOp (CCPR)"; }
    void visitProxies(const VisitProxyFunc& fn, VisitorType) const override { fn(fSrcProxy.get()); }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        SkASSERT(fSrcProxy);
        GrPipeline::FixedDynamicState dynamicState;
        auto srcProxy = fSrcProxy.get();
        dynamicState.fPrimitiveProcessorTextures = &srcProxy;

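        // Blend with kSrc so the copied coverage overwrites any existing atlas contents.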
        GrPipeline pipeline(GrScissorTest::kDisabled, SkBlendMode::kSrc);
        GrCCPathProcessor pathProc(srcProxy);
        pathProc.drawPaths(flushState, pipeline, &dynamicState, *fResources, fBaseInstance,
                           fEndInstance, this->bounds());
    }

private:
    friend class ::GrOpMemoryPool; // for ctor

    CopyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, sk_sp<GrTextureProxy> srcProxy,
                int baseInstance, int endInstance, const SkISize& drawBounds)
            : AtlasOp(ClassID(), std::move(resources), drawBounds)
            , fSrcProxy(srcProxy)
            , fBaseInstance(baseInstance)
            , fEndInstance(endInstance) {
    }
    sk_sp<GrTextureProxy> fSrcProxy;
    const int fBaseInstance;
    const int fEndInstance;
};

// Renders coverage counts to a CCPR atlas using the resources' pre-filled GrCCFiller and
// GrCCStroker.
class RenderAtlasOp : public AtlasOp {
public:
    DEFINE_OP_CLASS_ID

    static std::unique_ptr<GrDrawOp> Make(GrContext* context,
                                          sk_sp<const GrCCPerFlushResources> resources,
                                          FillBatchID fillBatchID, StrokeBatchID strokeBatchID,
                                          const SkISize& drawBounds) {
        GrOpMemoryPool* pool = context->contextPriv().opMemoryPool();

        return pool->allocate<RenderAtlasOp>(std::move(resources), fillBatchID, strokeBatchID,
                                             drawBounds);
    }

    // GrDrawOp interface.
    const char* name() const override { return "RenderAtlasOp (CCPR)"; }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        fResources->filler().drawFills(flushState, fFillBatchID, fDrawBounds);
        fResources->stroker().drawStrokes(flushState, fStrokeBatchID, fDrawBounds);
    }

private:
    friend class ::GrOpMemoryPool; // for ctor

    RenderAtlasOp(sk_sp<const GrCCPerFlushResources> resources, FillBatchID fillBatchID,
                  StrokeBatchID strokeBatchID, const SkISize& drawBounds)
            : AtlasOp(ClassID(), std::move(resources), drawBounds)
            , fFillBatchID(fillBatchID)
            , fStrokeBatchID(strokeBatchID)
            , fDrawBounds(SkIRect::MakeWH(drawBounds.width(), drawBounds.height())) {
    }

    const FillBatchID fFillBatchID;
    const StrokeBatchID fStrokeBatchID;
    const SkIRect fDrawBounds;
};

}  // namespace

static int inst_buffer_count(const GrCCPerFlushResourceSpecs& specs) {
    return specs.fNumCachedPaths +
           // Copies get two instances per draw: 1 copy + 1 draw.
           (specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]) * 2 +
           specs.fNumRenderedPaths[kFillIdx] + specs.fNumRenderedPaths[kStrokeIdx];
           // No clips in instance buffers.
}

GrCCPerFlushResources::GrCCPerFlushResources(GrOnFlushResourceProvider* onFlushRP,
                                             const GrCCPerFlushResourceSpecs& specs)
          // Overallocate by one point so we can call Sk4f::Store at the final SkPoint in the array.
          // (See transform_path_pts below.)
          // FIXME: instead use built-in instructions to write only the first two lanes of an Sk4f.
        : fLocalDevPtsBuffer(SkTMax(specs.fRenderedPathStats[kFillIdx].fMaxPointsPerPath,
                                    specs.fRenderedPathStats[kStrokeIdx].fMaxPointsPerPath) + 1)
        , fFiller(specs.fNumRenderedPaths[kFillIdx] + specs.fNumClipPaths,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalSkPoints,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalSkVerbs,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalConicWeights)
        , fStroker(specs.fNumRenderedPaths[kStrokeIdx],
                   specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkPoints,
                   specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkVerbs)
        , fCopyAtlasStack(GrCCAtlas::CoverageType::kA8_LiteralCoverage, specs.fCopyAtlasSpecs,
                          onFlushRP->caps())
        , fRenderedAtlasStack(GrCCAtlas::CoverageType::kFP16_CoverageCount,
                              specs.fRenderedAtlasSpecs, onFlushRP->caps())
        , fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
        , fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
        , fInstanceBuffer(onFlushRP->makeBuffer(kVertex_GrBufferType,
                                                inst_buffer_count(specs) * sizeof(PathInstance)))
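          // Copy instances are packed at the front of the instance buffer; instances for newly
          // rendered paths begin immediately after the last copy instance.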
        , fNextCopyInstanceIdx(0)
        , fNextPathInstanceIdx(specs.fNumCopiedPaths[kFillIdx] +
                               specs.fNumCopiedPaths[kStrokeIdx]) {
    if (!fIndexBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR index buffer. No paths will be drawn.\n");
        return;
    }
    if (!fVertexBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR vertex buffer. No paths will be drawn.\n");
        return;
    }
    if (!fInstanceBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR instance buffer. No paths will be drawn.\n");
        return;
    }
    fPathInstanceData = static_cast<PathInstance*>(fInstanceBuffer->map());
    SkASSERT(fPathInstanceData);
    SkDEBUGCODE(fEndCopyInstance =
                        specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]);
    SkDEBUGCODE(fEndPathInstance = inst_buffer_count(specs));
}

void GrCCPerFlushResources::upgradeEntryToLiteralCoverageAtlas(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCPathCacheEntry* entry,
        GrCCPathProcessor::DoEvenOddFill evenOdd) {
    using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;
    SkASSERT(this->isMapped());
    SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);

    const GrCCCachedAtlas* cachedAtlas = entry->cachedAtlas();
    SkASSERT(cachedAtlas);
    SkASSERT(cachedAtlas->getOnFlushProxy());

    if (GrCCAtlas::CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
        // This entry has already been upgraded to literal coverage. The path must have been drawn
        // multiple times during the flush.
        SkDEBUGCODE(--fEndCopyInstance);
        return;
    }

    SkIVector newAtlasOffset;
    if (GrCCAtlas* retiredAtlas = fCopyAtlasStack.addRect(entry->devIBounds(), &newAtlasOffset)) {
        // We did not fit in the previous copy atlas and it was retired. We will render the ranges
        // up until fCopyPathRanges.count() into the retired atlas during finalize().
        retiredAtlas->setFillBatchID(fCopyPathRanges.count());
        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
    }

    this->recordCopyPathInstance(*entry, newAtlasOffset, evenOdd,
                                 sk_ref_sp(cachedAtlas->getOnFlushProxy()));

    sk_sp<GrTexture> previousAtlasTexture =
            sk_ref_sp(cachedAtlas->getOnFlushProxy()->peekTexture());
    GrCCAtlas* newAtlas = &fCopyAtlasStack.current();
    if (ReleaseAtlasResult::kDidInvalidateFromCache ==
            entry->upgradeToLiteralCoverageAtlas(pathCache, onFlushRP, newAtlas, newAtlasOffset)) {
        // This texture just got booted out of the cache. Keep it around, in case we might be able
        // to recycle it for a new atlas. We can recycle it because copying happens before rendering
        // new paths, and every path from the atlas that we're planning to use this flush will be
        // copied to a new atlas. We'll never copy some and leave others.
        fRecyclableAtlasTextures.push_back(std::move(previousAtlasTexture));
    }
}

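// Inserts a new element at index "idx", shifting the tail of the array back with a raw memcpy.
// This is only safe because the element type is trivially relocatable: the shifted elements are
// moved as raw bytes and are never destructed or re-constructed at their new locations.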
template<typename T, typename... Args>
static void emplace_at_memcpy(SkTArray<T>* array, int idx, Args&&... args) {
    if (int moveCount = array->count() - idx) {
        array->push_back();
        T* location = array->begin() + idx;
        memcpy(location+1, location, moveCount * sizeof(T));
        new (location) T(std::forward<Args>(args)...);
    } else {
        array->emplace_back(std::forward<Args>(args)...);
    }
}

void GrCCPerFlushResources::recordCopyPathInstance(const GrCCPathCacheEntry& entry,
                                                   const SkIVector& newAtlasOffset,
                                                   GrCCPathProcessor::DoEvenOddFill evenOdd,
                                                   sk_sp<GrTextureProxy> srcProxy) {
    SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);

    // Write the instance at the back of the array.
    int currentInstanceIdx = fNextCopyInstanceIdx++;
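    // Opaque white: four 16-bit half floats (SK_Half1 == 1.0) packed into one uint64_t color.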
    constexpr uint64_t kWhite = (((uint64_t) SK_Half1) <<  0) |
                                (((uint64_t) SK_Half1) << 16) |
                                (((uint64_t) SK_Half1) << 32) |
                                (((uint64_t) SK_Half1) << 48);
    fPathInstanceData[currentInstanceIdx].set(entry, newAtlasOffset, kWhite, evenOdd);

    // Percolate the instance forward until it's contiguous with other instances that share the same
    // proxy.
    for (int i = fCopyPathRanges.count() - 1; i >= fCurrCopyAtlasRangesIdx; --i) {
        if (fCopyPathRanges[i].fSrcProxy == srcProxy) {
            ++fCopyPathRanges[i].fCount;
            return;
        }
        int rangeFirstInstanceIdx = currentInstanceIdx - fCopyPathRanges[i].fCount;
        std::swap(fPathInstanceData[rangeFirstInstanceIdx], fPathInstanceData[currentInstanceIdx]);
        currentInstanceIdx = rangeFirstInstanceIdx;
    }

    // An instance with this particular proxy did not yet exist in the array. Add a range for it.
    emplace_at_memcpy(&fCopyPathRanges, fCurrCopyAtlasRangesIdx, std::move(srcProxy), 1);
}

static bool transform_path_pts(const SkMatrix& m, const SkPath& path,
                               const SkAutoSTArray<32, SkPoint>& outDevPts, SkRect* devBounds,
                               SkRect* devBounds45) {
    const SkPoint* pts = SkPathPriv::PointData(path);
    int numPts = path.countPoints();
    SkASSERT(numPts + 1 <= outDevPts.count());
    SkASSERT(numPts);

    // m45 transforms path points into "45 degree" device space. A bounding box in this space gives
    // the circumscribing octagon's diagonals. We could use SK_ScalarRoot2Over2, but an orthonormal
    // transform is not necessary as long as the shader uses the correct inverse.
    SkMatrix m45;
    m45.setSinCos(1, 1);
    m45.preConcat(m);

    // X,Y,T are two parallel view matrices that accumulate two bounding boxes as they map points:
    // device-space bounds and "45 degree" device-space bounds (| 1 -1 | * devCoords).
    //                                                          | 1  1 |
    Sk4f X = Sk4f(m.getScaleX(), m.getSkewY(), m45.getScaleX(), m45.getSkewY());
    Sk4f Y = Sk4f(m.getSkewX(), m.getScaleY(), m45.getSkewX(), m45.getScaleY());
    Sk4f T = Sk4f(m.getTranslateX(), m.getTranslateY(), m45.getTranslateX(), m45.getTranslateY());

    // Map the path's points to device space and accumulate bounding boxes.
    Sk4f devPt = SkNx_fma(Y, Sk4f(pts[0].y()), T);
    devPt = SkNx_fma(X, Sk4f(pts[0].x()), devPt);
    Sk4f topLeft = devPt;
    Sk4f bottomRight = devPt;

    // Store all 4 values [dev.x, dev.y, dev45.x, dev45.y]. We are only interested in the first two,
    // and will overwrite [dev45.x, dev45.y] with the next point. This is why the dst buffer must
    // be at least one larger than the number of points.
    devPt.store(&outDevPts[0]);

    for (int i = 1; i < numPts; ++i) {
        devPt = SkNx_fma(Y, Sk4f(pts[i].y()), T);
        devPt = SkNx_fma(X, Sk4f(pts[i].x()), devPt);
        topLeft = Sk4f::Min(topLeft, devPt);
        bottomRight = Sk4f::Max(bottomRight, devPt);
        devPt.store(&outDevPts[i]);
    }

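    // x*0 is 0 for finite x but NaN for infinity and NaN, so any non-finite lane fails the
    // comparison below.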
    if (!(Sk4f(0) == topLeft*0).allTrue() || !(Sk4f(0) == bottomRight*0).allTrue()) {
        // The bounds are infinite or NaN.
        return false;
    }

    SkPoint topLeftPts[2], bottomRightPts[2];
    topLeft.store(topLeftPts);
    bottomRight.store(bottomRightPts);
    devBounds->setLTRB(topLeftPts[0].x(), topLeftPts[0].y(), bottomRightPts[0].x(),
                       bottomRightPts[0].y());
    devBounds45->setLTRB(topLeftPts[1].x(), topLeftPts[1].y(), bottomRightPts[1].x(),
                         bottomRightPts[1].y());
    return true;
}

GrCCAtlas* GrCCPerFlushResources::renderShapeInAtlas(
        const SkIRect& clipIBounds, const SkMatrix& m, const GrShape& shape, float strokeDevWidth,
        SkRect* devBounds, SkRect* devBounds45, SkIRect* devIBounds, SkIVector* devToAtlasOffset) {
    SkASSERT(this->isMapped());
    SkASSERT(fNextPathInstanceIdx < fEndPathInstance);

    SkPath path;
    shape.asPath(&path);
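    // If the path rejects (it is empty, has non-finite bounds, or is clipped out), it will never
    // be drawn; shrink the debug-only instance end counter so the asserts in finalize() stay valid.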
    if (path.isEmpty()) {
        SkDEBUGCODE(--fEndPathInstance);
        return nullptr;
    }
    if (!transform_path_pts(m, path, fLocalDevPtsBuffer, devBounds, devBounds45)) {
        // The transformed path had infinite or NaN bounds.
        SkDEBUGCODE(--fEndPathInstance);
        return nullptr;
    }

    const SkStrokeRec& stroke = shape.style().strokeRec();
    if (!stroke.isFillStyle()) {
        float r = SkStrokeRec::GetInflationRadius(stroke.getJoin(), stroke.getMiter(),
                                                  stroke.getCap(), strokeDevWidth);
        devBounds->outset(r, r);
        // devBounds45 is in (| 1 -1 | * devCoords) space.
        //                    | 1  1 |
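        // That transform scales lengths by sqrt(2), so the outset radius must be scaled to match.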
        devBounds45->outset(r*SK_ScalarSqrt2, r*SK_ScalarSqrt2);
    }
    devBounds->roundOut(devIBounds);

    GrScissorTest scissorTest;
    SkIRect clippedPathIBounds;
    if (!this->placeRenderedPathInAtlas(clipIBounds, *devIBounds, &scissorTest, &clippedPathIBounds,
                                        devToAtlasOffset)) {
        SkDEBUGCODE(--fEndPathInstance);
        return nullptr;  // Path was degenerate or clipped away.
    }

    if (stroke.isFillStyle()) {
        SkASSERT(0 == strokeDevWidth);
        fFiller.parseDeviceSpaceFill(path, fLocalDevPtsBuffer.begin(), scissorTest,
                                     clippedPathIBounds, *devToAtlasOffset);
    } else {
        // Stroke-and-fill is not yet supported.
        SkASSERT(SkStrokeRec::kStroke_Style == stroke.getStyle() || stroke.isHairlineStyle());
        SkASSERT(!stroke.isHairlineStyle() || 1 == strokeDevWidth);
        fStroker.parseDeviceSpaceStroke(path, fLocalDevPtsBuffer.begin(), stroke, strokeDevWidth,
                                        scissorTest, clippedPathIBounds, *devToAtlasOffset);
    }
    return &fRenderedAtlasStack.current();
}

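// Renders a device-space path into the atlas without reserving a path instance. (Used for clip
// paths; see "No clips in instance buffers" in inst_buffer_count().)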
const GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
        const SkIRect& clipIBounds, const SkPath& devPath, const SkIRect& devPathIBounds,
        SkIVector* devToAtlasOffset) {
    SkASSERT(this->isMapped());

    if (devPath.isEmpty()) {
        return nullptr;
    }

    GrScissorTest scissorTest;
    SkIRect clippedPathIBounds;
    if (!this->placeRenderedPathInAtlas(clipIBounds, devPathIBounds, &scissorTest,
                                        &clippedPathIBounds, devToAtlasOffset)) {
        return nullptr;
    }

    fFiller.parseDeviceSpaceFill(devPath, SkPathPriv::PointData(devPath), scissorTest,
                                 clippedPathIBounds, *devToAtlasOffset);
    return &fRenderedAtlasStack.current();
}

bool GrCCPerFlushResources::placeRenderedPathInAtlas(const SkIRect& clipIBounds,
                                                     const SkIRect& pathIBounds,
                                                     GrScissorTest* scissorTest,
                                                     SkIRect* clippedPathIBounds,
                                                     SkIVector* devToAtlasOffset) {
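    // If the clip fully contains the path we can render without a scissor; otherwise intersect
    // the path bounds with the clip and enable the scissor test. An empty intersection rejects
    // the path entirely.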
    if (clipIBounds.contains(pathIBounds)) {
        *clippedPathIBounds = pathIBounds;
        *scissorTest = GrScissorTest::kDisabled;
    } else if (clippedPathIBounds->intersect(clipIBounds, pathIBounds)) {
        *scissorTest = GrScissorTest::kEnabled;
    } else {
        return false;
    }

    if (GrCCAtlas* retiredAtlas =
                fRenderedAtlasStack.addRect(*clippedPathIBounds, devToAtlasOffset)) {
        // We did not fit in the previous coverage count atlas and it was retired. Close the path
        // parser's current batch (which does not yet include the path we just parsed). We will
        // render this batch into the retired atlas during finalize().
        retiredAtlas->setFillBatchID(fFiller.closeCurrentBatch());
        retiredAtlas->setStrokeBatchID(fStroker.closeCurrentBatch());
    }
    return true;
}

bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
                                     SkTArray<sk_sp<GrRenderTargetContext>>* out) {
    SkASSERT(this->isMapped());
    SkASSERT(fNextPathInstanceIdx == fEndPathInstance);
    SkASSERT(fNextCopyInstanceIdx == fEndCopyInstance);

    fInstanceBuffer->unmap();
    fPathInstanceData = nullptr;

    if (!fCopyAtlasStack.empty()) {
        fCopyAtlasStack.current().setFillBatchID(fCopyPathRanges.count());
        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
    }
    if (!fRenderedAtlasStack.empty()) {
        fRenderedAtlasStack.current().setFillBatchID(fFiller.closeCurrentBatch());
        fRenderedAtlasStack.current().setStrokeBatchID(fStroker.closeCurrentBatch());
    }

    // Build the GPU buffers to render path coverage counts. (This must not happen until after the
    // final calls to fFiller/fStroker.closeCurrentBatch().)
    if (!fFiller.prepareToDraw(onFlushRP)) {
        return false;
    }
    if (!fStroker.prepareToDraw(onFlushRP)) {
        return false;
    }

    // Draw the copies from the cached 16-bit coverage count atlas(es) into new 8-bit literal
    // coverage atlas(es).
    int copyRangeIdx = 0;
    int baseCopyInstance = 0;
    for (GrCCAtlasStack::Iter atlas(fCopyAtlasStack); atlas.next();) {
        int endCopyRange = atlas->getFillBatchID();
        SkASSERT(endCopyRange > copyRangeIdx);

        sk_sp<GrRenderTargetContext> rtc = atlas->makeRenderTargetContext(onFlushRP);
        for (; copyRangeIdx < endCopyRange; ++copyRangeIdx) {
            const CopyPathRange& copyRange = fCopyPathRanges[copyRangeIdx];
            int endCopyInstance = baseCopyInstance + copyRange.fCount;
            if (rtc) {
                auto op = CopyAtlasOp::Make(rtc->surfPriv().getContext(), sk_ref_sp(this),
                                            copyRange.fSrcProxy, baseCopyInstance, endCopyInstance,
                                            atlas->drawBounds());
                rtc->addDrawOp(GrNoClip(), std::move(op));
            }
            baseCopyInstance = endCopyInstance;
        }
        out->push_back(std::move(rtc));
    }
    SkASSERT(fCopyPathRanges.count() == copyRangeIdx);
    SkASSERT(fNextCopyInstanceIdx == baseCopyInstance);
    SkASSERT(baseCopyInstance == fEndCopyInstance);

    // Render the coverage count atlas(es).
    for (GrCCAtlasStack::Iter atlas(fRenderedAtlasStack); atlas.next();) {
        // Copies will be finished by the time we get to rendering new atlases. See if we can
        // recycle any previous invalidated atlas textures instead of creating new ones.
        sk_sp<GrTexture> backingTexture;
        for (sk_sp<GrTexture>& texture : fRecyclableAtlasTextures) {
            if (texture && atlas->currentHeight() == texture->height() &&
                    atlas->currentWidth() == texture->width()) {
                backingTexture = skstd::exchange(texture, nullptr);
                break;
            }
        }

        if (auto rtc = atlas->makeRenderTargetContext(onFlushRP, std::move(backingTexture))) {
            auto op = RenderAtlasOp::Make(rtc->surfPriv().getContext(), sk_ref_sp(this),
                                          atlas->getFillBatchID(), atlas->getStrokeBatchID(),
                                          atlas->drawBounds());
            rtc->addDrawOp(GrNoClip(), std::move(op));
            out->push_back(std::move(rtc));
        }
    }

    return true;
}

void GrCCPerFlushResourceSpecs::cancelCopies() {
    // Convert copies to cached draws.
    fNumCachedPaths += fNumCopiedPaths[kFillIdx] + fNumCopiedPaths[kStrokeIdx];
    fNumCopiedPaths[kFillIdx] = fNumCopiedPaths[kStrokeIdx] = 0;
    fCopyPathStats[kFillIdx] = fCopyPathStats[kStrokeIdx] = GrCCRenderedPathStats();
    fCopyAtlasSpecs = GrCCAtlas::Specs();
}
513