• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2017 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "GrCoverageCountingPathRenderer.h"
9 
10 #include "GrCaps.h"
11 #include "GrClip.h"
12 #include "GrProxyProvider.h"
13 #include "SkMakeUnique.h"
14 #include "SkPathOps.h"
15 #include "ccpr/GrCCClipProcessor.h"
16 #include "ccpr/GrCCDrawPathsOp.h"
17 #include "ccpr/GrCCPathCache.h"
18 
19 using PathInstance = GrCCPathProcessor::Instance;
20 
IsSupported(const GrCaps & caps)21 bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps) {
22     const GrShaderCaps& shaderCaps = *caps.shaderCaps();
23     return caps.instanceAttribSupport() && shaderCaps.integerSupport() &&
24            shaderCaps.floatIs32Bits() && GrCaps::kNone_MapFlags != caps.mapBufferFlags() &&
25            caps.isConfigTexturable(kAlpha_half_GrPixelConfig) &&
26            caps.isConfigRenderable(kAlpha_half_GrPixelConfig) &&
27            caps.isConfigTexturable(kAlpha_8_GrPixelConfig) &&
28            caps.isConfigRenderable(kAlpha_8_GrPixelConfig) &&
29            caps.halfFloatVertexAttributeSupport() &&
30            !caps.blacklistCoverageCounting();
31 }
32 
CreateIfSupported(const GrCaps & caps,AllowCaching allowCaching,uint32_t contextUniqueID)33 sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
34         const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
35     return sk_sp<GrCoverageCountingPathRenderer>((IsSupported(caps))
36             ? new GrCoverageCountingPathRenderer(allowCaching, contextUniqueID)
37             : nullptr);
38 }
39 
GrCoverageCountingPathRenderer(AllowCaching allowCaching,uint32_t contextUniqueID)40 GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching,
41                                                                uint32_t contextUniqueID) {
42     if (AllowCaching::kYes == allowCaching) {
43         fPathCache = skstd::make_unique<GrCCPathCache>(contextUniqueID);
44     }
45 }
46 
lookupPendingPaths(uint32_t opListID)47 GrCCPerOpListPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opListID) {
48     auto it = fPendingPaths.find(opListID);
49     if (fPendingPaths.end() == it) {
50         sk_sp<GrCCPerOpListPaths> paths = sk_make_sp<GrCCPerOpListPaths>();
51         it = fPendingPaths.insert(std::make_pair(opListID, std::move(paths))).first;
52     }
53     return it->second.get();
54 }
55 
// Decides whether CCPR can (or should) render the given shape. Hard requirements
// reject perspective, path effects, inverse fills, and non-coverage AA; beyond
// that, per-style heuristics route atlas-unfriendly paths to other renderers
// (kNo) or offer them only as a fallback (kAsBackup).
GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
        const CanDrawPathArgs& args) const {
    const GrShape& shape = *args.fShape;
    // Hard requirements: coverage AA, no path effect, no perspective, no inverse fill.
    if (GrAAType::kCoverage != args.fAAType || shape.style().hasPathEffect() ||
        args.fViewMatrix->hasPerspective() || shape.inverseFilled()) {
        return CanDrawPath::kNo;
    }

    SkPath path;
    shape.asPath(&path);

    const SkStrokeRec& stroke = shape.style().strokeRec();
    switch (stroke.getStyle()) {
        case SkStrokeRec::kFill_Style: {
            // Clip the path's device-space bounds so the heuristics below reflect the
            // pixels that will actually be rendered.
            SkRect devBounds;
            args.fViewMatrix->mapRect(&devBounds, path.getBounds());

            SkIRect clippedIBounds;
            devBounds.roundOut(&clippedIBounds);
            if (!clippedIBounds.intersect(*args.fClipConservativeBounds)) {
                // The path is completely clipped away. Our code will eventually notice this before
                // doing any real work.
                return CanDrawPath::kYes;
            }

            int64_t numPixels = sk_64_mul(clippedIBounds.height(), clippedIBounds.width());
            if (path.countVerbs() > 1000 && path.countPoints() > numPixels) {
                // This is a complicated path that has more vertices than pixels! Let's let the SW
                // renderer have this one: It will probably be faster and a bitmap will require less
                // total memory on the GPU than CCPR instance buffers would for the raw path data.
                return CanDrawPath::kNo;
            }

            if (numPixels > 256 * 256) {
                // Large paths can blow up the atlas fast. And they are not ideal for a two-pass
                // rendering algorithm. Give the simpler direct renderers a chance before we commit
                // to drawing it.
                return CanDrawPath::kAsBackup;
            }

            if (args.fShape->hasUnstyledKey() && path.countVerbs() > 50) {
                // Complex paths do better cached in an SDF, if the renderer will accept them.
                return CanDrawPath::kAsBackup;
            }

            return CanDrawPath::kYes;
        }

        case SkStrokeRec::kStroke_Style:
            if (!args.fViewMatrix->isSimilarity()) {
                // The stroker currently only supports rigid-body transfoms for the stroke lines
                // themselves. This limitation doesn't affect hairlines since their stroke lines are
                // defined relative to device space.
                return CanDrawPath::kNo;
            }
            // fallthru
        case SkStrokeRec::kHairline_Style: {
            float inflationRadius;
            GetStrokeDevWidth(*args.fViewMatrix, stroke, &inflationRadius);
            if (!(inflationRadius <= kMaxBoundsInflationFromStroke)) {
                // Let extremely wide strokes be converted to fill paths and drawn by the CCPR
                // filler instead. (Cast the logic negatively in order to also catch r=NaN.)
                return CanDrawPath::kNo;
            }
            SkASSERT(!SkScalarIsNaN(inflationRadius));
            if (SkPathPriv::ConicWeightCnt(path)) {
                // The stroker does not support conics yet.
                return CanDrawPath::kNo;
            }
            return CanDrawPath::kYes;
        }

        case SkStrokeRec::kStrokeAndFill_Style:
            return CanDrawPath::kNo;
    }

    SK_ABORT("Invalid stroke style.");
    return CanDrawPath::kNo;
}
135 
onDrawPath(const DrawPathArgs & args)136 bool GrCoverageCountingPathRenderer::onDrawPath(const DrawPathArgs& args) {
137     SkASSERT(!fFlushing);
138 
139     SkIRect clipIBounds;
140     GrRenderTargetContext* rtc = args.fRenderTargetContext;
141     args.fClip->getConservativeBounds(rtc->width(), rtc->height(), &clipIBounds, nullptr);
142 
143     auto op = GrCCDrawPathsOp::Make(args.fContext, clipIBounds, *args.fViewMatrix, *args.fShape,
144                                     std::move(args.fPaint));
145     this->recordOp(std::move(op), args);
146     return true;
147 }
148 
recordOp(std::unique_ptr<GrCCDrawPathsOp> op,const DrawPathArgs & args)149 void GrCoverageCountingPathRenderer::recordOp(std::unique_ptr<GrCCDrawPathsOp> op,
150                                               const DrawPathArgs& args) {
151     if (op) {
152         auto addToOwningPerOpListPaths = [this](GrOp* op, uint32_t opListID) {
153             op->cast<GrCCDrawPathsOp>()->addToOwningPerOpListPaths(
154                     sk_ref_sp(this->lookupPendingPaths(opListID)));
155         };
156         args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op), addToOwningPerOpListPaths);
157     }
158 }
159 
makeClipProcessor(uint32_t opListID,const SkPath & deviceSpacePath,const SkIRect & accessRect,int rtWidth,int rtHeight,const GrCaps & caps)160 std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
161         uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect, int rtWidth,
162         int rtHeight, const GrCaps& caps) {
163     using MustCheckBounds = GrCCClipProcessor::MustCheckBounds;
164 
165     SkASSERT(!fFlushing);
166 
167     GrCCClipPath& clipPath =
168             this->lookupPendingPaths(opListID)->fClipPaths[deviceSpacePath.getGenerationID()];
169     if (!clipPath.isInitialized()) {
170         // This ClipPath was just created during lookup. Initialize it.
171         const SkRect& pathDevBounds = deviceSpacePath.getBounds();
172         if (SkTMax(pathDevBounds.height(), pathDevBounds.width()) > kPathCropThreshold) {
173             // The path is too large. Crop it or analytic AA can run out of fp32 precision.
174             SkPath croppedPath;
175             int maxRTSize = caps.maxRenderTargetSize();
176             CropPath(deviceSpacePath, SkIRect::MakeWH(maxRTSize, maxRTSize), &croppedPath);
177             clipPath.init(croppedPath, accessRect, rtWidth, rtHeight, caps);
178         } else {
179             clipPath.init(deviceSpacePath, accessRect, rtWidth, rtHeight, caps);
180         }
181     } else {
182         clipPath.addAccess(accessRect);
183     }
184 
185     bool mustCheckBounds = !clipPath.pathDevIBounds().contains(accessRect);
186     return skstd::make_unique<GrCCClipProcessor>(&clipPath, MustCheckBounds(mustCheckBounds),
187                                                  deviceSpacePath.getFillType());
188 }
189 
// Called by the onFlush system before the listed opLists execute. Moves each
// opList's pending paths into fFlushingPaths, tallies resource requirements,
// allocates the per-flush atlas resources, renders every path/clip into the
// atlas(es), and appends the atlas render target contexts to 'out'. The steps
// below are strictly ordered; each depends on the one before it.
void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
                                              const uint32_t* opListIDs, int numOpListIDs,
                                              SkTArray<sk_sp<GrRenderTargetContext>>* out) {
    using DoCopiesToA8Coverage = GrCCDrawPathsOp::DoCopiesToA8Coverage;
    SkASSERT(!fFlushing);
    SkASSERT(fFlushingPaths.empty());
    SkDEBUGCODE(fFlushing = true);

    if (fPathCache) {
        fPathCache->doPreFlushProcessing();
    }

    if (fPendingPaths.empty()) {
        return;  // Nothing to draw.
    }

    // Specs accumulate the instance/pixel counts needed to size the atlases and buffers.
    GrCCPerFlushResourceSpecs specs;
    int maxPreferredRTSize = onFlushRP->caps()->maxPreferredRenderTargetSize();
    specs.fCopyAtlasSpecs.fMaxPreferredTextureSize = SkTMin(2048, maxPreferredRTSize);
    SkASSERT(0 == specs.fCopyAtlasSpecs.fMinTextureSize);
    specs.fRenderedAtlasSpecs.fMaxPreferredTextureSize = maxPreferredRTSize;
    specs.fRenderedAtlasSpecs.fMinTextureSize = SkTMin(512, maxPreferredRTSize);

    // Move the per-opList paths that are about to be flushed from fPendingPaths to fFlushingPaths,
    // and count them up so we can preallocate buffers.
    fFlushingPaths.reserve(numOpListIDs);
    for (int i = 0; i < numOpListIDs; ++i) {
        auto iter = fPendingPaths.find(opListIDs[i]);
        if (fPendingPaths.end() == iter) {
            continue;  // No paths on this opList.
        }

        fFlushingPaths.push_back(std::move(iter->second));
        fPendingPaths.erase(iter);

        for (GrCCDrawPathsOp* op : fFlushingPaths.back()->fDrawOps) {
            op->accountForOwnPaths(fPathCache.get(), onFlushRP, &specs);
        }
        for (const auto& clipsIter : fFlushingPaths.back()->fClipPaths) {
            clipsIter.second.accountForOwnPath(&specs);
        }
    }

    if (specs.isEmpty()) {
        return;  // Nothing to draw.
    }

    // Determine if there are enough reusable paths from last flush for it to be worth our time to
    // copy them to cached atlas(es).
    int numCopies = specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kFillIdx] +
                    specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kStrokeIdx];
    auto doCopies = DoCopiesToA8Coverage(numCopies > 100 ||
                                         specs.fCopyAtlasSpecs.fApproxNumPixels > 256 * 256);
    if (numCopies && DoCopiesToA8Coverage::kNo == doCopies) {
        specs.cancelCopies();
    }

    auto resources = sk_make_sp<GrCCPerFlushResources>(onFlushRP, specs);
    if (!resources->isMapped()) {
        return;  // Some allocation failed.
    }

    // Layout the atlas(es) and parse paths.
    for (const auto& flushingPaths : fFlushingPaths) {
        for (GrCCDrawPathsOp* op : flushingPaths->fDrawOps) {
            op->setupResources(fPathCache.get(), onFlushRP, resources.get(), doCopies);
        }
        for (auto& clipsIter : flushingPaths->fClipPaths) {
            clipsIter.second.renderPathInAtlas(resources.get(), onFlushRP);
        }
    }

    if (fPathCache) {
        // Purge invalidated textures from previous atlases *before* calling finalize(). That way,
        // the underlying textures objects can be freed up and reused for the next atlases.
        fPathCache->purgeInvalidatedAtlasTextures(onFlushRP);
    }

    // Allocate resources and then render the atlas(es).
    if (!resources->finalize(onFlushRP, out)) {
        return;
    }

    // Commit flushing paths to the resources once they are successfully completed.
    for (auto& flushingPaths : fFlushingPaths) {
        SkASSERT(!flushingPaths->fFlushResources);
        flushingPaths->fFlushResources = resources;
    }
}
279 
postFlush(GrDeferredUploadToken,const uint32_t * opListIDs,int numOpListIDs)280 void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opListIDs,
281                                                int numOpListIDs) {
282     SkASSERT(fFlushing);
283 
284     if (!fFlushingPaths.empty()) {
285         // In DDL mode these aren't guaranteed to be deleted so we must clear out the perFlush
286         // resources manually.
287         for (auto& flushingPaths : fFlushingPaths) {
288             flushingPaths->fFlushResources = nullptr;
289         }
290 
291         // We wait to erase these until after flush, once Ops and FPs are done accessing their data.
292         fFlushingPaths.reset();
293     }
294 
295     SkDEBUGCODE(fFlushing = false);
296 }
297 
purgeCacheEntriesOlderThan(GrProxyProvider * proxyProvider,const GrStdSteadyClock::time_point & purgeTime)298 void GrCoverageCountingPathRenderer::purgeCacheEntriesOlderThan(
299         GrProxyProvider* proxyProvider, const GrStdSteadyClock::time_point& purgeTime) {
300     if (fPathCache) {
301         fPathCache->purgeEntriesOlderThan(proxyProvider, purgeTime);
302     }
303 }
304 
CropPath(const SkPath & path,const SkIRect & cropbox,SkPath * out)305 void GrCoverageCountingPathRenderer::CropPath(const SkPath& path, const SkIRect& cropbox,
306                                               SkPath* out) {
307     SkPath cropboxPath;
308     cropboxPath.addRect(SkRect::Make(cropbox));
309     if (!Op(cropboxPath, path, kIntersect_SkPathOp, out)) {
310         // This can fail if the PathOps encounter NaN or infinities.
311         out->reset();
312     }
313     out->setIsVolatile(true);
314 }
315 
GetStrokeDevWidth(const SkMatrix & m,const SkStrokeRec & stroke,float * inflationRadius)316 float GrCoverageCountingPathRenderer::GetStrokeDevWidth(const SkMatrix& m,
317                                                         const SkStrokeRec& stroke,
318                                                         float* inflationRadius) {
319     float strokeDevWidth;
320     if (stroke.isHairlineStyle()) {
321         strokeDevWidth = 1;
322     } else {
323         SkASSERT(SkStrokeRec::kStroke_Style == stroke.getStyle());
324         SkASSERT(m.isSimilarity());  // Otherwise matrixScaleFactor = m.getMaxScale().
325         float matrixScaleFactor = SkVector::Length(m.getScaleX(), m.getSkewY());
326         strokeDevWidth = stroke.getWidth() * matrixScaleFactor;
327     }
328     if (inflationRadius) {
329         // Inflate for a minimum stroke width of 1. In some cases when the stroke is less than 1px
330         // wide, we may inflate it to 1px and instead reduce the opacity.
331         *inflationRadius = SkStrokeRec::GetInflationRadius(
332                 stroke.getJoin(), stroke.getMiter(), stroke.getCap(), SkTMax(strokeDevWidth, 1.f));
333     }
334     return strokeDevWidth;
335 }
336