/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"

#include "include/pathops/SkPathOps.h"
#include "src/core/SkMakeUnique.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrClip.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/ccpr/GrCCClipProcessor.h"
#include "src/gpu/ccpr/GrCCDrawPathsOp.h"
#include "src/gpu/ccpr/GrCCPathCache.h"

using PathInstance = GrCCPathProcessor::Instance;

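// Reports whether CCPR can run on this GPU and, if so, which atlas coverage mode to use:
// fp16 coverage counts when the caps allow coverage counting, otherwise an A8 multisample atlas.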
bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps, CoverageType* coverageType) {
    const GrShaderCaps& shaderCaps = *caps.shaderCaps();
    GrBackendFormat defaultA8Format = caps.getDefaultBackendFormat(GrColorType::kAlpha_8,
                                                                   GrRenderable::kYes);
    if (caps.driverBlacklistCCPR() || !shaderCaps.integerSupport() ||
        !caps.instanceAttribSupport() || !shaderCaps.floatIs32Bits() ||
        GrCaps::kNone_MapFlags == caps.mapBufferFlags() ||
        !defaultA8Format.isValid() || // This checks both texturable and renderable
        !caps.halfFloatVertexAttributeSupport()) {
        return false;
    }

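    // The hard requirements are satisfied. Prefer the fp16 coverage-count atlas when the caps
    // allow coverage counting; otherwise fall back to the A8 multisample mode below.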
    GrBackendFormat defaultAHalfFormat = caps.getDefaultBackendFormat(GrColorType::kAlpha_F16,
                                                                      GrRenderable::kYes);
    if (caps.allowCoverageCounting() &&
        defaultAHalfFormat.isValid()) { // This checks both texturable and renderable
        if (coverageType) {
            *coverageType = CoverageType::kFP16_CoverageCount;
        }
        return true;
    }

    if (!caps.driverBlacklistMSAACCPR() &&
        caps.internalMultisampleCount(defaultA8Format) > 1 &&
        caps.sampleLocationsSupport() &&
        shaderCaps.sampleVariablesStencilSupport()) {
        if (coverageType) {
            *coverageType = CoverageType::kA8_Multisample;
        }
        return true;
    }

    return false;
}

sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
        const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
    CoverageType coverageType;
    if (IsSupported(caps, &coverageType)) {
        return sk_sp<GrCoverageCountingPathRenderer>(new GrCoverageCountingPathRenderer(
                coverageType, allowCaching, contextUniqueID));
    }
    return nullptr;
}

GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(
        CoverageType coverageType, AllowCaching allowCaching, uint32_t contextUniqueID)
        : fCoverageType(coverageType) {
    if (AllowCaching::kYes == allowCaching) {
        fPathCache = skstd::make_unique<GrCCPathCache>(contextUniqueID);
    }
}

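// Returns the GrCCPerOpListPaths for the given opList, creating it on first use. Pending paths
// accumulate here until preFlush() moves them into fFlushingPaths.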
GrCCPerOpListPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opListID) {
    auto it = fPendingPaths.find(opListID);
    if (fPendingPaths.end() == it) {
        sk_sp<GrCCPerOpListPaths> paths = sk_make_sp<GrCCPerOpListPaths>();
        it = fPendingPaths.insert(std::make_pair(opListID, std::move(paths))).first;
    }
    return it->second.get();
}

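// Decides whether CCPR should handle this draw. Only analytic-AA fills, strokes, and hairlines
// qualify, with no path effect, no perspective, and no inverse fill; the size and complexity
// heuristics below may still defer the path to another renderer.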
GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
        const CanDrawPathArgs& args) const {
    const GrShape& shape = *args.fShape;
    // We use "kCoverage", or analytic AA, no matter what the coverage type of our atlas: Even if
    // the atlas is multisampled, that resolves into analytic coverage before we draw the path to
    // the main canvas.
    if (GrAAType::kCoverage != args.fAAType || shape.style().hasPathEffect() ||
        args.fViewMatrix->hasPerspective() || shape.inverseFilled()) {
        return CanDrawPath::kNo;
    }

    SkPath path;
    shape.asPath(&path);

    const SkStrokeRec& stroke = shape.style().strokeRec();
    switch (stroke.getStyle()) {
        case SkStrokeRec::kFill_Style: {
            SkRect devBounds;
            args.fViewMatrix->mapRect(&devBounds, path.getBounds());

            SkIRect clippedIBounds;
            devBounds.roundOut(&clippedIBounds);
            if (!clippedIBounds.intersect(*args.fClipConservativeBounds)) {
                // The path is completely clipped away. Our code will eventually notice this before
                // doing any real work.
                return CanDrawPath::kYes;
            }

            int64_t numPixels = sk_64_mul(clippedIBounds.height(), clippedIBounds.width());
            if (path.countVerbs() > 1000 && path.countPoints() > numPixels) {
                // This is a complicated path that has more vertices than pixels! Let's let the SW
                // renderer have this one: It will probably be faster and a bitmap will require less
                // total memory on the GPU than CCPR instance buffers would for the raw path data.
                return CanDrawPath::kNo;
            }

            if (numPixels > 256 * 256) {
                // Large paths can blow up the atlas fast. And they are not ideal for a two-pass
                // rendering algorithm. Give the simpler direct renderers a chance before we commit
                // to drawing it.
                return CanDrawPath::kAsBackup;
            }

            if (args.fShape->hasUnstyledKey() && path.countVerbs() > 50) {
                // Complex paths do better cached in an SDF, if the renderer will accept them.
                return CanDrawPath::kAsBackup;
            }

            return CanDrawPath::kYes;
        }

        case SkStrokeRec::kStroke_Style:
            if (!args.fViewMatrix->isSimilarity()) {
                // The stroker currently only supports rigid-body transforms for the stroke lines
                // themselves. This limitation doesn't affect hairlines since their stroke lines are
                // defined relative to device space.
                return CanDrawPath::kNo;
            }
            // fallthru
        case SkStrokeRec::kHairline_Style: {
            if (CoverageType::kFP16_CoverageCount != fCoverageType) {
                // Stroking is not yet supported in MSAA atlas mode.
                return CanDrawPath::kNo;
            }
            float inflationRadius;
            GetStrokeDevWidth(*args.fViewMatrix, stroke, &inflationRadius);
            if (!(inflationRadius <= kMaxBoundsInflationFromStroke)) {
                // Let extremely wide strokes be converted to fill paths and drawn by the CCPR
                // filler instead. (Cast the logic negatively in order to also catch r=NaN.)
                return CanDrawPath::kNo;
            }
            SkASSERT(!SkScalarIsNaN(inflationRadius));
            if (SkPathPriv::ConicWeightCnt(path)) {
                // The stroker does not support conics yet.
                return CanDrawPath::kNo;
            }
            return CanDrawPath::kYes;
        }

        case SkStrokeRec::kStrokeAndFill_Style:
            return CanDrawPath::kNo;
    }

    SK_ABORT("Invalid stroke style.");
}

bool GrCoverageCountingPathRenderer::onDrawPath(const DrawPathArgs& args) {
    SkASSERT(!fFlushing);

    SkIRect clipIBounds;
    GrRenderTargetContext* rtc = args.fRenderTargetContext;
    args.fClip->getConservativeBounds(rtc->width(), rtc->height(), &clipIBounds, nullptr);

    auto op = GrCCDrawPathsOp::Make(args.fContext, clipIBounds, *args.fViewMatrix, *args.fShape,
                                    std::move(args.fPaint));
    this->recordOp(std::move(op), args);
    return true;
}

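// Hands the op off to the render target context. The callback runs once the op has been assigned
// to an opList, and links it to that opList's GrCCPerOpListPaths so its paths are included in the
// next flush.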
void GrCoverageCountingPathRenderer::recordOp(std::unique_ptr<GrCCDrawPathsOp> op,
                                              const DrawPathArgs& args) {
    if (op) {
        auto addToOwningPerOpListPaths = [this](GrOp* op, uint32_t opListID) {
            op->cast<GrCCDrawPathsOp>()->addToOwningPerOpListPaths(
                    sk_ref_sp(this->lookupPendingPaths(opListID)));
        };
        args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op), addToOwningPerOpListPaths);
    }
}

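// Returns a fragment processor that clips coverage against deviceSpacePath, using a mask that
// CCPR renders into an atlas during the flush.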
std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
        uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
        const GrCaps& caps) {
    SkASSERT(!fFlushing);

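    // The path's generation ID serves as the lookup key: the same clip path used multiple times
    // within one opList shares a single GrCCClipPath (and thus a single atlas mask).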
    uint32_t key = deviceSpacePath.getGenerationID();
    if (CoverageType::kA8_Multisample == fCoverageType) {
        // We only need to consider fill rule in MSAA mode. In coverage count mode Even/Odd and
        // Nonzero both reference the same coverage count mask.
        key = (key << 1) | (uint32_t)GrFillRuleForSkPath(deviceSpacePath);
    }
    GrCCClipPath& clipPath =
            this->lookupPendingPaths(opListID)->fClipPaths[key];
    if (!clipPath.isInitialized()) {
        // This ClipPath was just created during lookup. Initialize it.
        const SkRect& pathDevBounds = deviceSpacePath.getBounds();
        if (SkTMax(pathDevBounds.height(), pathDevBounds.width()) > kPathCropThreshold) {
            // The path is too large. Crop it or analytic AA can run out of fp32 precision.
            SkPath croppedPath;
            int maxRTSize = caps.maxRenderTargetSize();
            CropPath(deviceSpacePath, SkIRect::MakeWH(maxRTSize, maxRTSize), &croppedPath);
            clipPath.init(croppedPath, accessRect, fCoverageType, caps);
        } else {
            clipPath.init(deviceSpacePath, accessRect, fCoverageType, caps);
        }
    } else {
        clipPath.addAccess(accessRect);
    }

    auto isCoverageCount = GrCCClipProcessor::IsCoverageCount(
            CoverageType::kFP16_CoverageCount == fCoverageType);
    auto mustCheckBounds = GrCCClipProcessor::MustCheckBounds(
            !clipPath.pathDevIBounds().contains(accessRect));
    return skstd::make_unique<GrCCClipProcessor>(&clipPath, isCoverageCount, mustCheckBounds);
}

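// Runs before the opLists in this flush execute. Gathers every pending draw and clip path for the
// affected opLists, sizes and renders the atlas(es), and publishes the resulting per-flush
// resources to the ops that will read them.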
void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
                                              const uint32_t* opListIDs, int numOpListIDs,
                                              SkTArray<sk_sp<GrRenderTargetContext>>* out) {
    using DoCopiesToA8Coverage = GrCCDrawPathsOp::DoCopiesToA8Coverage;
    SkASSERT(!fFlushing);
    SkASSERT(fFlushingPaths.empty());
    SkDEBUGCODE(fFlushing = true);

    if (fPathCache) {
        fPathCache->doPreFlushProcessing();
    }

    if (fPendingPaths.empty()) {
        return;  // Nothing to draw.
    }

    GrCCPerFlushResourceSpecs specs;
    int maxPreferredRTSize = onFlushRP->caps()->maxPreferredRenderTargetSize();
    specs.fCopyAtlasSpecs.fMaxPreferredTextureSize = SkTMin(2048, maxPreferredRTSize);
    SkASSERT(0 == specs.fCopyAtlasSpecs.fMinTextureSize);
    specs.fRenderedAtlasSpecs.fMaxPreferredTextureSize = maxPreferredRTSize;
    specs.fRenderedAtlasSpecs.fMinTextureSize = SkTMin(512, maxPreferredRTSize);

    // Move the per-opList paths that are about to be flushed from fPendingPaths to fFlushingPaths,
    // and count them up so we can preallocate buffers.
    fFlushingPaths.reserve(numOpListIDs);
    for (int i = 0; i < numOpListIDs; ++i) {
        auto iter = fPendingPaths.find(opListIDs[i]);
        if (fPendingPaths.end() == iter) {
            continue;  // No paths on this opList.
        }

        fFlushingPaths.push_back(std::move(iter->second));
        fPendingPaths.erase(iter);

        for (GrCCDrawPathsOp* op : fFlushingPaths.back()->fDrawOps) {
            op->accountForOwnPaths(fPathCache.get(), onFlushRP, &specs);
        }
        for (const auto& clipsIter : fFlushingPaths.back()->fClipPaths) {
            clipsIter.second.accountForOwnPath(&specs);
        }
    }

    if (specs.isEmpty()) {
        return;  // Nothing to draw.
    }

    // Determine if there are enough reusable paths from last flush for it to be worth our time to
    // copy them to cached atlas(es).
    int numCopies = specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kFillIdx] +
                    specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kStrokeIdx];
    auto doCopies = DoCopiesToA8Coverage(numCopies > 100 ||
                                         specs.fCopyAtlasSpecs.fApproxNumPixels > 256 * 256);
    if (numCopies && DoCopiesToA8Coverage::kNo == doCopies) {
        specs.cancelCopies();
    }

    auto resources = sk_make_sp<GrCCPerFlushResources>(onFlushRP, fCoverageType, specs);
    if (!resources->isMapped()) {
        return;  // Some allocation failed.
    }

    // Lay out the atlas(es) and parse paths.
    for (const auto& flushingPaths : fFlushingPaths) {
        for (GrCCDrawPathsOp* op : flushingPaths->fDrawOps) {
            op->setupResources(fPathCache.get(), onFlushRP, resources.get(), doCopies);
        }
        for (auto& clipsIter : flushingPaths->fClipPaths) {
            clipsIter.second.renderPathInAtlas(resources.get(), onFlushRP);
        }
    }

    if (fPathCache) {
        // Purge invalidated textures from previous atlases *before* calling finalize(). That way,
        // the underlying texture objects can be freed up and reused for the next atlases.
        fPathCache->purgeInvalidatedAtlasTextures(onFlushRP);
    }

    // Allocate resources and then render the atlas(es).
    if (!resources->finalize(onFlushRP, out)) {
        return;
    }

    // Commit flushing paths to the resources once they are successfully completed.
    for (auto& flushingPaths : fFlushingPaths) {
        SkASSERT(!flushingPaths->fFlushResources);
        flushingPaths->fFlushResources = resources;
    }
}

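// Runs after the flush has executed. Releases the per-flush resources now that the Ops and FPs
// that referenced them are done, and clears the flushing state.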
void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opListIDs,
                                               int numOpListIDs) {
    SkASSERT(fFlushing);

    if (!fFlushingPaths.empty()) {
        // In DDL mode these aren't guaranteed to be deleted so we must clear out the perFlush
        // resources manually.
        for (auto& flushingPaths : fFlushingPaths) {
            flushingPaths->fFlushResources = nullptr;
        }

        // We wait to erase these until after flush, once Ops and FPs are done accessing their data.
        fFlushingPaths.reset();
    }

    SkDEBUGCODE(fFlushing = false);
}

void GrCoverageCountingPathRenderer::purgeCacheEntriesOlderThan(
        GrProxyProvider* proxyProvider, const GrStdSteadyClock::time_point& purgeTime) {
    if (fPathCache) {
        fPathCache->purgeEntriesOlderThan(proxyProvider, purgeTime);
    }
}

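// Intersects 'path' against 'cropbox' with SkPathOps, writing the result to 'out'. The output is
// marked volatile since it is a one-off path that isn't worth caching.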
void GrCoverageCountingPathRenderer::CropPath(const SkPath& path, const SkIRect& cropbox,
                                              SkPath* out) {
    SkPath cropboxPath;
    cropboxPath.addRect(SkRect::Make(cropbox));
    if (!Op(cropboxPath, path, kIntersect_SkPathOp, out)) {
        // This can fail if the PathOps encounter NaN or infinities.
        out->reset();
    }
    out->setIsVolatile(true);
}

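// Returns the stroke width in device space (1 for hairlines). If 'inflationRadius' is non-null,
// also reports how far the stroke geometry can extend beyond the path's bounds.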
float GrCoverageCountingPathRenderer::GetStrokeDevWidth(const SkMatrix& m,
                                                        const SkStrokeRec& stroke,
                                                        float* inflationRadius) {
    float strokeDevWidth;
    if (stroke.isHairlineStyle()) {
        strokeDevWidth = 1;
    } else {
        SkASSERT(SkStrokeRec::kStroke_Style == stroke.getStyle());
        SkASSERT(m.isSimilarity());  // Otherwise matrixScaleFactor = m.getMaxScale().
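        // For a similarity matrix, the length of the first column (scaleX, skewY) is the uniform
        // scale factor applied to all vectors.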
        float matrixScaleFactor = SkVector::Length(m.getScaleX(), m.getSkewY());
        strokeDevWidth = stroke.getWidth() * matrixScaleFactor;
    }
    if (inflationRadius) {
        // Inflate for a minimum stroke width of 1. In some cases when the stroke is less than 1px
        // wide, we may inflate it to 1px and instead reduce the opacity.
        *inflationRadius = SkStrokeRec::GetInflationRadius(
                stroke.getJoin(), stroke.getMiter(), stroke.getCap(), SkTMax(strokeDevWidth, 1.f));
    }
    return strokeDevWidth;
}