/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCCPerFlushResources.h"

#include "include/private/GrRecordingContext.h"
#include "src/gpu/GrClip.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrSurfaceContextPriv.h"
#include "src/gpu/ccpr/GrCCPathCache.h"
#include "src/gpu/ccpr/GrGSCoverageProcessor.h"
#include "src/gpu/ccpr/GrSampleMaskProcessor.h"
#include "src/gpu/ccpr/GrVSCoverageProcessor.h"
#include "src/gpu/geometry/GrShape.h"
#include <algorithm>

using CoverageType = GrCCAtlas::CoverageType;
using FillBatchID = GrCCFiller::BatchID;
using StrokeBatchID = GrCCStroker::BatchID;
using PathInstance = GrCCPathProcessor::Instance;

static constexpr int kFillIdx = GrCCPerFlushResourceSpecs::kFillIdx;
static constexpr int kStrokeIdx = GrCCPerFlushResourceSpecs::kStrokeIdx;

namespace {

// Base class for an Op that renders a CCPR atlas.
class AtlasOp : public GrDrawOp {
public:
    FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
    GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*,
                                      bool hasMixedSampledCoverage, GrClampType) override {
        return GrProcessorSet::EmptySetAnalysis();
    }
    CombineResult onCombineIfPossible(GrOp* other, GrRecordingContext::Arenas*,
                                      const GrCaps&) override {
        // We will only make multiple copy ops if they have different source proxies.
        // TODO: make use of texture chaining.
        return CombineResult::kCannotCombine;
    }
    void onPrepare(GrOpFlushState*) override {}

protected:
    AtlasOp(uint32_t classID, sk_sp<const GrCCPerFlushResources> resources,
            const SkISize& drawBounds)
            : GrDrawOp(classID)
            , fResources(std::move(resources)) {
        this->setBounds(SkRect::MakeIWH(drawBounds.width(), drawBounds.height()),
                        GrOp::HasAABloat::kNo, GrOp::IsHairline::kNo);
    }

    const sk_sp<const GrCCPerFlushResources> fResources;
};

// Copies paths from a cached coverage count or msaa atlas into an 8-bit literal-coverage atlas.
class CopyAtlasOp : public AtlasOp {
public:
    DEFINE_OP_CLASS_ID

    static std::unique_ptr<GrDrawOp> Make(
            GrRecordingContext* context, sk_sp<const GrCCPerFlushResources> resources,
            sk_sp<GrTextureProxy> copyProxy, int baseInstance, int endInstance,
            const SkISize& drawBounds) {
        GrOpMemoryPool* pool = context->priv().opMemoryPool();

        return pool->allocate<CopyAtlasOp>(std::move(resources), std::move(copyProxy),
                                           baseInstance, endInstance, drawBounds);
    }

    const char* name() const override { return "CopyAtlasOp (CCPR)"; }

    void visitProxies(const VisitProxyFunc& fn) const override {
        fn(fSrcProxy.get(), GrMipMapped::kNo);
    }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        SkASSERT(fSrcProxy);
        GrSurfaceProxy* srcProxy = fSrcProxy.get();
        SkASSERT(srcProxy->isInstantiated());

        auto coverageMode = GrCCAtlas::CoverageTypeToPathCoverageMode(
                fResources->renderedPathCoverageType());
        GrColorType ct = GrCCAtlas::CoverageTypeToColorType(fResources->renderedPathCoverageType());
        GrSwizzle swizzle = flushState->caps().getReadSwizzle(srcProxy->backendFormat(), ct);
        GrCCPathProcessor pathProc(coverageMode, srcProxy->peekTexture(), swizzle,
                                   GrCCAtlas::kTextureOrigin);

        GrPipeline pipeline(GrScissorTest::kDisabled, SkBlendMode::kSrc,
                            flushState->drawOpArgs().outputSwizzle());
        GrPipeline::FixedDynamicState dynamicState;
        dynamicState.fPrimitiveProcessorTextures = &srcProxy;

        pathProc.drawPaths(flushState, pipeline, &dynamicState, *fResources, fBaseInstance,
                           fEndInstance, this->bounds());
    }

private:
    friend class ::GrOpMemoryPool; // for ctor

    CopyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, sk_sp<GrTextureProxy> srcProxy,
                int baseInstance, int endInstance, const SkISize& drawBounds)
            : AtlasOp(ClassID(), std::move(resources), drawBounds)
            , fSrcProxy(srcProxy)
            , fBaseInstance(baseInstance)
            , fEndInstance(endInstance) {
    }

    sk_sp<GrTextureProxy> fSrcProxy;
    const int fBaseInstance;
    const int fEndInstance;
};

// Renders coverage counts to a CCPR atlas using the resources' pre-filled GrCCPathParser.
template<typename ProcessorType> class RenderAtlasOp : public AtlasOp {
public:
    DEFINE_OP_CLASS_ID

    static std::unique_ptr<GrDrawOp> Make(
            GrRecordingContext* context, sk_sp<const GrCCPerFlushResources> resources,
            FillBatchID fillBatchID, StrokeBatchID strokeBatchID, const SkISize& drawBounds) {
        GrOpMemoryPool* pool = context->priv().opMemoryPool();

        return pool->allocate<RenderAtlasOp>(
                std::move(resources), fillBatchID, strokeBatchID, drawBounds);
    }

    // GrDrawOp interface.
    const char* name() const override { return "RenderAtlasOp (CCPR)"; }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        ProcessorType proc;
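        // Coverage counts from overlapping path segments accumulate additively in the atlas,
        // which is why the pipeline below draws with SkBlendMode::kPlus.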
        GrPipeline pipeline(GrScissorTest::kEnabled, SkBlendMode::kPlus,
                            flushState->drawOpArgs().outputSwizzle());
        fResources->filler().drawFills(flushState, &proc, pipeline, fFillBatchID, fDrawBounds);
        fResources->stroker().drawStrokes(flushState, &proc, fStrokeBatchID, fDrawBounds);
    }

private:
    friend class ::GrOpMemoryPool; // for ctor

    RenderAtlasOp(sk_sp<const GrCCPerFlushResources> resources, FillBatchID fillBatchID,
                  StrokeBatchID strokeBatchID, const SkISize& drawBounds)
            : AtlasOp(ClassID(), std::move(resources), drawBounds)
            , fFillBatchID(fillBatchID)
            , fStrokeBatchID(strokeBatchID)
            , fDrawBounds(SkIRect::MakeWH(drawBounds.width(), drawBounds.height())) {
    }

    const FillBatchID fFillBatchID;
    const StrokeBatchID fStrokeBatchID;
    const SkIRect fDrawBounds;
};

}  // namespace

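// Worked example of the instance-count math below, using hypothetical numbers: with 3 cached
// paths, 2 copied fills, 1 copied stroke, and 4 rendered fills, the instance buffer holds
// 3 + (2 + 1)*2 + 4 = 13 PathInstances. Clip paths never occupy instance-buffer slots.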
static int inst_buffer_count(const GrCCPerFlushResourceSpecs& specs) {
    return specs.fNumCachedPaths +
           // Copies get two instances per draw: 1 copy + 1 draw.
           (specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]) * 2 +
           specs.fNumRenderedPaths[kFillIdx] + specs.fNumRenderedPaths[kStrokeIdx];
           // No clips in instance buffers.
}

GrCCPerFlushResources::GrCCPerFlushResources(
        GrOnFlushResourceProvider* onFlushRP, CoverageType coverageType,
        const GrCCPerFlushResourceSpecs& specs)
        // Overallocate by one point so we can call Sk4f::Store at the final SkPoint in the array.
        // (See transform_path_pts below.)
        // FIXME: instead use built-in instructions to write only the first two lanes of an Sk4f.
        : fLocalDevPtsBuffer(std::max(specs.fRenderedPathStats[kFillIdx].fMaxPointsPerPath,
                                      specs.fRenderedPathStats[kStrokeIdx].fMaxPointsPerPath) + 1)
        , fFiller((CoverageType::kFP16_CoverageCount == coverageType)
                          ? GrCCFiller::Algorithm::kCoverageCount
                          : GrCCFiller::Algorithm::kStencilWindingCount,
                  specs.fNumRenderedPaths[kFillIdx] + specs.fNumClipPaths,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalSkPoints,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalSkVerbs,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalConicWeights)
        , fStroker(specs.fNumRenderedPaths[kStrokeIdx],
                   specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkPoints,
                   specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkVerbs)
        , fCopyAtlasStack(CoverageType::kA8_LiteralCoverage, specs.fCopyAtlasSpecs,
                          onFlushRP->caps())
        , fRenderedAtlasStack(coverageType, specs.fRenderedAtlasSpecs, onFlushRP->caps())
        , fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
        , fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
        , fInstanceBuffer(onFlushRP->makeBuffer(GrGpuBufferType::kVertex,
                                                inst_buffer_count(specs) * sizeof(PathInstance)))
        , fNextCopyInstanceIdx(0)
        , fNextPathInstanceIdx(
                  specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]) {
    if (!fIndexBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR index buffer. No paths will be drawn.\n");
        return;
    }
    if (!fVertexBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR vertex buffer. No paths will be drawn.\n");
        return;
    }
    if (!fInstanceBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR instance buffer. No paths will be drawn.\n");
        return;
    }
    fPathInstanceData = static_cast<PathInstance*>(fInstanceBuffer->map());
    SkASSERT(fPathInstanceData);

    if (CoverageType::kA8_Multisample == coverageType) {
        int numRenderedPaths =
                specs.fNumRenderedPaths[kFillIdx] + specs.fNumRenderedPaths[kStrokeIdx] +
                specs.fNumClipPaths;
        fStencilResolveBuffer = onFlushRP->makeBuffer(
                GrGpuBufferType::kVertex,
                numRenderedPaths * sizeof(GrStencilAtlasOp::ResolveRectInstance));
        fStencilResolveInstanceData = static_cast<GrStencilAtlasOp::ResolveRectInstance*>(
                fStencilResolveBuffer->map());
        SkASSERT(fStencilResolveInstanceData);
        SkDEBUGCODE(fEndStencilResolveInstance = numRenderedPaths);
    }

    SkDEBUGCODE(fEndCopyInstance =
                        specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]);
    SkDEBUGCODE(fEndPathInstance = inst_buffer_count(specs));
}

void GrCCPerFlushResources::upgradeEntryToLiteralCoverageAtlas(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCPathCacheEntry* entry,
        GrFillRule fillRule) {
    using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;
    SkASSERT(this->isMapped());
    SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);

    const GrCCCachedAtlas* cachedAtlas = entry->cachedAtlas();
    SkASSERT(cachedAtlas);
    SkASSERT(cachedAtlas->getOnFlushProxy());

    if (CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
        // This entry has already been upgraded to literal coverage. The path must have been drawn
        // multiple times during the flush.
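        // fEndCopyInstance was sized assuming each copy-eligible path would emit one copy
        // instance, so roll back the debug-only end count for this skipped copy.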
        SkDEBUGCODE(--fEndCopyInstance);
        return;
    }

    SkIVector newAtlasOffset;
    if (GrCCAtlas* retiredAtlas = fCopyAtlasStack.addRect(entry->devIBounds(), &newAtlasOffset)) {
        // We did not fit in the previous copy atlas and it was retired. We will render the ranges
        // up until fCopyPathRanges.count() into the retired atlas during finalize().
        retiredAtlas->setFillBatchID(fCopyPathRanges.count());
        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
    }

    this->recordCopyPathInstance(
            *entry, newAtlasOffset, fillRule, sk_ref_sp(cachedAtlas->getOnFlushProxy()));

    sk_sp<GrTexture> previousAtlasTexture =
            sk_ref_sp(cachedAtlas->getOnFlushProxy()->peekTexture());
    GrCCAtlas* newAtlas = &fCopyAtlasStack.current();
    if (ReleaseAtlasResult::kDidInvalidateFromCache ==
            entry->upgradeToLiteralCoverageAtlas(pathCache, onFlushRP, newAtlas, newAtlasOffset)) {
        // This texture just got booted out of the cache. Keep it around in case we can recycle it
        // for a new rendered atlas. Recycling is safe because copying happens before rendering new
        // paths, and every path from the old atlas that we plan to use this flush will be copied
        // to a new atlas. We'll never copy some and leave others.
        fRecyclableAtlasTextures.push_back(std::move(previousAtlasTexture));
    }
}

void GrCCPerFlushResources::recordCopyPathInstance(
        const GrCCPathCacheEntry& entry, const SkIVector& newAtlasOffset, GrFillRule fillRule,
        sk_sp<GrTextureProxy> srcProxy) {
    SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);

    // Write the instance at the back of the array.
    int currentInstanceIdx = fNextCopyInstanceIdx++;
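    // Solid white: four half-float 1.0 values, one per color channel, packed into a uint64_t.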
    constexpr uint64_t kWhite = (((uint64_t) SK_Half1) << 0) |
                                (((uint64_t) SK_Half1) << 16) |
                                (((uint64_t) SK_Half1) << 32) |
                                (((uint64_t) SK_Half1) << 48);
    fPathInstanceData[currentInstanceIdx].set(entry, newAtlasOffset, kWhite, fillRule);

    // Percolate the instance forward until it's contiguous with other instances that share the
    // same proxy.
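    // For example, if the current atlas's ranges are [proxy A][proxy B] and the new instance also
    // uses proxy A, one swap moves it past the B range (instances within a range are
    // interchangeable), and it is then counted into the existing A range.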
    for (int i = fCopyPathRanges.count() - 1; i >= fCurrCopyAtlasRangesIdx; --i) {
        if (fCopyPathRanges[i].fSrcProxy == srcProxy) {
            ++fCopyPathRanges[i].fCount;
            return;
        }
        int rangeFirstInstanceIdx = currentInstanceIdx - fCopyPathRanges[i].fCount;
        std::swap(fPathInstanceData[rangeFirstInstanceIdx], fPathInstanceData[currentInstanceIdx]);
        currentInstanceIdx = rangeFirstInstanceIdx;
    }

    // An instance with this particular proxy did not yet exist in the array. Add a range for it,
    // first moving any later ranges back to make space for it at fCurrCopyAtlasRangesIdx.
    fCopyPathRanges.push_back();
    std::move_backward(fCopyPathRanges.begin() + fCurrCopyAtlasRangesIdx,
                       fCopyPathRanges.end() - 1,
                       fCopyPathRanges.end());
    fCopyPathRanges[fCurrCopyAtlasRangesIdx] = {std::move(srcProxy), 1};
}

static bool transform_path_pts(
        const SkMatrix& m, const SkPath& path, const SkAutoSTArray<32, SkPoint>& outDevPts,
        GrOctoBounds* octoBounds) {
    const SkPoint* pts = SkPathPriv::PointData(path);
    int numPts = path.countPoints();
    SkASSERT(numPts + 1 <= outDevPts.count());
    SkASSERT(numPts);

    // m45 transforms path points into "45 degree" device space. A bounding box in this space gives
    // the circumscribing octagon's diagonals. We could use SK_ScalarRoot2Over2, but an orthonormal
    // transform is not necessary as long as the shader uses the correct inverse.
    SkMatrix m45;
    m45.setSinCos(1, 1);
    m45.preConcat(m);
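    // Concretely, with m pre-concatenated, m45 maps a path point p to
    // (dev.x - dev.y, dev.x + dev.y), where dev = m * p.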

    // X,Y,T are two parallel view matrices that accumulate two bounding boxes as they map points:
    // device-space bounds and "45 degree" device-space bounds (| 1 -1 | * devCoords).
    //                                                          | 1  1 |
    Sk4f X = Sk4f(m.getScaleX(), m.getSkewY(), m45.getScaleX(), m45.getSkewY());
    Sk4f Y = Sk4f(m.getSkewX(), m.getScaleY(), m45.getSkewX(), m45.getScaleY());
    Sk4f T = Sk4f(m.getTranslateX(), m.getTranslateY(), m45.getTranslateX(), m45.getTranslateY());

    // Map the path's points to device space and accumulate bounding boxes.
    Sk4f devPt = SkNx_fma(Y, Sk4f(pts[0].y()), T);
    devPt = SkNx_fma(X, Sk4f(pts[0].x()), devPt);
    Sk4f topLeft = devPt;
    Sk4f bottomRight = devPt;

    // Store all 4 values [dev.x, dev.y, dev45.x, dev45.y]. We are only interested in the first
    // two, and will overwrite [dev45.x, dev45.y] with the next point. This is why the dst buffer
    // must be at least one larger than the number of points.
    devPt.store(&outDevPts[0]);

    for (int i = 1; i < numPts; ++i) {
        devPt = SkNx_fma(Y, Sk4f(pts[i].y()), T);
        devPt = SkNx_fma(X, Sk4f(pts[i].x()), devPt);
        topLeft = Sk4f::Min(topLeft, devPt);
        bottomRight = Sk4f::Max(bottomRight, devPt);
        devPt.store(&outDevPts[i]);
    }

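    // Finite values satisfy x*0 == 0, while infinities and NaNs produce NaN and fail the
    // comparison, so the check below catches non-finite bounds without inspecting each point.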
    if (!(Sk4f(0) == topLeft*0).allTrue() || !(Sk4f(0) == bottomRight*0).allTrue()) {
        // The bounds are infinite or NaN.
        return false;
    }

    SkPoint topLeftPts[2], bottomRightPts[2];
    topLeft.store(topLeftPts);
    bottomRight.store(bottomRightPts);

    const SkRect& devBounds = SkRect::MakeLTRB(
            topLeftPts[0].x(), topLeftPts[0].y(), bottomRightPts[0].x(), bottomRightPts[0].y());
    const SkRect& devBounds45 = SkRect::MakeLTRB(
            topLeftPts[1].x(), topLeftPts[1].y(), bottomRightPts[1].x(), bottomRightPts[1].y());

    octoBounds->set(devBounds, devBounds45);
    return true;
}

GrCCAtlas* GrCCPerFlushResources::renderShapeInAtlas(
        const SkIRect& clipIBounds, const SkMatrix& m, const GrShape& shape, float strokeDevWidth,
        GrOctoBounds* octoBounds, SkIRect* devIBounds, SkIVector* devToAtlasOffset) {
    SkASSERT(this->isMapped());
    SkASSERT(fNextPathInstanceIdx < fEndPathInstance);

    SkPath path;
    shape.asPath(&path);
    if (path.isEmpty()) {
        SkDEBUGCODE(--fEndPathInstance);
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }
    if (!transform_path_pts(m, path, fLocalDevPtsBuffer, octoBounds)) {
        // The transformed path had infinite or NaN bounds.
        SkDEBUGCODE(--fEndPathInstance);
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }

    const SkStrokeRec& stroke = shape.style().strokeRec();
    if (!stroke.isFillStyle()) {
        float r = SkStrokeRec::GetInflationRadius(
                stroke.getJoin(), stroke.getMiter(), stroke.getCap(), strokeDevWidth);
        octoBounds->outset(r);
    }

    GrScissorTest enableScissorInAtlas;
    if (clipIBounds.contains(octoBounds->bounds())) {
        enableScissorInAtlas = GrScissorTest::kDisabled;
    } else if (octoBounds->clip(clipIBounds)) {
        enableScissorInAtlas = GrScissorTest::kEnabled;
    } else {
        // The clip and octo bounds do not intersect. Draw nothing.
        SkDEBUGCODE(--fEndPathInstance);
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }
    octoBounds->roundOut(devIBounds);
    SkASSERT(clipIBounds.contains(*devIBounds));

    this->placeRenderedPathInAtlas(*devIBounds, enableScissorInAtlas, devToAtlasOffset);

    GrFillRule fillRule;
    if (stroke.isFillStyle()) {
        SkASSERT(0 == strokeDevWidth);
        fFiller.parseDeviceSpaceFill(path, fLocalDevPtsBuffer.begin(), enableScissorInAtlas,
                                     *devIBounds, *devToAtlasOffset);
        fillRule = GrFillRuleForSkPath(path);
    } else {
        // Stroke-and-fill is not yet supported.
        SkASSERT(SkStrokeRec::kStroke_Style == stroke.getStyle() || stroke.isHairlineStyle());
        SkASSERT(!stroke.isHairlineStyle() || 1 == strokeDevWidth);
        fStroker.parseDeviceSpaceStroke(
                path, fLocalDevPtsBuffer.begin(), stroke, strokeDevWidth, enableScissorInAtlas,
                *devIBounds, *devToAtlasOffset);
        fillRule = GrFillRule::kNonzero;
    }

    if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
        this->recordStencilResolveInstance(*devIBounds, *devToAtlasOffset, fillRule);
    }

    return &fRenderedAtlasStack.current();
}

const GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
        const SkIRect& clipIBounds, const SkPath& devPath, const SkIRect& devPathIBounds,
        GrFillRule fillRule, SkIVector* devToAtlasOffset) {
    SkASSERT(this->isMapped());

    if (devPath.isEmpty()) {
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }

    GrScissorTest enableScissorInAtlas;
    SkIRect clippedPathIBounds;
    if (clipIBounds.contains(devPathIBounds)) {
        clippedPathIBounds = devPathIBounds;
        enableScissorInAtlas = GrScissorTest::kDisabled;
    } else if (clippedPathIBounds.intersect(clipIBounds, devPathIBounds)) {
        enableScissorInAtlas = GrScissorTest::kEnabled;
    } else {
        // The clip and path bounds do not intersect. Draw nothing.
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }

    this->placeRenderedPathInAtlas(clippedPathIBounds, enableScissorInAtlas, devToAtlasOffset);
    fFiller.parseDeviceSpaceFill(devPath, SkPathPriv::PointData(devPath), enableScissorInAtlas,
                                 clippedPathIBounds, *devToAtlasOffset);

    // In MSAA mode we also record an internal draw instance that will be used to resolve stencil
    // winding values to coverage when the atlas is generated.
    if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
        this->recordStencilResolveInstance(clippedPathIBounds, *devToAtlasOffset, fillRule);
    }

    return &fRenderedAtlasStack.current();
}

void GrCCPerFlushResources::placeRenderedPathInAtlas(
        const SkIRect& clippedPathIBounds, GrScissorTest scissorTest, SkIVector* devToAtlasOffset) {
    if (GrCCAtlas* retiredAtlas =
                fRenderedAtlasStack.addRect(clippedPathIBounds, devToAtlasOffset)) {
        // We did not fit in the previous coverage count atlas and it was retired. Close the path
        // parser's current batch (which does not yet include the path we just parsed). We will
        // render this batch into the retired atlas during finalize().
        retiredAtlas->setFillBatchID(fFiller.closeCurrentBatch());
        retiredAtlas->setStrokeBatchID(fStroker.closeCurrentBatch());
        retiredAtlas->setEndStencilResolveInstance(fNextStencilResolveInstanceIdx);
    }
}

void GrCCPerFlushResources::recordStencilResolveInstance(
        const SkIRect& clippedPathIBounds, const SkIVector& devToAtlasOffset, GrFillRule fillRule) {
    SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType());
    SkASSERT(fNextStencilResolveInstanceIdx < fEndStencilResolveInstance);

    SkIRect atlasIBounds = clippedPathIBounds.makeOffset(devToAtlasOffset);
    if (GrFillRule::kEvenOdd == fillRule) {
        // Make even/odd fills counterclockwise. The resolve draw uses two-sided stencil, with
        // "nonzero" settings in front and "even/odd" settings in back.
        std::swap(atlasIBounds.fLeft, atlasIBounds.fRight);
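        // (A rect with fLeft > fRight winds the opposite way, so the resolve draw applies its
        // back-facing, even/odd stencil settings to this instance.)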
    }
    fStencilResolveInstanceData[fNextStencilResolveInstanceIdx++] = {
            (int16_t)atlasIBounds.left(), (int16_t)atlasIBounds.top(),
            (int16_t)atlasIBounds.right(), (int16_t)atlasIBounds.bottom()};
}

bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP) {
    SkASSERT(this->isMapped());
    SkASSERT(fNextPathInstanceIdx == fEndPathInstance);
    SkASSERT(fNextCopyInstanceIdx == fEndCopyInstance);
    SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample != this->renderedPathCoverageType() ||
             fNextStencilResolveInstanceIdx == fEndStencilResolveInstance);

    fInstanceBuffer->unmap();
    fPathInstanceData = nullptr;

    if (fStencilResolveBuffer) {
        fStencilResolveBuffer->unmap();
        fStencilResolveInstanceData = nullptr;
    }

    if (!fCopyAtlasStack.empty()) {
        fCopyAtlasStack.current().setFillBatchID(fCopyPathRanges.count());
        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
    }
    if (!fRenderedAtlasStack.empty()) {
        fRenderedAtlasStack.current().setFillBatchID(fFiller.closeCurrentBatch());
        fRenderedAtlasStack.current().setStrokeBatchID(fStroker.closeCurrentBatch());
        fRenderedAtlasStack.current().setEndStencilResolveInstance(fNextStencilResolveInstanceIdx);
    }

    // Build the GPU buffers to render path coverage counts. (This must not happen until after the
    // final calls to fFiller/fStroker.closeCurrentBatch().)
    if (!fFiller.prepareToDraw(onFlushRP)) {
        return false;
    }
    if (!fStroker.prepareToDraw(onFlushRP)) {
        return false;
    }

    // Draw the copies from coverage count or msaa atlas(es) into 8-bit cached atlas(es).
    int copyRangeIdx = 0;
    int baseCopyInstance = 0;
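    // Each copy atlas recorded the end of its range list via setFillBatchID() when it was retired
    // (the final atlas's count was set just above), so walking the atlas stack and fCopyPathRanges
    // in parallel visits every range exactly once.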
    for (GrCCAtlasStack::Iter atlas(fCopyAtlasStack); atlas.next();) {
        int endCopyRange = atlas->getFillBatchID();
        SkASSERT(endCopyRange > copyRangeIdx);

        auto rtc = atlas->instantiate(onFlushRP);
        for (; copyRangeIdx < endCopyRange; ++copyRangeIdx) {
            const CopyPathRange& copyRange = fCopyPathRanges[copyRangeIdx];
            int endCopyInstance = baseCopyInstance + copyRange.fCount;
            if (rtc) {
                auto op = CopyAtlasOp::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), copyRange.fSrcProxy,
                        baseCopyInstance, endCopyInstance, atlas->drawBounds());
                rtc->addDrawOp(GrNoClip(), std::move(op));
            }
            baseCopyInstance = endCopyInstance;
        }
    }
    SkASSERT(fCopyPathRanges.count() == copyRangeIdx);
    SkASSERT(fNextCopyInstanceIdx == baseCopyInstance);
    SkASSERT(baseCopyInstance == fEndCopyInstance);

    // Render the coverage count atlas(es).
    int baseStencilResolveInstance = 0;
    for (GrCCAtlasStack::Iter atlas(fRenderedAtlasStack); atlas.next();) {
        // Copies will be finished by the time we get to rendering new atlases. See if we can
        // recycle any previously invalidated atlas textures instead of creating new ones.
        sk_sp<GrTexture> backingTexture;
        for (sk_sp<GrTexture>& texture : fRecyclableAtlasTextures) {
            if (texture && atlas->currentHeight() == texture->height() &&
                    atlas->currentWidth() == texture->width()) {
                backingTexture = skstd::exchange(texture, nullptr);
                break;
            }
        }

        if (auto rtc = atlas->instantiate(onFlushRP, std::move(backingTexture))) {
            std::unique_ptr<GrDrawOp> op;
            if (CoverageType::kA8_Multisample == fRenderedAtlasStack.coverageType()) {
                op = GrStencilAtlasOp::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), atlas->getFillBatchID(),
                        atlas->getStrokeBatchID(), baseStencilResolveInstance,
                        atlas->getEndStencilResolveInstance(), atlas->drawBounds());
            } else if (onFlushRP->caps()->shaderCaps()->geometryShaderSupport()) {
                op = RenderAtlasOp<GrGSCoverageProcessor>::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), atlas->getFillBatchID(),
                        atlas->getStrokeBatchID(), atlas->drawBounds());
            } else {
                op = RenderAtlasOp<GrVSCoverageProcessor>::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), atlas->getFillBatchID(),
                        atlas->getStrokeBatchID(), atlas->drawBounds());
            }
            rtc->addDrawOp(GrNoClip(), std::move(op));
            if (rtc->asSurfaceProxy()->requiresManualMSAAResolve()) {
                onFlushRP->addTextureResolveTask(sk_ref_sp(rtc->asTextureProxy()),
                                                 GrSurfaceProxy::ResolveFlags::kMSAA);
            }
        }

        SkASSERT(atlas->getEndStencilResolveInstance() >= baseStencilResolveInstance);
        baseStencilResolveInstance = atlas->getEndStencilResolveInstance();
    }
    SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample != this->renderedPathCoverageType() ||
             baseStencilResolveInstance == fEndStencilResolveInstance);

    return true;
}

void GrCCPerFlushResourceSpecs::cancelCopies() {
    // Convert copies to cached draws.
    fNumCachedPaths += fNumCopiedPaths[kFillIdx] + fNumCopiedPaths[kStrokeIdx];
    fNumCopiedPaths[kFillIdx] = fNumCopiedPaths[kStrokeIdx] = 0;
    fCopyPathStats[kFillIdx] = fCopyPathStats[kStrokeIdx] = GrCCRenderedPathStats();
    fCopyAtlasSpecs = GrCCAtlas::Specs();
}