/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ops/GrTriangulatingPathRenderer.h"

#include "include/private/SkIDChangeListener.h"
#include "src/core/SkGeometry.h"
#include "src/gpu/GrAATriangulator.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDefaultGeoProcFactory.h"
#include "src/gpu/GrDrawOpTest.h"
#include "src/gpu/GrEagerVertexAllocator.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrProgramInfo.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSimpleMesh.h"
#include "src/gpu/GrStyle.h"
#include "src/gpu/GrSurfaceDrawContext.h"
#include "src/gpu/GrThreadSafeCache.h"
#include "src/gpu/GrTriangulator.h"
#include "src/gpu/geometry/GrPathUtils.h"
#include "src/gpu/geometry/GrStyledShape.h"
#include "src/gpu/ops/GrMeshDrawOp.h"
#include "src/gpu/ops/GrSimpleMeshDrawOpHelperWithStencil.h"

#include <cstdio>

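// Cap on the number of verbs a path may contain for the analytic-AA (non-cached) triangulation
// case; see onCanDrawPath(). The #ifndef guard allows the limit to be overridden at build time.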
#ifndef GR_AA_TESSELLATOR_MAX_VERB_COUNT
#define GR_AA_TESSELLATOR_MAX_VERB_COUNT 10
#endif

/*
 * This path renderer linearizes and decomposes the path into triangles using GrTriangulator,
 * uploads the triangles to a vertex buffer, and renders them with a single draw call. It can do
 * screenspace antialiasing with a one-pixel coverage ramp.
 */
namespace {

// The TessInfo struct contains ancillary data not specifically required for the triangle
// data (which is stored in a GrThreadSafeCache::VertexData object).
// The 'fNumVertices' field is a temporary exception. It is still needed to support the
// AA triangulated path case, which uses neither the GrThreadSafeCache nor the VertexData object.
// When there is an associated VertexData, its numVertices should always match the TessInfo's
// value.
struct TessInfo {
    int fNumVertices;
    bool fIsLinear;
    SkScalar fTolerance;
};

static sk_sp<SkData> create_data(int numVertices, bool isLinear, SkScalar tol) {
    TessInfo info { numVertices, isLinear, tol };
    return SkData::MakeWithCopy(&info, sizeof(info));
}

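// Is a cached triangulation usable at the requested tolerance? Triangulations of linear paths
// are exact regardless of tolerance. Otherwise, accept a cached triangulation whose tolerance is
// less than 3x the requested tolerance (e.g., a request at tol = 0.1 accepts any cached entry
// generated at a tolerance below 0.3).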
bool cache_match(const SkData* data, SkScalar tol) {
    SkASSERT(data);

    const TessInfo* info = static_cast<const TessInfo*>(data->data());

    return info->fIsLinear || info->fTolerance < 3.0f * tol;
}

// Should 'challenger' replace 'incumbent' in the cache if there is a collision?
bool is_newer_better(SkData* incumbent, SkData* challenger) {
    const TessInfo* i = static_cast<const TessInfo*>(incumbent->data());
    const TessInfo* c = static_cast<const TessInfo*>(challenger->data());

    if (i->fIsLinear || i->fTolerance <= c->fTolerance) {
        return false;  // prefer the incumbent
    }

    return true;
}

// When the SkPathRef genID changes, invalidate a corresponding GrResource described by key.
class UniqueKeyInvalidator : public SkIDChangeListener {
public:
    UniqueKeyInvalidator(const GrUniqueKey& key, uint32_t contextUniqueID)
            : fMsg(key, contextUniqueID, /* inThreadSafeCache */ true) {}

private:
    GrUniqueKeyInvalidatedMessage fMsg;

    void changed() override { SkMessageBus<GrUniqueKeyInvalidatedMessage, uint32_t>::Post(fMsg); }
};

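// Triangulates directly into a static GPU vertex buffer. If the buffer cannot be mapped, the
// triangles are written to a CPU-side staging allocation instead and uploaded in unlock().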
class StaticVertexAllocator : public GrEagerVertexAllocator {
public:
    StaticVertexAllocator(GrResourceProvider* resourceProvider, bool canMapVB)
            : fResourceProvider(resourceProvider)
            , fCanMapVB(canMapVB) {
    }

#ifdef SK_DEBUG
    ~StaticVertexAllocator() override {
        SkASSERT(!fLockStride && !fVertices && !fVertexBuffer && !fVertexData);
    }
#endif

    void* lock(size_t stride, int eagerCount) override {
        SkASSERT(!fLockStride && !fVertices && !fVertexBuffer && !fVertexData);
        SkASSERT(stride && eagerCount);

        size_t size = eagerCount * stride;
        fVertexBuffer = fResourceProvider->createBuffer(size, GrGpuBufferType::kVertex,
                                                        kStatic_GrAccessPattern);
        if (!fVertexBuffer) {
            return nullptr;
        }
        if (fCanMapVB) {
            fVertices = fVertexBuffer->map();
        }
        if (!fVertices) {
            fVertices = sk_malloc_throw(eagerCount * stride);
            fCanMapVB = false;
        }
        fLockStride = stride;
        return fVertices;
    }

    void unlock(int actualCount) override {
        SkASSERT(fLockStride && fVertices && fVertexBuffer && !fVertexData);

        if (fCanMapVB) {
            fVertexBuffer->unmap();
        } else {
            fVertexBuffer->updateData(fVertices, actualCount * fLockStride);
            sk_free(fVertices);
        }

        fVertexData = GrThreadSafeCache::MakeVertexData(std::move(fVertexBuffer),
                                                        actualCount, fLockStride);

        fVertices = nullptr;
        fLockStride = 0;
    }

    sk_sp<GrThreadSafeCache::VertexData> detachVertexData() {
        SkASSERT(!fLockStride && !fVertices && !fVertexBuffer && fVertexData);

        return std::move(fVertexData);
    }

private:
    sk_sp<GrThreadSafeCache::VertexData> fVertexData;
    sk_sp<GrGpuBuffer> fVertexBuffer;
    GrResourceProvider* fResourceProvider;
    bool fCanMapVB;
    void* fVertices = nullptr;
    size_t fLockStride = 0;
};

}  // namespace

//-------------------------------------------------------------------------------------------------
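// GrCpuVertexAllocator triangulates into heap memory, which allows the tessellation to run on a
// DDL recording thread (see onPrePrepareDraws below). The resulting VertexData is only uploaded
// to a GPU buffer later, at flush time (see createNonAAMesh).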
void* GrCpuVertexAllocator::lock(size_t stride, int eagerCount) {
    SkASSERT(!fLockStride && !fVertices && !fVertexData);
    SkASSERT(stride && eagerCount);

    fVertices = sk_malloc_throw(eagerCount * stride);
    fLockStride = stride;

    return fVertices;
}

void GrCpuVertexAllocator::unlock(int actualCount) {
    SkASSERT(fLockStride && fVertices && !fVertexData);

    fVertices = sk_realloc_throw(fVertices, actualCount * fLockStride);

    fVertexData = GrThreadSafeCache::MakeVertexData(fVertices, actualCount, fLockStride);

    fVertices = nullptr;
    fLockStride = 0;
}

sk_sp<GrThreadSafeCache::VertexData> GrCpuVertexAllocator::detachVertexData() {
    SkASSERT(!fLockStride && !fVertices && fVertexData);

    return std::move(fVertexData);
}

//-------------------------------------------------------------------------------------------------
GrTriangulatingPathRenderer::GrTriangulatingPathRenderer()
        : fMaxVerbCount(GR_AA_TESSELLATOR_MAX_VERB_COUNT) {
}

GrPathRenderer::CanDrawPath
GrTriangulatingPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // Don't use this path renderer with dynamic MSAA. DMSAA tries to not rely on caching.
    if (args.fSurfaceProps->flags() & kDMSAA_SkSurfacePropsPrivateFlag) {
        return CanDrawPath::kNo;
    }
    // This path renderer can draw fill styles, and can do screenspace antialiasing via a
    // one-pixel coverage ramp. It can do convex and concave paths, but we'll leave the convex
    // ones to simpler algorithms. We pass on paths that have styles, though they may come back
    // around after applying the styling information to the geometry to create a filled path.
    if (!args.fShape->style().isSimpleFill() || args.fShape->knownToBeConvex()) {
        return CanDrawPath::kNo;
    }
    switch (args.fAAType) {
        case GrAAType::kNone:
        case GrAAType::kMSAA:
            // Prefer MSAA, if any antialiasing. In the non-analytic-AA case, we skip paths that
            // don't have a key since the real advantage of this path renderer comes from caching
            // the tessellated geometry.
            if (!args.fShape->hasUnstyledKey()) {
                return CanDrawPath::kNo;
            }
            break;
        case GrAAType::kCoverage:
            // Use analytic AA if we don't have MSAA. In this case, we do not cache, so we accept
            // paths without keys.
            SkPath path;
            args.fShape->asPath(&path);
            if (path.countVerbs() > fMaxVerbCount) {
                return CanDrawPath::kNo;
            }
            break;
    }
    return CanDrawPath::kYes;
}

namespace {

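// Op that draws a single triangulated path. Non-AA triangulations can be shared across draws via
// the GrThreadSafeCache; AA triangulations are regenerated for each flush and are not cached.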
class TriangulatingPathOp final : public GrMeshDrawOp {
private:
    using Helper = GrSimpleMeshDrawOpHelperWithStencil;

public:
    DEFINE_OP_CLASS_ID

    static GrOp::Owner Make(GrRecordingContext* context,
                            GrPaint&& paint,
                            const GrStyledShape& shape,
                            const SkMatrix& viewMatrix,
                            SkIRect devClipBounds,
                            GrAAType aaType,
                            const GrUserStencilSettings* stencilSettings) {
        return Helper::FactoryHelper<TriangulatingPathOp>(context, std::move(paint), shape,
                                                          viewMatrix, devClipBounds, aaType,
                                                          stencilSettings);
    }

    const char* name() const override { return "TriangulatingPathOp"; }

    void visitProxies(const VisitProxyFunc& func) const override {
        if (fProgramInfo) {
            fProgramInfo->visitFPProxies(func);
        } else {
            fHelper.visitProxies(func);
        }
    }

    TriangulatingPathOp(GrProcessorSet* processorSet,
                        const SkPMColor4f& color,
                        const GrStyledShape& shape,
                        const SkMatrix& viewMatrix,
                        const SkIRect& devClipBounds,
                        GrAAType aaType,
                        const GrUserStencilSettings* stencilSettings)
            : INHERITED(ClassID())
            , fHelper(processorSet, aaType, stencilSettings)
            , fColor(color)
            , fShape(shape)
            , fViewMatrix(viewMatrix)
            , fDevClipBounds(devClipBounds)
            , fAntiAlias(GrAAType::kCoverage == aaType) {
        SkRect devBounds;
        viewMatrix.mapRect(&devBounds, shape.bounds());
        if (shape.inverseFilled()) {
            // Because the clip bounds are used to add a contour for inverse fills, they must also
            // include the path bounds.
            devBounds.join(SkRect::Make(fDevClipBounds));
        }
        this->setBounds(devBounds, HasAABloat(fAntiAlias), IsHairline::kNo);
    }

    FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }

    GrProcessorSet::Analysis finalize(const GrCaps& caps, const GrAppliedClip* clip,
                                      GrClampType clampType) override {
        GrProcessorAnalysisCoverage coverage = fAntiAlias
                                                       ? GrProcessorAnalysisCoverage::kSingleChannel
                                                       : GrProcessorAnalysisCoverage::kNone;
        // This Op uses uniform (not vertex) color, so doesn't need to track wide color.
        return fHelper.finalizeProcessors(caps, clip, clampType, coverage, &fColor, nullptr);
    }

private:
    SkPath getPath() const {
        SkASSERT(!fShape.style().applies());
        SkPath path;
        fShape.asPath(&path);
        return path;
    }

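    // The cache key combines the shape's unstyled key with the device clip bounds. The clip
    // bounds only influence the tessellation for inverse fills; for regular fills they are
    // zeroed out so all clip states share a single cache entry.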
    static void CreateKey(GrUniqueKey* key,
                          const GrStyledShape& shape,
                          const SkIRect& devClipBounds) {
        static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();

        bool inverseFill = shape.inverseFilled();

        static constexpr int kClipBoundsCnt = sizeof(devClipBounds) / sizeof(uint32_t);
        int shapeKeyDataCnt = shape.unstyledKeySize();
        SkASSERT(shapeKeyDataCnt >= 0);
        GrUniqueKey::Builder builder(key, kDomain, shapeKeyDataCnt + kClipBoundsCnt, "Path");
        shape.writeUnstyledKey(&builder[0]);
        // For inverse fills, the tessellation is dependent on clip bounds.
        if (inverseFill) {
            memcpy(&builder[shapeKeyDataCnt], &devClipBounds, sizeof(devClipBounds));
        } else {
            memset(&builder[shapeKeyDataCnt], 0, sizeof(devClipBounds));
        }

        builder.finish();
    }

    // Triangulate the provided 'shape' in the shape's coordinate space. 'tol' should already
    // have been mapped back from device space.
    static int Triangulate(GrEagerVertexAllocator* allocator,
                           const SkMatrix& viewMatrix,
                           const GrStyledShape& shape,
                           const SkIRect& devClipBounds,
                           SkScalar tol,
                           bool* isLinear) {
        SkRect clipBounds = SkRect::Make(devClipBounds);

        SkMatrix vmi;
        if (!viewMatrix.invert(&vmi)) {
            return 0;
        }
        vmi.mapRect(&clipBounds);

        SkASSERT(!shape.style().applies());
        SkPath path;
        shape.asPath(&path);

        return GrTriangulator::PathToTriangles(path, tol, clipBounds, allocator, isLinear);
    }

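    // Look for a cached, compatible triangulation first. On a miss, triangulate now, publish the
    // result to the thread-safe cache, and register a genID-change listener so the cache entry is
    // invalidated if the path is modified.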
    void createNonAAMesh(Target* target) {
        SkASSERT(!fAntiAlias);
        GrResourceProvider* rp = target->resourceProvider();
        auto threadSafeCache = target->threadSafeCache();

        GrUniqueKey key;
        CreateKey(&key, fShape, fDevClipBounds);

        SkScalar tol = GrPathUtils::scaleToleranceToSrc(GrPathUtils::kDefaultTolerance,
                                                        fViewMatrix, fShape.bounds());

        if (!fVertexData) {
            auto [cachedVerts, data] = threadSafeCache->findVertsWithData(key);
            if (cachedVerts && cache_match(data.get(), tol)) {
                fVertexData = std::move(cachedVerts);
            }
        }

        if (fVertexData) {
            if (!fVertexData->gpuBuffer()) {
                sk_sp<GrGpuBuffer> buffer = rp->createBuffer(fVertexData->size(),
                                                             GrGpuBufferType::kVertex,
                                                             kStatic_GrAccessPattern,
                                                             fVertexData->vertices());
                if (!buffer) {
                    return;
                }

                // Since we have a direct context and a ref on 'fVertexData' we need not worry
                // about any threading issues in this call.
                fVertexData->setGpuBuffer(std::move(buffer));
            }

            fMesh = CreateMesh(target, fVertexData->refGpuBuffer(), 0, fVertexData->numVertices());
            return;
        }

        bool canMapVB = GrCaps::kNone_MapFlags != target->caps().mapBufferFlags();
        StaticVertexAllocator allocator(rp, canMapVB);

        bool isLinear;
        int vertexCount = Triangulate(&allocator, fViewMatrix, fShape, fDevClipBounds, tol,
                                      &isLinear);
        if (vertexCount == 0) {
            return;
        }

        fVertexData = allocator.detachVertexData();

        key.setCustomData(create_data(vertexCount, isLinear, tol));

        auto [tmpV, tmpD] = threadSafeCache->addVertsWithData(key, fVertexData, is_newer_better);
        if (tmpV != fVertexData) {
            SkASSERT(!tmpV->gpuBuffer());
            // In this case, although the different triangulation found in the cache is better,
            // we will continue on with the current triangulation since it is already on the gpu.
        } else {
            // This isn't perfect. The current triangulation is in the cache but it may have
            // replaced a pre-existing one. A duplicated listener is unlikely and not that
            // expensive so we just roll with it.
            fShape.addGenIDChangeListener(
                    sk_make_sp<UniqueKeyInvalidator>(key, target->contextUniqueID()));
        }

        fMesh = CreateMesh(target, fVertexData->refGpuBuffer(), 0, fVertexData->numVertices());
    }

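    // The AA mesh is triangulated in device space with a one-pixel coverage ramp and is rebuilt
    // on every flush; it is never cached.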
    void createAAMesh(Target* target) {
        SkASSERT(!fVertexData);
        SkASSERT(fAntiAlias);
        SkPath path = this->getPath();
        if (path.isEmpty()) {
            return;
        }
        SkRect clipBounds = SkRect::Make(fDevClipBounds);
        path.transform(fViewMatrix);
        SkScalar tol = GrPathUtils::kDefaultTolerance;
        sk_sp<const GrBuffer> vertexBuffer;
        int firstVertex;
        GrEagerDynamicVertexAllocator allocator(target, &vertexBuffer, &firstVertex);
        int vertexCount = GrAATriangulator::PathToAATriangles(path, tol, clipBounds, &allocator);
        if (vertexCount == 0) {
            return;
        }
        fMesh = CreateMesh(target, std::move(vertexBuffer), firstVertex, vertexCount);
    }

    GrProgramInfo* programInfo() override { return fProgramInfo; }

    void onCreateProgramInfo(const GrCaps* caps,
                             SkArenaAlloc* arena,
                             const GrSurfaceProxyView& writeView,
                             GrAppliedClip&& appliedClip,
                             const GrXferProcessor::DstProxyView& dstProxyView,
                             GrXferBarrierFlags renderPassXferBarriers,
                             GrLoadOp colorLoadOp) override {
        GrGeometryProcessor* gp;
        {
            using namespace GrDefaultGeoProcFactory;

            Color color(fColor);
            LocalCoords::Type localCoordsType = fHelper.usesLocalCoords()
                                                        ? LocalCoords::kUsePosition_Type
                                                        : LocalCoords::kUnused_Type;
            Coverage::Type coverageType;
            if (fAntiAlias) {
                if (fHelper.compatibleWithCoverageAsAlpha()) {
                    coverageType = Coverage::kAttributeTweakAlpha_Type;
                } else {
                    coverageType = Coverage::kAttribute_Type;
                }
            } else {
                coverageType = Coverage::kSolid_Type;
            }
            if (fAntiAlias) {
                gp = GrDefaultGeoProcFactory::MakeForDeviceSpace(arena, color, coverageType,
                                                                 localCoordsType, fViewMatrix);
            } else {
                gp = GrDefaultGeoProcFactory::Make(arena, color, coverageType, localCoordsType,
                                                   fViewMatrix);
            }
        }
        if (!gp) {
            return;
        }

#ifdef SK_DEBUG
        auto vertexStride = sizeof(SkPoint);
        if (fAntiAlias) {
            vertexStride += sizeof(float);
        }
        SkASSERT(vertexStride == gp->vertexStride());
#endif

        GrPrimitiveType primitiveType = TRIANGULATOR_WIREFRAME ? GrPrimitiveType::kLines
                                                               : GrPrimitiveType::kTriangles;

        fProgramInfo = fHelper.createProgramInfoWithStencil(caps, arena, writeView,
                                                            std::move(appliedClip), dstProxyView,
                                                            gp, primitiveType,
                                                            renderPassXferBarriers, colorLoadOp);
    }

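    // During DDL recording, do the non-AA triangulation up front on the recording thread (via
    // GrCpuVertexAllocator) and stash the result in the thread-safe cache. The GPU upload itself
    // still happens later, in createNonAAMesh() at flush time.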
    void onPrePrepareDraws(GrRecordingContext* rContext,
                           const GrSurfaceProxyView& writeView,
                           GrAppliedClip* clip,
                           const GrXferProcessor::DstProxyView& dstProxyView,
                           GrXferBarrierFlags renderPassXferBarriers,
                           GrLoadOp colorLoadOp) override {
        TRACE_EVENT0("skia.gpu", TRACE_FUNC);

        INHERITED::onPrePrepareDraws(rContext, writeView, clip, dstProxyView,
                                     renderPassXferBarriers, colorLoadOp);

        if (fAntiAlias) {
            // TODO: pull the triangulation work forward to the recording thread for the AA case
            // too.
            return;
        }

        auto threadSafeViewCache = rContext->priv().threadSafeCache();

        GrUniqueKey key;
        CreateKey(&key, fShape, fDevClipBounds);

        SkScalar tol = GrPathUtils::scaleToleranceToSrc(GrPathUtils::kDefaultTolerance,
                                                        fViewMatrix, fShape.bounds());

        auto [cachedVerts, data] = threadSafeViewCache->findVertsWithData(key);
        if (cachedVerts && cache_match(data.get(), tol)) {
            fVertexData = std::move(cachedVerts);
            return;
        }

        GrCpuVertexAllocator allocator;

        bool isLinear;
        int vertexCount = Triangulate(&allocator, fViewMatrix, fShape, fDevClipBounds, tol,
                                      &isLinear);
        if (vertexCount == 0) {
            return;
        }

        fVertexData = allocator.detachVertexData();

        key.setCustomData(create_data(vertexCount, isLinear, tol));

        // If some other thread created and cached its own triangulation, the 'is_newer_better'
        // predicate will replace the version in the cache if 'fVertexData' is a more accurate
        // triangulation. This will leave some other recording threads using a poorer triangulation
        // but will result in a version with greater applicability being in the cache.
        auto [tmpV, tmpD] = threadSafeViewCache->addVertsWithData(key, fVertexData,
                                                                  is_newer_better);
        if (tmpV != fVertexData) {
            // Someone beat us to creating the triangulation (and it is better than ours) so
            // just go ahead and use it.
            SkASSERT(cache_match(tmpD.get(), tol));
            fVertexData = std::move(tmpV);
        } else {
            // This isn't perfect. The current triangulation is in the cache but it may have
            // replaced a pre-existing one. A duplicated listener is unlikely and not that
            // expensive so we just roll with it.
            fShape.addGenIDChangeListener(
                    sk_make_sp<UniqueKeyInvalidator>(key, rContext->priv().contextID()));
        }
    }

    void onPrepareDraws(Target* target) override {
        if (fAntiAlias) {
            this->createAAMesh(target);
        } else {
            this->createNonAAMesh(target);
        }
    }

    static GrSimpleMesh* CreateMesh(Target* target, sk_sp<const GrBuffer> vb,
                                    int firstVertex, int count) {
        auto mesh = target->allocMesh();
        mesh->set(std::move(vb), count, firstVertex);
        return mesh;
    }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        if (!fProgramInfo) {
            this->createProgramInfo(flushState);
        }

        if (!fProgramInfo || !fMesh) {
            return;
        }

        flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
        flushState->bindTextures(fProgramInfo->geomProc(), nullptr, fProgramInfo->pipeline());
        flushState->drawMesh(*fMesh);
    }

#if GR_TEST_UTILS
    SkString onDumpInfo() const override {
        return SkStringPrintf("Color 0x%08x, aa: %d\n%s",
                              fColor.toBytes_RGBA(), fAntiAlias, fHelper.dumpInfo().c_str());
    }
#endif

    Helper fHelper;
    SkPMColor4f fColor;
    GrStyledShape fShape;
    SkMatrix fViewMatrix;
    SkIRect fDevClipBounds;
    bool fAntiAlias;

    GrSimpleMesh* fMesh = nullptr;
    GrProgramInfo* fProgramInfo = nullptr;

    sk_sp<GrThreadSafeCache::VertexData> fVertexData;

    using INHERITED = GrMeshDrawOp;
};

}  // anonymous namespace

bool GrTriangulatingPathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
                              "GrTriangulatingPathRenderer::onDrawPath");

    GrOp::Owner op = TriangulatingPathOp::Make(
            args.fContext, std::move(args.fPaint), *args.fShape, *args.fViewMatrix,
            *args.fClipConservativeBounds, args.fAAType, args.fUserStencilSettings);
    args.fRenderTargetContext->addDrawOp(args.fClip, std::move(op));
    return true;
}

///////////////////////////////////////////////////////////////////////////////////////////////////

#if GR_TEST_UTILS

GR_DRAW_OP_TEST_DEFINE(TriangulatingPathOp) {
    SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
    const SkPath& path = GrTest::TestPath(random);
    SkIRect devClipBounds = SkIRect::MakeLTRB(
            random->nextU(), random->nextU(), random->nextU(), random->nextU());
    devClipBounds.sort();
    static constexpr GrAAType kAATypes[] = {GrAAType::kNone, GrAAType::kMSAA, GrAAType::kCoverage};
    GrAAType aaType;
    do {
        aaType = kAATypes[random->nextULessThan(SK_ARRAY_COUNT(kAATypes))];
    } while (GrAAType::kMSAA == aaType && numSamples <= 1);
    GrStyle style;
    do {
        GrTest::TestStyle(random, &style);
    } while (!style.isSimpleFill());
    GrStyledShape shape(path, style);
    return TriangulatingPathOp::Make(context, std::move(paint), shape, viewMatrix, devClipBounds,
                                     aaType, GrGetRandomStencil(random, context));
}

#endif