/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/ops/AAConvexPathRenderer.h"

#include "include/core/SkString.h"
#include "include/core/SkTypes.h"
#include "src/core/SkGeometry.h"
#include "src/core/SkMatrixPriv.h"
#include "src/core/SkPathPriv.h"
#include "src/core/SkPointPriv.h"
#include "src/gpu/BufferWriter.h"
#include "src/gpu/KeyBuilder.h"
#include "src/gpu/ganesh/GrAuditTrail.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrDrawOpTest.h"
#include "src/gpu/ganesh/GrGeometryProcessor.h"
#include "src/gpu/ganesh/GrProcessor.h"
#include "src/gpu/ganesh/GrProcessorUnitTest.h"
#include "src/gpu/ganesh/GrProgramInfo.h"
#include "src/gpu/ganesh/SurfaceDrawContext.h"
#include "src/gpu/ganesh/geometry/GrPathUtils.h"
#include "src/gpu/ganesh/geometry/GrStyledShape.h"
#include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
#include "src/gpu/ganesh/glsl/GrGLSLProgramDataManager.h"
#include "src/gpu/ganesh/glsl/GrGLSLUniformHandler.h"
#include "src/gpu/ganesh/glsl/GrGLSLVarying.h"
#include "src/gpu/ganesh/glsl/GrGLSLVertexGeoBuilder.h"
#include "src/gpu/ganesh/ops/GrMeshDrawOp.h"
#include "src/gpu/ganesh/ops/GrSimpleMeshDrawOpHelperWithStencil.h"

using namespace skia_private;

namespace skgpu::ganesh {

namespace {

struct Segment {
    enum {
        // These enum values are assumed in member functions below.
        kLine = 0,
        kQuad = 1,
    } fType;

    // line uses one pt, quad uses 2 pts
    SkPoint fPts[2];
    // normal to edge ending at each pt
    SkVector fNorms[2];
    // normalized bisector, facing outward, at the corner where the previous
    // segment meets this segment
    SkVector fMid;

    int countPoints() {
        static_assert(0 == kLine && 1 == kQuad);
        return fType + 1;
    }
    const SkPoint& endPt() const {
        static_assert(0 == kLine && 1 == kQuad);
        return fPts[fType];
    }
    const SkPoint& endNorm() const {
        static_assert(0 == kLine && 1 == kQuad);
        return fNorms[fType];
    }
};

typedef TArray<Segment, true> SegmentArray;
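
// A convex contour is decomposed into a ring of Segments. Each Segment stores only
// the control point(s) that follow the previous segment's end point (one point for a
// line, two for a quad), the outward unit normal of the edge ending at each stored
// point, and an outward bisector (fMid) for the corner shared with the previous
// segment. compute_vectors() and create_vertices() below use these normals and
// bisectors to push an extra ring of geometry roughly one pixel outward in device
// space for the antialiased coverage ramp.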

bool center_of_mass(const SegmentArray& segments, SkPoint* c) {
    SkScalar area = 0;
    SkPoint center = {0, 0};
    int count = segments.size();
    if (count <= 0) {
        return false;
    }
    SkPoint p0 = {0, 0};
    if (count > 2) {
        // We translate the polygon so that the first point is at the origin.
        // This avoids some precision issues with small area polygons far away
        // from the origin.
        p0 = segments[0].endPt();
        SkPoint pi;
        SkPoint pj;
        // the first and last iteration of the below loop would compute
        // zeros since the starting / ending point is (0,0). So instead we start
        // at i=1 and make the last iteration i=count-2.
        pj = segments[1].endPt() - p0;
        for (int i = 1; i < count - 1; ++i) {
            pi = pj;
            pj = segments[i + 1].endPt() - p0;

            SkScalar t = SkPoint::CrossProduct(pi, pj);
            area += t;
            center.fX += (pi.fX + pj.fX) * t;
            center.fY += (pi.fY + pj.fY) * t;
        }
    }

    // If the poly has no area then we instead return the average of
    // its points.
    if (SkScalarNearlyZero(area)) {
        SkPoint avg;
        avg.set(0, 0);
        for (int i = 0; i < count; ++i) {
            const SkPoint& pt = segments[i].endPt();
            avg.fX += pt.fX;
            avg.fY += pt.fY;
        }
        SkScalar denom = SK_Scalar1 / count;
        avg.scale(denom);
        *c = avg;
    } else {
        area *= 3;
        area = SkScalarInvert(area);
        center.scale(area);
        // undo the translate of p0 to the origin.
        *c = center + p0;
    }
    return !SkIsNaN(c->fX) && !SkIsNaN(c->fY) && c->isFinite();
}
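
// For reference, center_of_mass() is the standard polygon centroid formula:
//
//   A = (1/2) * sum_i cross(p_i, p_{i+1})
//   C = (1/(6A)) * sum_i (p_i + p_{i+1}) * cross(p_i, p_{i+1})
//
// The loop accumulates both sums without the 1/2 factor, so the final scale is
// 1/(3 * accumulated sum) rather than 1/(6A); the two are equivalent.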

bool compute_vectors(SegmentArray* segments,
                     SkPoint* fanPt,
                     SkPathFirstDirection dir,
                     int* vCount,
                     int* iCount) {
    if (!center_of_mass(*segments, fanPt)) {
        return false;
    }
    int count = segments->size();

    // Make the normals point towards the outside
    SkPointPriv::Side normSide;
    if (dir == SkPathFirstDirection::kCCW) {
        normSide = SkPointPriv::kRight_Side;
    } else {
        normSide = SkPointPriv::kLeft_Side;
    }

    int64_t vCount64 = 0;
    int64_t iCount64 = 0;
    // compute normals at all points
    for (int a = 0; a < count; ++a) {
        Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];

        const SkPoint* prevPt = &sega.endPt();
        int n = segb.countPoints();
        for (int p = 0; p < n; ++p) {
            segb.fNorms[p] = segb.fPts[p] - *prevPt;
            segb.fNorms[p].normalize();
            segb.fNorms[p] = SkPointPriv::MakeOrthog(segb.fNorms[p], normSide);
            prevPt = &segb.fPts[p];
        }
        if (Segment::kLine == segb.fType) {
            vCount64 += 5;
            iCount64 += 9;
        } else {
            vCount64 += 6;
            iCount64 += 12;
        }
    }

    // compute mid-vectors where segments meet. TODO: Detect shallow corners
    // and leave out the wedges and close gaps by stitching segments together.
    for (int a = 0; a < count; ++a) {
        const Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];
        segb.fMid = segb.fNorms[0] + sega.endNorm();
        segb.fMid.normalize();
        // corner wedges
        vCount64 += 4;
        iCount64 += 6;
    }
    if (vCount64 > SK_MaxS32 || iCount64 > SK_MaxS32) {
        return false;
    }
    *vCount = vCount64;
    *iCount = iCount64;
    return true;
}
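
// Vertex/index budget per segment, as accumulated above: a line edge contributes
// 5 vertices and 9 indices, a quad edge 6 vertices and 12 indices, and every corner
// wedge another 4 vertices and 6 indices. The totals are summed in 64-bit and the
// path is rejected if they would overflow the int counts later handed to the
// vertex/index allocators.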

struct DegenerateTestData {
    DegenerateTestData() { fStage = kInitial; }
    bool isDegenerate() const { return kNonDegenerate != fStage; }
    enum {
        kInitial,
        kPoint,
        kLine,
        kNonDegenerate
    }           fStage;
    SkPoint     fFirstPoint;
    SkVector    fLineNormal;
    SkScalar    fLineC;
};

static const SkScalar kClose = (SK_Scalar1 / 16);
static const SkScalar kCloseSqd = kClose * kClose;

void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
    switch (data->fStage) {
        case DegenerateTestData::kInitial:
            data->fFirstPoint = pt;
            data->fStage = DegenerateTestData::kPoint;
            break;
        case DegenerateTestData::kPoint:
            if (SkPointPriv::DistanceToSqd(pt, data->fFirstPoint) > kCloseSqd) {
                data->fLineNormal = pt - data->fFirstPoint;
                data->fLineNormal.normalize();
                data->fLineNormal = SkPointPriv::MakeOrthog(data->fLineNormal);
                data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
                data->fStage = DegenerateTestData::kLine;
            }
            break;
        case DegenerateTestData::kLine:
            if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
                data->fStage = DegenerateTestData::kNonDegenerate;
            }
            break;
        case DegenerateTestData::kNonDegenerate:
            break;
        default:
            SK_ABORT("Unexpected degenerate test stage.");
    }
}
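
// The degenerate test promotes the path through three states as device-space points
// arrive: a single point, then a line through the first two well-separated points
// (stored as a unit normal and offset so that dot(N, p) + C is the signed distance
// to the line), and finally "non-degenerate" once any point lies more than kClose
// (1/16 px) off that line. A contour that never leaves the kLine stage is within
// 1/16 px of a single line and is rejected by get_segments() below.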

inline bool get_direction(const SkPath& path, const SkMatrix& m, SkPathFirstDirection* dir) {
    // At this point, we've already returned true from canDraw(), which checked that the path's
    // direction could be determined, so this should just be fetching the cached direction.
    // However, if perspective is involved, we're operating on a transformed path, which may no
    // longer have a computable direction.
    *dir = SkPathPriv::ComputeFirstDirection(path);
    if (*dir == SkPathFirstDirection::kUnknown) {
        return false;
    }

    // check whether m reverses the orientation
    SkASSERT(!m.hasPerspective());
    SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
                      m.get(SkMatrix::kMSkewX)  * m.get(SkMatrix::kMSkewY);
    if (det2x2 < 0) {
        *dir = SkPathPriv::OppositeFirstDirection(*dir);
    }

    return true;
}
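
// det2x2 above is the determinant of the upper-left 2x2 (linear) part of the matrix,
// det = scaleX * scaleY - skewX * skewY. A negative determinant means the transform
// contains a reflection, so the path's winding direction flips in device space.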

inline void add_line_to_segment(const SkPoint& pt, SegmentArray* segments) {
    segments->push_back();
    segments->back().fType = Segment::kLine;
    segments->back().fPts[0] = pt;
}

inline void add_quad_segment(const SkPoint pts[3], SegmentArray* segments) {
    if (SkPointPriv::DistanceToLineSegmentBetweenSqd(pts[1], pts[0], pts[2]) < kCloseSqd) {
        if (pts[0] != pts[2]) {
            add_line_to_segment(pts[2], segments);
        }
    } else {
        segments->push_back();
        segments->back().fType = Segment::kQuad;
        segments->back().fPts[0] = pts[1];
        segments->back().fPts[1] = pts[2];
    }
}

inline void add_cubic_segments(const SkPoint pts[4],
                               SkPathFirstDirection dir,
                               SegmentArray* segments) {
    STArray<15, SkPoint, true> quads;
    GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
    int count = quads.size();
    for (int q = 0; q < count; q += 3) {
        add_quad_segment(&quads[q], segments);
    }
}
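
// Cubics are approximated by quads via GrPathUtils::convertCubicToQuadsConstrainToTangents,
// which (as its name suggests) constrains the generated quads relative to the cubic's
// tangents for the given winding direction so the approximation remains usable by this
// convex outset scheme. Each group of 3 points in 'quads' is one quad.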

bool get_segments(const SkPath& path,
                  const SkMatrix& m,
                  SegmentArray* segments,
                  SkPoint* fanPt,
                  int* vCount,
                  int* iCount) {
    SkPath::Iter iter(path, true);
    // This renderer over-emphasizes very thin path regions. We use the distance
    // to the path from the sample to compute coverage. Every pixel intersected
    // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
    // notice that the sample may be close to a very thin area of the path and
    // thus should be very light. This is particularly egregious for degenerate
    // line paths. We detect paths that are very close to a line (zero area) and
    // draw nothing.
    DegenerateTestData degenerateData;
    SkPathFirstDirection dir;
    if (!get_direction(path, m, &dir)) {
        return false;
    }

    for (;;) {
        SkPoint pts[4];
        SkPath::Verb verb = iter.next(pts);
        switch (verb) {
            case SkPath::kMove_Verb:
                m.mapPoints(pts, 1);
                update_degenerate_test(&degenerateData, pts[0]);
                break;
            case SkPath::kLine_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 2)) {
                    m.mapPoints(&pts[1], 1);
                    update_degenerate_test(&degenerateData, pts[1]);
                    add_line_to_segment(pts[1], segments);
                }
                break;
            }
            case SkPath::kQuad_Verb:
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    add_quad_segment(pts, segments);
                }
                break;
            case SkPath::kConic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    SkScalar weight = iter.conicWeight();
                    SkAutoConicToQuads converter;
                    const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f);
                    for (int i = 0; i < converter.countQuads(); ++i) {
                        update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
                        update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
                        add_quad_segment(quadPts + 2*i, segments);
                    }
                }
                break;
            }
            case SkPath::kCubic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 4)) {
                    m.mapPoints(pts, 4);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    update_degenerate_test(&degenerateData, pts[3]);
                    add_cubic_segments(pts, dir, segments);
                }
                break;
            }
            case SkPath::kDone_Verb:
                if (degenerateData.isDegenerate()) {
                    return false;
                } else {
                    return compute_vectors(segments, fanPt, dir, vCount, iCount);
                }
            default:
                break;
        }
    }
}
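
// get_segments() walks the path once: each verb's points are mapped into device
// space, fed to the degenerate test, and converted to line or quad segments (conics
// via SkAutoConicToQuads, cubics via the quad approximation above). At kDone_Verb it
// either rejects a degenerate contour or hands off to compute_vectors() to produce
// the fan point and the vertex/index totals.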

struct Draw {
    Draw() : fVertexCnt(0), fIndexCnt(0) {}
    int fVertexCnt;
    int fIndexCnt;
};

typedef TArray<Draw, true> DrawArray;

void create_vertices(const SegmentArray& segments,
                     const SkPoint& fanPt,
                     const VertexColor& color,
                     DrawArray* draws,
                     VertexWriter& verts,
                     uint16_t* idxs,
                     size_t vertexStride) {
    Draw* draw = &draws->push_back();
    // alias just to make vert/index assignments easier to read.
    int* v = &draw->fVertexCnt;
    int* i = &draw->fIndexCnt;

    int count = segments.size();
    for (int a = 0; a < count; ++a) {
        const Segment& sega = segments[a];
        int b = (a + 1) % count;
        const Segment& segb = segments[b];

        // Check whether adding the verts for this segment to the current draw would cause index
        // values to overflow.
        int vCount = 4;
        if (Segment::kLine == segb.fType) {
            vCount += 5;
        } else {
            vCount += 6;
        }
        if (draw->fVertexCnt + vCount > (1 << 16)) {
            idxs += *i;
            draw = &draws->push_back();
            v = &draw->fVertexCnt;
            i = &draw->fIndexCnt;
        }

        const SkScalar negOneDists[2] = { -SK_Scalar1, -SK_Scalar1 };

        // FIXME: These tris are inset in the 1 unit arc around the corner
        SkPoint p0 = sega.endPt();
        // Position, Color, UV, D0, D1
        verts << p0                    << color << SkPoint{0, 0}           << negOneDists;
        verts << (p0 + sega.endNorm()) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
        verts << (p0 + segb.fMid)      << color << SkPoint{0, -SK_Scalar1} << negOneDists;
        verts << (p0 + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;

        idxs[*i + 0] = *v + 0;
        idxs[*i + 1] = *v + 2;
        idxs[*i + 2] = *v + 1;
        idxs[*i + 3] = *v + 0;
        idxs[*i + 4] = *v + 3;
        idxs[*i + 5] = *v + 2;

        *v += 4;
        *i += 6;

        if (Segment::kLine == segb.fType) {
            // we draw the line edge as a degenerate quad (u is 0, v is the
            // signed distance to the edge)
            SkPoint v1Pos = sega.endPt();
            SkPoint v2Pos = segb.fPts[0];
            SkScalar dist = SkPointPriv::DistanceToLineBetween(fanPt, v1Pos, v2Pos);

            verts << fanPt                    << color << SkPoint{0, dist}        << negOneDists;
            verts << v1Pos                    << color << SkPoint{0, 0}           << negOneDists;
            verts << v2Pos                    << color << SkPoint{0, 0}           << negOneDists;
            verts << (v1Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
            verts << (v2Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;

            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            *i += 6;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 5;
        } else {
            SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};

            SkScalar c0 = segb.fNorms[0].dot(qpts[0]);
            SkScalar c1 = segb.fNorms[1].dot(qpts[2]);

            // We must transform the positions into UV in cpu memory and then copy them to the gpu
            // buffer. If we wrote the positions into the gpu buffer first and then computed the
            // UVs, we would end up reading back from the gpu buffer, which can be very slow.
            struct PosAndUV {
                SkPoint fPos;
                SkPoint fUV;
            };
            PosAndUV posAndUVPoints[6];
            posAndUVPoints[0].fPos = fanPt;
            posAndUVPoints[1].fPos = qpts[0];
            posAndUVPoints[2].fPos = qpts[2];
            posAndUVPoints[3].fPos = qpts[0] + segb.fNorms[0];
            posAndUVPoints[4].fPos = qpts[2] + segb.fNorms[1];
            SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
            midVec.normalize();
            posAndUVPoints[5].fPos = qpts[1] + midVec;

            GrPathUtils::QuadUVMatrix toUV(qpts);
            toUV.apply(posAndUVPoints, 6, sizeof(PosAndUV), sizeof(SkPoint));

            verts << posAndUVPoints[0].fPos << color << posAndUVPoints[0].fUV
                  << (-segb.fNorms[0].dot(fanPt) + c0)
                  << (-segb.fNorms[1].dot(fanPt) + c1);

            verts << posAndUVPoints[1].fPos << color << posAndUVPoints[1].fUV
                  << 0.0f
                  << (-segb.fNorms[1].dot(qpts[0]) + c1);

            verts << posAndUVPoints[2].fPos << color << posAndUVPoints[2].fUV
                  << (-segb.fNorms[0].dot(qpts[2]) + c0)
                  << 0.0f;
            // We need a negative value with a magnitude large enough that it won't affect the
            // result when interpolated with, but not so large that it hurts numerical precision
            // on less powerful GPUs.
            static const SkScalar kStableLargeNegativeValue = -SK_ScalarMax/1000000;
            verts << posAndUVPoints[3].fPos << color << posAndUVPoints[3].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            verts << posAndUVPoints[4].fPos << color << posAndUVPoints[4].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            verts << posAndUVPoints[5].fPos << color << posAndUVPoints[5].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;
            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            idxs[*i + 6] = *v + 5;
            idxs[*i + 7] = *v + 3;
            idxs[*i + 8] = *v + 4;

            *i += 9;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 6;
        }
    }
}
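
// The per-vertex layout written above is (position, color, UV, D0, D1), matching the
// attribute set QuadEdgeEffect declares below (UV, D0, D1 pack into its float4
// inQuadEdge attribute). Whenever appending a segment would push a draw past 2^16
// vertices, create_vertices() starts a new Draw record so every mesh stays addressable
// with 16-bit indices; onPrepareDraws() later emits one GrSimpleMesh per Draw.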

///////////////////////////////////////////////////////////////////////////////

/*
 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
 * two components of the vertex attribute. Coverage is based on signed
 * distance with negative being inside, positive outside. The edge is specified in
 * window space (y-down). If either the third or fourth component of the interpolated
 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
 * attempt to trim to a portion of the infinite quad.
 * Requires shader derivative instruction support.
 */
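
/*
 * For reference, the fragment shader emitted below evaluates the implicit function
 * f(u, v) = u^2 - v in the quad's canonical space and approximates the signed
 * distance to the curve as f / |grad f|, where the screen-space gradient
 *   grad f = (2u * du/dx - dv/dx, 2u * du/dy - dv/dy)
 * is computed from dFdx/dFdy. Coverage is then saturate(0.5 - f / |grad f|), i.e. a
 * one-pixel-wide linear ramp centered on the curve. The z/w components carry the two
 * trimming edge values mentioned above; when both are positive the shader instead
 * uses min(z, w) + 0.5, capped at 1.0.
 */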

class QuadEdgeEffect : public GrGeometryProcessor {
public:
    static GrGeometryProcessor* Make(SkArenaAlloc* arena,
                                     const SkMatrix& localMatrix,
                                     bool usesLocalCoords,
                                     bool wideColor) {
        return arena->make([&](void* ptr) {
            return new (ptr) QuadEdgeEffect(localMatrix, usesLocalCoords, wideColor);
        });
    }

    ~QuadEdgeEffect() override {}

    const char* name() const override { return "QuadEdge"; }

    void addToKey(const GrShaderCaps& caps, KeyBuilder* b) const override {
        b->addBool(fUsesLocalCoords, "usesLocalCoords");
        b->addBits(ProgramImpl::kMatrixKeyBits,
                   ProgramImpl::ComputeMatrixKey(caps, fLocalMatrix),
                   "localMatrixType");
    }

    std::unique_ptr<ProgramImpl> makeProgramImpl(const GrShaderCaps&) const override;

private:
    QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords, bool wideColor)
            : INHERITED(kQuadEdgeEffect_ClassID)
            , fLocalMatrix(localMatrix)
            , fUsesLocalCoords(usesLocalCoords) {
        fInPosition = {"inPosition", kFloat2_GrVertexAttribType, SkSLType::kFloat2};
        fInColor = MakeColorAttribute("inColor", wideColor);
        // GL on iOS 14 needs more precision for the quadedge attributes
        fInQuadEdge = {"inQuadEdge", kFloat4_GrVertexAttribType, SkSLType::kFloat4};
        this->setVertexAttributesWithImplicitOffsets(&fInPosition, 3);
    }

    Attribute fInPosition;
    Attribute fInColor;
    Attribute fInQuadEdge;

    SkMatrix fLocalMatrix;
    bool fUsesLocalCoords;

    GR_DECLARE_GEOMETRY_PROCESSOR_TEST

    using INHERITED = GrGeometryProcessor;
};

std::unique_ptr<GrGeometryProcessor::ProgramImpl> QuadEdgeEffect::makeProgramImpl(
        const GrShaderCaps&) const {
    class Impl : public ProgramImpl {
    public:
        void setData(const GrGLSLProgramDataManager& pdman,
                     const GrShaderCaps& shaderCaps,
                     const GrGeometryProcessor& geomProc) override {
            const QuadEdgeEffect& qe = geomProc.cast<QuadEdgeEffect>();
            SetTransform(pdman, shaderCaps, fLocalMatrixUniform, qe.fLocalMatrix, &fLocalMatrix);
        }

    private:
        void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
            const QuadEdgeEffect& qe = args.fGeomProc.cast<QuadEdgeEffect>();
            GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
            GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
            GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
            GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;

            // emit attributes
            varyingHandler->emitAttributes(qe);

            // GL on iOS 14 needs more precision for the quadedge attributes
            // We might as well enable it everywhere
            GrGLSLVarying v(SkSLType::kFloat4);
            varyingHandler->addVarying("QuadEdge", &v);
            vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge.name());

            // Setup pass through color
            fragBuilder->codeAppendf("half4 %s;", args.fOutputColor);
            varyingHandler->addPassThroughAttribute(qe.fInColor.asShaderVar(), args.fOutputColor);

            // Setup position
            WriteOutputPosition(vertBuilder, gpArgs, qe.fInPosition.name());
            if (qe.fUsesLocalCoords) {
                WriteLocalCoord(vertBuilder,
                                uniformHandler,
                                *args.fShaderCaps,
                                gpArgs,
                                qe.fInPosition.asShaderVar(),
                                qe.fLocalMatrix,
                                &fLocalMatrixUniform);
            }

            fragBuilder->codeAppendf("half edgeAlpha;");

            // keep the derivative instructions outside the conditional
            fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
            // today we know z and w are in device space. We could use derivatives
            fragBuilder->codeAppendf("edgeAlpha = half(min(min(%s.z, %s.w) + 0.5, 1.0));", v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf("} else {");
            fragBuilder->codeAppendf("half2 gF = half2(half(2.0*%s.x*duvdx.x - duvdx.y),"
                                     "                 half(2.0*%s.x*duvdy.x - duvdy.y));",
                                     v.fsIn(), v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = half(%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = "
                                     "saturate(0.5 - edgeAlpha / length(gF));}");

            fragBuilder->codeAppendf("half4 %s = half4(edgeAlpha);", args.fOutputCoverage);
        }

    private:
        SkMatrix fLocalMatrix = SkMatrix::InvalidMatrix();

        UniformHandle fLocalMatrixUniform;
    };

    return std::make_unique<Impl>();
}

GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect)

#if defined(GR_TEST_UTILS)
GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
    SkMatrix localMatrix = GrTest::TestMatrix(d->fRandom);
    bool usesLocalCoords = d->fRandom->nextBool();
    bool wideColor = d->fRandom->nextBool();
    // Doesn't work without derivative instructions.
    return d->caps()->shaderCaps()->fShaderDerivativeSupport
                   ? QuadEdgeEffect::Make(d->allocator(), localMatrix, usesLocalCoords, wideColor)
                   : nullptr;
}
#endif

class AAConvexPathOp final : public GrMeshDrawOp {
private:
    using Helper = GrSimpleMeshDrawOpHelperWithStencil;

public:
    DEFINE_OP_CLASS_ID

    static GrOp::Owner Make(GrRecordingContext* context,
                            GrPaint&& paint,
                            const SkMatrix& viewMatrix,
                            const SkPath& path,
                            const GrUserStencilSettings* stencilSettings) {
        return Helper::FactoryHelper<AAConvexPathOp>(context, std::move(paint), viewMatrix, path,
                                                     stencilSettings);
    }

    AAConvexPathOp(GrProcessorSet* processorSet, const SkPMColor4f& color,
                   const SkMatrix& viewMatrix, const SkPath& path,
                   const GrUserStencilSettings* stencilSettings)
            : INHERITED(ClassID()), fHelper(processorSet, GrAAType::kCoverage, stencilSettings) {
        fPaths.emplace_back(PathData{viewMatrix, path, color});
        this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
                                   IsHairline::kNo);
    }

    const char* name() const override { return "AAConvexPathOp"; }

    void visitProxies(const GrVisitProxyFunc& func) const override {
        if (fProgramInfo) {
            fProgramInfo->visitFPProxies(func);
        } else {
            fHelper.visitProxies(func);
        }
    }

    FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }

    GrProcessorSet::Analysis finalize(const GrCaps& caps, const GrAppliedClip* clip,
                                      GrClampType clampType) override {
        return fHelper.finalizeProcessors(
                caps, clip, clampType, GrProcessorAnalysisCoverage::kSingleChannel,
                &fPaths.back().fColor, &fWideColor);
    }

private:
    GrProgramInfo* programInfo() override { return fProgramInfo; }

    void onCreateProgramInfo(const GrCaps* caps,
                             SkArenaAlloc* arena,
                             const GrSurfaceProxyView& writeView,
                             bool usesMSAASurface,
                             GrAppliedClip&& appliedClip,
                             const GrDstProxyView& dstProxyView,
                             GrXferBarrierFlags renderPassXferBarriers,
                             GrLoadOp colorLoadOp) override {
        SkMatrix invert;
        if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) {
            return;
        }

        GrGeometryProcessor* quadProcessor = QuadEdgeEffect::Make(arena, invert,
                                                                  fHelper.usesLocalCoords(),
                                                                  fWideColor);

        fProgramInfo = fHelper.createProgramInfoWithStencil(caps, arena, writeView, usesMSAASurface,
                                                            std::move(appliedClip),
                                                            dstProxyView, quadProcessor,
                                                            GrPrimitiveType::kTriangles,
                                                            renderPassXferBarriers, colorLoadOp);
    }

    void onPrepareDraws(GrMeshDrawTarget* target) override {
        int instanceCount = fPaths.size();

        if (!fProgramInfo) {
            this->createProgramInfo(target);
            if (!fProgramInfo) {
                return;
            }
        }

        const size_t kVertexStride = fProgramInfo->geomProc().vertexStride();

        fDraws.reserve(instanceCount);

        // TODO generate all segments for all paths and use one vertex buffer
        for (int i = 0; i < instanceCount; i++) {
            const PathData& args = fPaths[i];

            // We rely on the fact that SkPath::transform() subdivides the path when the
            // matrix has perspective. Otherwise, we apply the view matrix when copying
            // to the segment representation.
            const SkMatrix* viewMatrix = &args.fViewMatrix;

            // We avoid initializing the path unless we have to
            const SkPath* pathPtr = &args.fPath;
            SkTLazy<SkPath> tmpPath;
            if (viewMatrix->hasPerspective()) {
                SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
                tmpPathPtr->setIsVolatile(true);
                tmpPathPtr->transform(*viewMatrix);
                viewMatrix = &SkMatrix::I();
                pathPtr = tmpPathPtr;
            }

            int vertexCount;
            int indexCount;
            enum {
                kPreallocSegmentCnt = 512 / sizeof(Segment),
                kPreallocDrawCnt = 4,
            };
            STArray<kPreallocSegmentCnt, Segment, true> segments;
            SkPoint fanPt;

            if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
                              &indexCount)) {
                continue;
            }

            sk_sp<const GrBuffer> vertexBuffer;
            int firstVertex;

            VertexWriter verts = target->makeVertexWriter(kVertexStride,
                                                          vertexCount,
                                                          &vertexBuffer,
                                                          &firstVertex);

            if (!verts) {
                SkDebugf("Could not allocate vertices\n");
                return;
            }

            sk_sp<const GrBuffer> indexBuffer;
            int firstIndex;

            uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
            if (!idxs) {
                SkDebugf("Could not allocate indices\n");
                return;
            }

            STArray<kPreallocDrawCnt, Draw, true> draws;
            VertexColor color(args.fColor, fWideColor);
            create_vertices(segments, fanPt, color, &draws, verts, idxs, kVertexStride);

            GrSimpleMesh* meshes = target->allocMeshes(draws.size());
            for (int j = 0; j < draws.size(); ++j) {
                const Draw& draw = draws[j];
                meshes[j].setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0,
                                     draw.fVertexCnt - 1, GrPrimitiveRestart::kNo, vertexBuffer,
                                     firstVertex);
                firstIndex += draw.fIndexCnt;
                firstVertex += draw.fVertexCnt;
            }

            fDraws.push_back({ meshes, draws.size() });
        }
    }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        if (!fProgramInfo || fDraws.empty()) {
            return;
        }

        flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
        flushState->bindTextures(fProgramInfo->geomProc(), nullptr, fProgramInfo->pipeline());
        for (int i = 0; i < fDraws.size(); ++i) {
            for (int j = 0; j < fDraws[i].fMeshCount; ++j) {
                flushState->drawMesh(fDraws[i].fMeshes[j]);
            }
        }
    }

    CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc*, const GrCaps& caps) override {
        AAConvexPathOp* that = t->cast<AAConvexPathOp>();
        if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
            return CombineResult::kCannotCombine;
        }
        if (fHelper.usesLocalCoords() &&
            !SkMatrixPriv::CheapEqual(fPaths[0].fViewMatrix, that->fPaths[0].fViewMatrix)) {
            return CombineResult::kCannotCombine;
        }

        fPaths.push_back_n(that->fPaths.size(), that->fPaths.begin());
        fWideColor |= that->fWideColor;
        return CombineResult::kMerged;
    }

#if defined(GR_TEST_UTILS)
    SkString onDumpInfo() const override {
        return SkStringPrintf("Count: %d\n%s", fPaths.size(), fHelper.dumpInfo().c_str());
    }
#endif

    struct PathData {
        SkMatrix    fViewMatrix;
        SkPath      fPath;
        SkPMColor4f fColor;
    };

    Helper fHelper;
    STArray<1, PathData, true> fPaths;
    bool fWideColor;

    struct MeshDraw {
        GrSimpleMesh* fMeshes;
        int fMeshCount;
    };

    SkTDArray<MeshDraw> fDraws;
    GrProgramInfo*      fProgramInfo = nullptr;

    using INHERITED = GrMeshDrawOp;
};

} // anonymous namespace

///////////////////////////////////////////////////////////////////////////////

PathRenderer::CanDrawPath AAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // This check requires convexity and known direction, since the direction is used to build
    // the geometry segments. Degenerate convex paths will fall through to some other path renderer.
    if (args.fCaps->shaderCaps()->fShaderDerivativeSupport &&
        (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() &&
        !args.fShape->inverseFilled() && args.fShape->knownToBeConvex() &&
        args.fShape->knownDirection()) {
        return CanDrawPath::kYes;
    }
    return CanDrawPath::kNo;
}

bool AAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fContext->priv().auditTrail(),
                              "AAConvexPathRenderer::onDrawPath");
    SkASSERT(args.fSurfaceDrawContext->numSamples() <= 1);
    SkASSERT(!args.fShape->isEmpty());

    SkPath path;
    args.fShape->asPath(&path);

    GrOp::Owner op = AAConvexPathOp::Make(args.fContext, std::move(args.fPaint),
                                          *args.fViewMatrix,
                                          path, args.fUserStencilSettings);
    args.fSurfaceDrawContext->addDrawOp(args.fClip, std::move(op));
    return true;
}

}  // namespace skgpu::ganesh

#if defined(GR_TEST_UTILS)

GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
    SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
    const SkPath& path = GrTest::TestPathConvex(random);
    const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context);
    return skgpu::ganesh::AAConvexPathOp::Make(
            context, std::move(paint), viewMatrix, path, stencilSettings);
}

#endif