1 /*
2 * Copyright 2012 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/ops/AAConvexPathRenderer.h"
9
10 #include "include/core/SkString.h"
11 #include "include/core/SkTypes.h"
12 #include "src/core/SkGeometry.h"
13 #include "src/core/SkMatrixPriv.h"
14 #include "src/core/SkPathPriv.h"
15 #include "src/core/SkPointPriv.h"
16 #include "src/gpu/BufferWriter.h"
17 #include "src/gpu/KeyBuilder.h"
18 #include "src/gpu/ganesh/GrAuditTrail.h"
19 #include "src/gpu/ganesh/GrCaps.h"
20 #include "src/gpu/ganesh/GrDrawOpTest.h"
21 #include "src/gpu/ganesh/GrGeometryProcessor.h"
22 #include "src/gpu/ganesh/GrProcessor.h"
23 #include "src/gpu/ganesh/GrProcessorUnitTest.h"
24 #include "src/gpu/ganesh/GrProgramInfo.h"
25 #include "src/gpu/ganesh/SurfaceDrawContext.h"
26 #include "src/gpu/ganesh/geometry/GrPathUtils.h"
27 #include "src/gpu/ganesh/geometry/GrStyledShape.h"
28 #include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
29 #include "src/gpu/ganesh/glsl/GrGLSLProgramDataManager.h"
30 #include "src/gpu/ganesh/glsl/GrGLSLUniformHandler.h"
31 #include "src/gpu/ganesh/glsl/GrGLSLVarying.h"
32 #include "src/gpu/ganesh/glsl/GrGLSLVertexGeoBuilder.h"
33 #include "src/gpu/ganesh/ops/GrMeshDrawOp.h"
34 #include "src/gpu/ganesh/ops/GrSimpleMeshDrawOpHelperWithStencil.h"
35
36 namespace skgpu::v1 {
37
38 namespace {
39
// One boundary piece of the convex polygon approximation of the path: either
// a line segment or a quadratic Bezier. The segment's start point is implicit:
// it is the end point of the previous segment in the array.
struct Segment {
    enum {
        // These enum values are assumed in member functions below.
        kLine = 0,
        kQuad = 1,
    } fType;

    // line uses one pt, quad uses 2 pts
    SkPoint fPts[2];
    // normal to edge ending at each pt
    SkVector fNorms[2];
    // is the corner where the previous segment meets this segment
    // sharp. If so, fMid is a normalized bisector facing outward.
    SkVector fMid;

    // Number of points stored in fPts: 1 for a line, 2 for a quad.
    int countPoints() {
        static_assert(0 == kLine && 1 == kQuad);
        return fType + 1;
    }
    // The segment's final point (shared with the start of the next segment).
    const SkPoint& endPt() const {
        static_assert(0 == kLine && 1 == kQuad);
        return fPts[fType];
    }
    // The outward edge normal at the segment's final point.
    const SkPoint& endNorm() const {
        static_assert(0 == kLine && 1 == kQuad);
        return fNorms[fType];
    }
};

typedef SkTArray<Segment, true> SegmentArray;
70
center_of_mass(const SegmentArray & segments,SkPoint * c)71 bool center_of_mass(const SegmentArray& segments, SkPoint* c) {
72 SkScalar area = 0;
73 SkPoint center = {0, 0};
74 int count = segments.size();
75 if (count <= 0) {
76 return false;
77 }
78 SkPoint p0 = {0, 0};
79 if (count > 2) {
80 // We translate the polygon so that the first point is at the origin.
81 // This avoids some precision issues with small area polygons far away
82 // from the origin.
83 p0 = segments[0].endPt();
84 SkPoint pi;
85 SkPoint pj;
86 // the first and last iteration of the below loop would compute
87 // zeros since the starting / ending point is (0,0). So instead we start
88 // at i=1 and make the last iteration i=count-2.
89 pj = segments[1].endPt() - p0;
90 for (int i = 1; i < count - 1; ++i) {
91 pi = pj;
92 pj = segments[i + 1].endPt() - p0;
93
94 SkScalar t = SkPoint::CrossProduct(pi, pj);
95 area += t;
96 center.fX += (pi.fX + pj.fX) * t;
97 center.fY += (pi.fY + pj.fY) * t;
98 }
99 }
100
101 // If the poly has no area then we instead return the average of
102 // its points.
103 if (SkScalarNearlyZero(area)) {
104 SkPoint avg;
105 avg.set(0, 0);
106 for (int i = 0; i < count; ++i) {
107 const SkPoint& pt = segments[i].endPt();
108 avg.fX += pt.fX;
109 avg.fY += pt.fY;
110 }
111 SkScalar denom = SK_Scalar1 / count;
112 avg.scale(denom);
113 *c = avg;
114 } else {
115 area *= 3;
116 area = SkScalarInvert(area);
117 center.scale(area);
118 // undo the translate of p0 to the origin.
119 *c = center + p0;
120 }
121 return !SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY) && c->isFinite();
122 }
123
compute_vectors(SegmentArray * segments,SkPoint * fanPt,SkPathFirstDirection dir,int * vCount,int * iCount)124 bool compute_vectors(SegmentArray* segments,
125 SkPoint* fanPt,
126 SkPathFirstDirection dir,
127 int* vCount,
128 int* iCount) {
129 if (!center_of_mass(*segments, fanPt)) {
130 return false;
131 }
132 int count = segments->size();
133
134 // Make the normals point towards the outside
135 SkPointPriv::Side normSide;
136 if (dir == SkPathFirstDirection::kCCW) {
137 normSide = SkPointPriv::kRight_Side;
138 } else {
139 normSide = SkPointPriv::kLeft_Side;
140 }
141
142 int64_t vCount64 = 0;
143 int64_t iCount64 = 0;
144 // compute normals at all points
145 for (int a = 0; a < count; ++a) {
146 Segment& sega = (*segments)[a];
147 int b = (a + 1) % count;
148 Segment& segb = (*segments)[b];
149
150 const SkPoint* prevPt = &sega.endPt();
151 int n = segb.countPoints();
152 for (int p = 0; p < n; ++p) {
153 segb.fNorms[p] = segb.fPts[p] - *prevPt;
154 segb.fNorms[p].normalize();
155 segb.fNorms[p] = SkPointPriv::MakeOrthog(segb.fNorms[p], normSide);
156 prevPt = &segb.fPts[p];
157 }
158 if (Segment::kLine == segb.fType) {
159 vCount64 += 5;
160 iCount64 += 9;
161 } else {
162 vCount64 += 6;
163 iCount64 += 12;
164 }
165 }
166
167 // compute mid-vectors where segments meet. TODO: Detect shallow corners
168 // and leave out the wedges and close gaps by stitching segments together.
169 for (int a = 0; a < count; ++a) {
170 const Segment& sega = (*segments)[a];
171 int b = (a + 1) % count;
172 Segment& segb = (*segments)[b];
173 segb.fMid = segb.fNorms[0] + sega.endNorm();
174 segb.fMid.normalize();
175 // corner wedges
176 vCount64 += 4;
177 iCount64 += 6;
178 }
179 if (vCount64 > SK_MaxS32 || iCount64 > SK_MaxS32) {
180 return false;
181 }
182 *vCount = vCount64;
183 *iCount = iCount64;
184 return true;
185 }
186
187 struct DegenerateTestData {
DegenerateTestDataskgpu::v1::__anon7fae288f0111::DegenerateTestData188 DegenerateTestData() { fStage = kInitial; }
isDegenerateskgpu::v1::__anon7fae288f0111::DegenerateTestData189 bool isDegenerate() const { return kNonDegenerate != fStage; }
190 enum {
191 kInitial,
192 kPoint,
193 kLine,
194 kNonDegenerate
195 } fStage;
196 SkPoint fFirstPoint;
197 SkVector fLineNormal;
198 SkScalar fLineC;
199 };
200
// Distance tolerance (and its square) below which points are considered
// coincident / collinear for the degenerate-path tests.
static const SkScalar kClose = (SK_Scalar1 / 16);
static const SkScalar kCloseSqd = kClose * kClose;
203
update_degenerate_test(DegenerateTestData * data,const SkPoint & pt)204 void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
205 switch (data->fStage) {
206 case DegenerateTestData::kInitial:
207 data->fFirstPoint = pt;
208 data->fStage = DegenerateTestData::kPoint;
209 break;
210 case DegenerateTestData::kPoint:
211 if (SkPointPriv::DistanceToSqd(pt, data->fFirstPoint) > kCloseSqd) {
212 data->fLineNormal = pt - data->fFirstPoint;
213 data->fLineNormal.normalize();
214 data->fLineNormal = SkPointPriv::MakeOrthog(data->fLineNormal);
215 data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
216 data->fStage = DegenerateTestData::kLine;
217 }
218 break;
219 case DegenerateTestData::kLine:
220 if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
221 data->fStage = DegenerateTestData::kNonDegenerate;
222 }
223 break;
224 case DegenerateTestData::kNonDegenerate:
225 break;
226 default:
227 SK_ABORT("Unexpected degenerate test stage.");
228 }
229 }
230
get_direction(const SkPath & path,const SkMatrix & m,SkPathFirstDirection * dir)231 inline bool get_direction(const SkPath& path, const SkMatrix& m, SkPathFirstDirection* dir) {
232 // At this point, we've already returned true from canDraw(), which checked that the path's
233 // direction could be determined, so this should just be fetching the cached direction.
234 // However, if perspective is involved, we're operating on a transformed path, which may no
235 // longer have a computable direction.
236 *dir = SkPathPriv::ComputeFirstDirection(path);
237 if (*dir == SkPathFirstDirection::kUnknown) {
238 return false;
239 }
240
241 // check whether m reverses the orientation
242 SkASSERT(!m.hasPerspective());
243 SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
244 m.get(SkMatrix::kMSkewX) * m.get(SkMatrix::kMSkewY);
245 if (det2x2 < 0) {
246 *dir = SkPathPriv::OppositeFirstDirection(*dir);
247 }
248
249 return true;
250 }
251
add_line_to_segment(const SkPoint & pt,SegmentArray * segments)252 inline void add_line_to_segment(const SkPoint& pt, SegmentArray* segments) {
253 segments->push_back();
254 segments->back().fType = Segment::kLine;
255 segments->back().fPts[0] = pt;
256 }
257
add_quad_segment(const SkPoint pts[3],SegmentArray * segments)258 inline void add_quad_segment(const SkPoint pts[3], SegmentArray* segments) {
259 if (SkPointPriv::DistanceToLineSegmentBetweenSqd(pts[1], pts[0], pts[2]) < kCloseSqd) {
260 if (pts[0] != pts[2]) {
261 add_line_to_segment(pts[2], segments);
262 }
263 } else {
264 segments->push_back();
265 segments->back().fType = Segment::kQuad;
266 segments->back().fPts[0] = pts[1];
267 segments->back().fPts[1] = pts[2];
268 }
269 }
270
add_cubic_segments(const SkPoint pts[4],SkPathFirstDirection dir,SegmentArray * segments)271 inline void add_cubic_segments(const SkPoint pts[4],
272 SkPathFirstDirection dir,
273 SegmentArray* segments) {
274 SkSTArray<15, SkPoint, true> quads;
275 GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
276 int count = quads.size();
277 for (int q = 0; q < count; q += 3) {
278 add_quad_segment(&quads[q], segments);
279 }
280 }
281
// Walks the closed path, mapping points through |m| and flattening every verb
// into line/quad Segments in device space. Also feeds points through the
// degenerate-path test. On kDone, returns false if the path is degenerate,
// otherwise finishes via compute_vectors() (normals, bisectors, fan point,
// and vertex/index counts).
bool get_segments(const SkPath& path,
                  const SkMatrix& m,
                  SegmentArray* segments,
                  SkPoint* fanPt,
                  int* vCount,
                  int* iCount) {
    SkPath::Iter iter(path, true);
    // This renderer over-emphasizes very thin path regions. We use the distance
    // to the path from the sample to compute coverage. Every pixel intersected
    // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
    // notice that the sample may be close to a very thin area of the path and
    // thus should be very light. This is particularly egregious for degenerate
    // line paths. We detect paths that are very close to a line (zero area) and
    // draw nothing.
    DegenerateTestData degenerateData;
    SkPathFirstDirection dir;
    if (!get_direction(path, m, &dir)) {
        return false;
    }

    for (;;) {
        SkPoint pts[4];
        SkPath::Verb verb = iter.next(pts);
        switch (verb) {
            case SkPath::kMove_Verb:
                m.mapPoints(pts, 1);
                update_degenerate_test(&degenerateData, pts[0]);
                break;
            case SkPath::kLine_Verb: {
                // Zero-length segments are skipped entirely.
                if (!SkPathPriv::AllPointsEq(pts, 2)) {
                    m.mapPoints(&pts[1], 1);
                    update_degenerate_test(&degenerateData, pts[1]);
                    add_line_to_segment(pts[1], segments);
                }
                break;
            }
            case SkPath::kQuad_Verb:
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    add_quad_segment(pts, segments);
                }
                break;
            case SkPath::kConic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    // Approximate the conic with quads (tolerance 0.25); each
                    // quad is 3 points in the converter's output array.
                    SkScalar weight = iter.conicWeight();
                    SkAutoConicToQuads converter;
                    const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f);
                    for (int i = 0; i < converter.countQuads(); ++i) {
                        update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
                        update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
                        add_quad_segment(quadPts + 2*i, segments);
                    }
                }
                break;
            }
            case SkPath::kCubic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 4)) {
                    m.mapPoints(pts, 4);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    update_degenerate_test(&degenerateData, pts[3]);
                    add_cubic_segments(pts, dir, segments);
                }
                break;
            }
            case SkPath::kDone_Verb:
                if (degenerateData.isDegenerate()) {
                    return false;
                } else {
                    return compute_vectors(segments, fanPt, dir, vCount, iCount);
                }
            default:
                break;
        }
    }
}
361
362 struct Draw {
Drawskgpu::v1::__anon7fae288f0111::Draw363 Draw() : fVertexCnt(0), fIndexCnt(0) {}
364 int fVertexCnt;
365 int fIndexCnt;
366 };
367
368 typedef SkTArray<Draw, true> DrawArray;
369
// Emits vertex and index data for all segments into |verts|/|idxs|, recording
// per-draw counts in |draws|. A new Draw record is started whenever adding a
// segment would push the vertex count past what 16-bit indices can address.
// Vertex layout matches QuadEdgeEffect: Position, Color, UV (u, v), D0, D1.
// For line edges u == 0 and v is the signed distance to the edge; for quad
// edges (u, v) are canonical quad coords (0 = u^2 - v on the curve). D0/D1
// trim the infinite quad against adjacent edges; negative means inside.
void create_vertices(const SegmentArray& segments,
                     const SkPoint& fanPt,
                     const VertexColor& color,
                     DrawArray* draws,
                     VertexWriter& verts,
                     uint16_t* idxs,
                     size_t vertexStride) {
    Draw* draw = &draws->push_back();
    // alias just to make vert/index assignments easier to read.
    int* v = &draw->fVertexCnt;
    int* i = &draw->fIndexCnt;

    int count = segments.size();
    for (int a = 0; a < count; ++a) {
        const Segment& sega = segments[a];
        int b = (a + 1) % count;
        const Segment& segb = segments[b];

        // Check whether adding the verts for this segment to the current draw would cause index
        // values to overflow.
        int vCount = 4;  // 4 verts for the corner wedge, plus the edge verts below.
        if (Segment::kLine == segb.fType) {
            vCount += 5;
        } else {
            vCount += 6;
        }
        if (draw->fVertexCnt + vCount > (1 << 16)) {
            // Start a fresh draw; indices restart relative to it.
            idxs += *i;
            draw = &draws->push_back();
            v = &draw->fVertexCnt;
            i = &draw->fIndexCnt;
        }

        const SkScalar negOneDists[2] = { -SK_Scalar1, -SK_Scalar1 };

        // Corner wedge: fan from the shared corner point out to the previous
        // segment's end normal, the bisector, and this segment's start normal.
        // FIXME: These tris are inset in the 1 unit arc around the corner
        SkPoint p0 = sega.endPt();
        // Position, Color, UV, D0, D1
        verts << p0 << color << SkPoint{0, 0} << negOneDists;
        verts << (p0 + sega.endNorm()) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
        verts << (p0 + segb.fMid) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
        verts << (p0 + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;

        // Two triangles covering the wedge.
        idxs[*i + 0] = *v + 0;
        idxs[*i + 1] = *v + 2;
        idxs[*i + 2] = *v + 1;
        idxs[*i + 3] = *v + 0;
        idxs[*i + 4] = *v + 3;
        idxs[*i + 5] = *v + 2;

        *v += 4;
        *i += 6;

        if (Segment::kLine == segb.fType) {
            // we draw the line edge as a degenerate quad (u is 0, v is the
            // signed distance to the edge)
            SkPoint v1Pos = sega.endPt();
            SkPoint v2Pos = segb.fPts[0];
            SkScalar dist = SkPointPriv::DistanceToLineBetween(fanPt, v1Pos, v2Pos);

            // 5 verts: fan point, the two edge endpoints, and the two
            // endpoints pushed out along the edge normal.
            verts << fanPt << color << SkPoint{0, dist} << negOneDists;
            verts << v1Pos << color << SkPoint{0, 0} << negOneDists;
            verts << v2Pos << color << SkPoint{0, 0} << negOneDists;
            verts << (v1Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
            verts << (v2Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;

            // The AA fringe outside the edge.
            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;

            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            *i += 6;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 5;
        } else {
            SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};

            // Edge-plane offsets for the quad's two bounding edge normals.
            SkScalar c0 = segb.fNorms[0].dot(qpts[0]);
            SkScalar c1 = segb.fNorms[1].dot(qpts[2]);

            // We must transform the positions into UV in cpu memory and then copy them to the gpu
            // buffer. If we write the position first into the gpu buffer then calculate the UVs, it
            // will cause us to read from the GPU buffer which can be very slow.
            struct PosAndUV {
                SkPoint fPos;
                SkPoint fUV;
            };
            PosAndUV posAndUVPoints[6];
            posAndUVPoints[0].fPos = fanPt;
            posAndUVPoints[1].fPos = qpts[0];
            posAndUVPoints[2].fPos = qpts[2];
            posAndUVPoints[3].fPos = qpts[0] + segb.fNorms[0];
            posAndUVPoints[4].fPos = qpts[2] + segb.fNorms[1];
            SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
            midVec.normalize();
            posAndUVPoints[5].fPos = qpts[1] + midVec;

            GrPathUtils::QuadUVMatrix toUV(qpts);
            toUV.apply(posAndUVPoints, 6, sizeof(PosAndUV), sizeof(SkPoint));

            // Fan point: distances to both trimming edge planes.
            verts << posAndUVPoints[0].fPos << color << posAndUVPoints[0].fUV
                  << (-segb.fNorms[0].dot(fanPt) + c0)
                  << (-segb.fNorms[1].dot(fanPt) + c1);

            // On-curve endpoints: one trimming distance is exactly zero.
            verts << posAndUVPoints[1].fPos << color << posAndUVPoints[1].fUV
                  << 0.0f
                  << (-segb.fNorms[1].dot(qpts[0]) + c1);

            verts << posAndUVPoints[2].fPos << color << posAndUVPoints[2].fUV
                  << (-segb.fNorms[0].dot(qpts[2]) + c0)
                  << 0.0f;
            // We need a negative value that is very large that it won't affect results if it is
            // interpolated with. However, the value can't be too large of a negative that it
            // affects numerical precision on less powerful GPUs.
            static const SkScalar kStableLargeNegativeValue = -SK_ScalarMax/1000000;
            verts << posAndUVPoints[3].fPos << color << posAndUVPoints[3].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            verts << posAndUVPoints[4].fPos << color << posAndUVPoints[4].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            verts << posAndUVPoints[5].fPos << color << posAndUVPoints[5].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            // Three triangles covering the AA fringe around the quad edge.
            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;
            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            idxs[*i + 6] = *v + 5;
            idxs[*i + 7] = *v + 3;
            idxs[*i + 8] = *v + 4;

            *i += 9;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 6;
        }
    }
}
539
540 ///////////////////////////////////////////////////////////////////////////////
541
542 /*
543 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
544 * two components of the vertex attribute. Coverage is based on signed
545 * distance with negative being inside, positive outside. The edge is specified in
546 * window space (y-down). If either the third or fourth component of the interpolated
547 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
548 * attempt to trim to a portion of the infinite quad.
549 * Requires shader derivative instruction support.
550 */
551
// Geometry processor implementing the quad-edge AA scheme described in the
// comment above. Vertex attributes: position, color, and a float4
// "inQuadEdge" holding (u, v, d0, d1).
class QuadEdgeEffect : public GrGeometryProcessor {
public:
    // Arena-allocates the processor. |localMatrix| is applied to positions to
    // produce local coords when |usesLocalCoords| is set; |wideColor|
    // selects the wider color attribute encoding.
    static GrGeometryProcessor* Make(SkArenaAlloc* arena,
                                     const SkMatrix& localMatrix,
                                     bool usesLocalCoords,
                                     bool wideColor) {
        return arena->make([&](void* ptr) {
            return new (ptr) QuadEdgeEffect(localMatrix, usesLocalCoords, wideColor);
        });
    }

    ~QuadEdgeEffect() override {}

    const char* name() const override { return "QuadEdge"; }

    // The key must capture everything that alters the generated program:
    // whether local coords are emitted and the local matrix's type.
    void addToKey(const GrShaderCaps& caps, KeyBuilder* b) const override {
        b->addBool(fUsesLocalCoords, "usesLocalCoords");
        b->addBits(ProgramImpl::kMatrixKeyBits,
                   ProgramImpl::ComputeMatrixKey(caps, fLocalMatrix),
                   "localMatrixType");
    }

    std::unique_ptr<ProgramImpl> makeProgramImpl(const GrShaderCaps&) const override;

private:
    QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords, bool wideColor)
            : INHERITED(kQuadEdgeEffect_ClassID)
            , fLocalMatrix(localMatrix)
            , fUsesLocalCoords(usesLocalCoords) {
        fInPosition = {"inPosition", kFloat2_GrVertexAttribType, SkSLType::kFloat2};
        fInColor = MakeColorAttribute("inColor", wideColor);
        // GL on iOS 14 needs more precision for the quadedge attributes
        fInQuadEdge = {"inQuadEdge", kFloat4_GrVertexAttribType, SkSLType::kFloat4};
        this->setVertexAttributesWithImplicitOffsets(&fInPosition, 3);
    }

    Attribute fInPosition;
    Attribute fInColor;
    Attribute fInQuadEdge;  // (u, v, d0, d1)

    SkMatrix fLocalMatrix;
    bool fUsesLocalCoords;

    GR_DECLARE_GEOMETRY_PROCESSOR_TEST

    using INHERITED = GrGeometryProcessor;
};
599
std::unique_ptr<GrGeometryProcessor::ProgramImpl> QuadEdgeEffect::makeProgramImpl(
        const GrShaderCaps&) const {
    class Impl : public ProgramImpl {
    public:
        // Uploads the local matrix uniform when it changes between draws.
        void setData(const GrGLSLProgramDataManager& pdman,
                     const GrShaderCaps& shaderCaps,
                     const GrGeometryProcessor& geomProc) override {
            const QuadEdgeEffect& qe = geomProc.cast<QuadEdgeEffect>();
            SetTransform(pdman, shaderCaps, fLocalMatrixUniform, qe.fLocalMatrix, &fLocalMatrix);
        }

    private:
        // Emits the vertex/fragment SkSL implementing the quad-edge coverage
        // computation (see the block comment above QuadEdgeEffect).
        void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
            const QuadEdgeEffect& qe = args.fGeomProc.cast<QuadEdgeEffect>();
            GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
            GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
            GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
            GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;

            // emit attributes
            varyingHandler->emitAttributes(qe);

            // GL on iOS 14 needs more precision for the quadedge attributes
            // We might as well enable it everywhere
            GrGLSLVarying v(SkSLType::kFloat4);
            varyingHandler->addVarying("QuadEdge", &v);
            vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge.name());

            // Setup pass through color
            fragBuilder->codeAppendf("half4 %s;", args.fOutputColor);
            varyingHandler->addPassThroughAttribute(qe.fInColor.asShaderVar(), args.fOutputColor);

            // Setup position
            WriteOutputPosition(vertBuilder, gpArgs, qe.fInPosition.name());
            if (qe.fUsesLocalCoords) {
                WriteLocalCoord(vertBuilder,
                                uniformHandler,
                                *args.fShaderCaps,
                                gpArgs,
                                qe.fInPosition.asShaderVar(),
                                qe.fLocalMatrix,
                                &fLocalMatrixUniform);
            }

            fragBuilder->codeAppendf("half edgeAlpha;");

            // keep the derivative instructions outside the conditional
            fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
            // today we know z and w are in device space. We could use derivatives
            fragBuilder->codeAppendf("edgeAlpha = half(min(min(%s.z, %s.w) + 0.5, 1.0));", v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf ("} else {");
            // Gradient of the implicit function f(u, v) = u^2 - v; coverage
            // is the distance to the curve estimated as f / |grad f|.
            fragBuilder->codeAppendf("half2 gF = half2(half(2.0*%s.x*duvdx.x - duvdx.y),"
                                     "               half(2.0*%s.x*duvdy.x - duvdy.y));",
                                     v.fsIn(), v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = half(%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = "
                                     "saturate(0.5 - edgeAlpha / length(gF));}");

            fragBuilder->codeAppendf("half4 %s = half4(edgeAlpha);", args.fOutputCoverage);
        }

    private:
        SkMatrix fLocalMatrix = SkMatrix::InvalidMatrix();

        UniformHandle fLocalMatrixUniform;
    };

    return std::make_unique<Impl>();
}
673
GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect)

#if GR_TEST_UTILS
// Builds a randomized QuadEdgeEffect for processor unit tests, or returns
// nullptr when the caps lack shader derivative support.
GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
    SkMatrix localMatrix = GrTest::TestMatrix(d->fRandom);
    bool usesLocalCoords = d->fRandom->nextBool();
    bool wideColor = d->fRandom->nextBool();
    // Doesn't work without derivative instructions.
    return d->caps()->shaderCaps()->fShaderDerivativeSupport
                   ? QuadEdgeEffect::Make(d->allocator(), localMatrix, usesLocalCoords, wideColor)
                   : nullptr;
}
#endif
687
688 class AAConvexPathOp final : public GrMeshDrawOp {
689 private:
690 using Helper = GrSimpleMeshDrawOpHelperWithStencil;
691
692 public:
693 DEFINE_OP_CLASS_ID
694
Make(GrRecordingContext * context,GrPaint && paint,const SkMatrix & viewMatrix,const SkPath & path,const GrUserStencilSettings * stencilSettings)695 static GrOp::Owner Make(GrRecordingContext* context,
696 GrPaint&& paint,
697 const SkMatrix& viewMatrix,
698 const SkPath& path,
699 const GrUserStencilSettings* stencilSettings) {
700 return Helper::FactoryHelper<AAConvexPathOp>(context, std::move(paint), viewMatrix, path,
701 stencilSettings);
702 }
703
AAConvexPathOp(GrProcessorSet * processorSet,const SkPMColor4f & color,const SkMatrix & viewMatrix,const SkPath & path,const GrUserStencilSettings * stencilSettings)704 AAConvexPathOp(GrProcessorSet* processorSet, const SkPMColor4f& color,
705 const SkMatrix& viewMatrix, const SkPath& path,
706 const GrUserStencilSettings* stencilSettings)
707 : INHERITED(ClassID()), fHelper(processorSet, GrAAType::kCoverage, stencilSettings) {
708 fPaths.emplace_back(PathData{viewMatrix, path, color});
709 this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
710 IsHairline::kNo);
711 }
712
name() const713 const char* name() const override { return "AAConvexPathOp"; }
714
visitProxies(const GrVisitProxyFunc & func) const715 void visitProxies(const GrVisitProxyFunc& func) const override {
716 if (fProgramInfo) {
717 fProgramInfo->visitFPProxies(func);
718 } else {
719 fHelper.visitProxies(func);
720 }
721 }
722
fixedFunctionFlags() const723 FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
724
finalize(const GrCaps & caps,const GrAppliedClip * clip,GrClampType clampType)725 GrProcessorSet::Analysis finalize(const GrCaps& caps, const GrAppliedClip* clip,
726 GrClampType clampType) override {
727 return fHelper.finalizeProcessors(
728 caps, clip, clampType, GrProcessorAnalysisCoverage::kSingleChannel,
729 &fPaths.back().fColor, &fWideColor);
730 }
731
732 private:
programInfo()733 GrProgramInfo* programInfo() override { return fProgramInfo; }
734
onCreateProgramInfo(const GrCaps * caps,SkArenaAlloc * arena,const GrSurfaceProxyView & writeView,bool usesMSAASurface,GrAppliedClip && appliedClip,const GrDstProxyView & dstProxyView,GrXferBarrierFlags renderPassXferBarriers,GrLoadOp colorLoadOp)735 void onCreateProgramInfo(const GrCaps* caps,
736 SkArenaAlloc* arena,
737 const GrSurfaceProxyView& writeView,
738 bool usesMSAASurface,
739 GrAppliedClip&& appliedClip,
740 const GrDstProxyView& dstProxyView,
741 GrXferBarrierFlags renderPassXferBarriers,
742 GrLoadOp colorLoadOp) override {
743 SkMatrix invert;
744 if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) {
745 return;
746 }
747
748 GrGeometryProcessor* quadProcessor = QuadEdgeEffect::Make(arena, invert,
749 fHelper.usesLocalCoords(),
750 fWideColor);
751
752 fProgramInfo = fHelper.createProgramInfoWithStencil(caps, arena, writeView, usesMSAASurface,
753 std::move(appliedClip),
754 dstProxyView, quadProcessor,
755 GrPrimitiveType::kTriangles,
756 renderPassXferBarriers, colorLoadOp);
757 }
758
onPrepareDraws(GrMeshDrawTarget * target)759 void onPrepareDraws(GrMeshDrawTarget* target) override {
760 int instanceCount = fPaths.size();
761
762 if (!fProgramInfo) {
763 this->createProgramInfo(target);
764 if (!fProgramInfo) {
765 return;
766 }
767 }
768
769 const size_t kVertexStride = fProgramInfo->geomProc().vertexStride();
770
771 fDraws.reserve(instanceCount);
772
773 // TODO generate all segments for all paths and use one vertex buffer
774 for (int i = 0; i < instanceCount; i++) {
775 const PathData& args = fPaths[i];
776
777 // We use the fact that SkPath::transform path does subdivision based on
778 // perspective. Otherwise, we apply the view matrix when copying to the
779 // segment representation.
780 const SkMatrix* viewMatrix = &args.fViewMatrix;
781
782 // We avoid initializing the path unless we have to
783 const SkPath* pathPtr = &args.fPath;
784 SkTLazy<SkPath> tmpPath;
785 if (viewMatrix->hasPerspective()) {
786 SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
787 tmpPathPtr->setIsVolatile(true);
788 tmpPathPtr->transform(*viewMatrix);
789 viewMatrix = &SkMatrix::I();
790 pathPtr = tmpPathPtr;
791 }
792
793 int vertexCount;
794 int indexCount;
795 enum {
796 kPreallocSegmentCnt = 512 / sizeof(Segment),
797 kPreallocDrawCnt = 4,
798 };
799 SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
800 SkPoint fanPt;
801
802 if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
803 &indexCount)) {
804 continue;
805 }
806
807 sk_sp<const GrBuffer> vertexBuffer;
808 int firstVertex;
809
810 VertexWriter verts = target->makeVertexWriter(kVertexStride,
811 vertexCount,
812 &vertexBuffer,
813 &firstVertex);
814
815 if (!verts) {
816 SkDebugf("Could not allocate vertices\n");
817 return;
818 }
819
820 sk_sp<const GrBuffer> indexBuffer;
821 int firstIndex;
822
823 uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
824 if (!idxs) {
825 SkDebugf("Could not allocate indices\n");
826 return;
827 }
828
829 SkSTArray<kPreallocDrawCnt, Draw, true> draws;
830 VertexColor color(args.fColor, fWideColor);
831 create_vertices(segments, fanPt, color, &draws, verts, idxs, kVertexStride);
832
833 GrSimpleMesh* meshes = target->allocMeshes(draws.size());
834 for (int j = 0; j < draws.size(); ++j) {
835 const Draw& draw = draws[j];
836 meshes[j].setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0,
837 draw.fVertexCnt - 1, GrPrimitiveRestart::kNo, vertexBuffer,
838 firstVertex);
839 firstIndex += draw.fIndexCnt;
840 firstVertex += draw.fVertexCnt;
841 }
842
843 fDraws.push_back({ meshes, draws.size() });
844 }
845 }
846
onExecute(GrOpFlushState * flushState,const SkRect & chainBounds)847 void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
848 if (!fProgramInfo || fDraws.empty()) {
849 return;
850 }
851
852 flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
853 flushState->bindTextures(fProgramInfo->geomProc(), nullptr, fProgramInfo->pipeline());
854 for (int i = 0; i < fDraws.size(); ++i) {
855 for (int j = 0; j < fDraws[i].fMeshCount; ++j) {
856 flushState->drawMesh(fDraws[i].fMeshes[j]);
857 }
858 }
859 }
860
onCombineIfPossible(GrOp * t,SkArenaAlloc *,const GrCaps & caps)861 CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc*, const GrCaps& caps) override {
862 AAConvexPathOp* that = t->cast<AAConvexPathOp>();
863 if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
864 return CombineResult::kCannotCombine;
865 }
866 if (fHelper.usesLocalCoords() &&
867 !SkMatrixPriv::CheapEqual(fPaths[0].fViewMatrix, that->fPaths[0].fViewMatrix)) {
868 return CombineResult::kCannotCombine;
869 }
870
871 fPaths.push_back_n(that->fPaths.size(), that->fPaths.begin());
872 fWideColor |= that->fWideColor;
873 return CombineResult::kMerged;
874 }
875
876 #if GR_TEST_UTILS
onDumpInfo() const877 SkString onDumpInfo() const override {
878 return SkStringPrintf("Count: %d\n%s", fPaths.size(), fHelper.dumpInfo().c_str());
879 }
880 #endif
881
882 struct PathData {
883 SkMatrix fViewMatrix;
884 SkPath fPath;
885 SkPMColor4f fColor;
886 };
887
888 Helper fHelper;
889 SkSTArray<1, PathData, true> fPaths;
890 bool fWideColor;
891
892 struct MeshDraw {
893 GrSimpleMesh* fMeshes;
894 int fMeshCount;
895 };
896
897 SkTDArray<MeshDraw> fDraws;
898 GrProgramInfo* fProgramInfo = nullptr;
899
900 using INHERITED = GrMeshDrawOp;
901 };
902
903 } // anonymous namespace
904
905 ///////////////////////////////////////////////////////////////////////////////
906
onCanDrawPath(const CanDrawPathArgs & args) const907 PathRenderer::CanDrawPath AAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
908 // This check requires convexity and known direction, since the direction is used to build
909 // the geometry segments. Degenerate convex paths will fall through to some other path renderer.
910 if (args.fCaps->shaderCaps()->fShaderDerivativeSupport &&
911 (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() &&
912 !args.fShape->inverseFilled() && args.fShape->knownToBeConvex() &&
913 args.fShape->knownDirection()) {
914 return CanDrawPath::kYes;
915 }
916 return CanDrawPath::kNo;
917 }
918
onDrawPath(const DrawPathArgs & args)919 bool AAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
920 GR_AUDIT_TRAIL_AUTO_FRAME(args.fContext->priv().auditTrail(),
921 "AAConvexPathRenderer::onDrawPath");
922 SkASSERT(args.fSurfaceDrawContext->numSamples() <= 1);
923 SkASSERT(!args.fShape->isEmpty());
924
925 SkPath path;
926 args.fShape->asPath(&path);
927
928 GrOp::Owner op = AAConvexPathOp::Make(args.fContext, std::move(args.fPaint),
929 *args.fViewMatrix,
930 path, args.fUserStencilSettings);
931 args.fSurfaceDrawContext->addDrawOp(args.fClip, std::move(op));
932 return true;
933 }
934
935 } // namespace skgpu::v1
936
937 #if GR_TEST_UTILS
938
// Fuzzer/unit-test hook: builds an AAConvexPathOp with randomized matrix,
// convex path, and stencil settings.
GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
    const SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
    const SkPath& convexPath = GrTest::TestPathConvex(random);
    const GrUserStencilSettings* stencil = GrGetRandomStencil(random, context);
    return skgpu::v1::AAConvexPathOp::Make(context, std::move(paint), viewMatrix, convexPath,
                                           stencil);
}
946
947 #endif
948