1 /*
2 * Copyright 2012 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ops/AAConvexPathRenderer.h"
9
10 #include "include/core/SkString.h"
11 #include "include/core/SkTypes.h"
12 #include "src/core/SkGeometry.h"
13 #include "src/core/SkMatrixPriv.h"
14 #include "src/core/SkPathPriv.h"
15 #include "src/core/SkPointPriv.h"
16 #include "src/gpu/BufferWriter.h"
17 #include "src/gpu/GrAuditTrail.h"
18 #include "src/gpu/GrCaps.h"
19 #include "src/gpu/GrDrawOpTest.h"
20 #include "src/gpu/GrGeometryProcessor.h"
21 #include "src/gpu/GrProcessor.h"
22 #include "src/gpu/GrProgramInfo.h"
23 #include "src/gpu/geometry/GrPathUtils.h"
24 #include "src/gpu/geometry/GrStyledShape.h"
25 #include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
26 #include "src/gpu/glsl/GrGLSLProgramDataManager.h"
27 #include "src/gpu/glsl/GrGLSLUniformHandler.h"
28 #include "src/gpu/glsl/GrGLSLVarying.h"
29 #include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
30 #include "src/gpu/ops/GrMeshDrawOp.h"
31 #include "src/gpu/ops/GrSimpleMeshDrawOpHelperWithStencil.h"
32 #include "src/gpu/v1/SurfaceDrawContext_v1.h"
33
34 namespace skgpu::v1 {
35
36 namespace {
37
38 struct Segment {
39 enum {
40 // These enum values are assumed in member functions below.
41 kLine = 0,
42 kQuad = 1,
43 } fType;
44
45 // line uses one pt, quad uses 2 pts
46 SkPoint fPts[2];
47 // normal to edge ending at each pt
48 SkVector fNorms[2];
49 // is the corner where the previous segment meets this segment
50 // sharp. If so, fMid is a normalized bisector facing outward.
51 SkVector fMid;
52
countPointsskgpu::v1::__anondb05a0d70111::Segment53 int countPoints() {
54 static_assert(0 == kLine && 1 == kQuad);
55 return fType + 1;
56 }
endPtskgpu::v1::__anondb05a0d70111::Segment57 const SkPoint& endPt() const {
58 static_assert(0 == kLine && 1 == kQuad);
59 return fPts[fType];
60 }
endNormskgpu::v1::__anondb05a0d70111::Segment61 const SkPoint& endNorm() const {
62 static_assert(0 == kLine && 1 == kQuad);
63 return fNorms[fType];
64 }
65 };
66
67 typedef SkTArray<Segment, true> SegmentArray;
68
// Computes the area-weighted centroid of the polygon formed by the segment
// end points, for use as the interior fan point. Falls back to the simple
// average of the points when the signed area is ~0. Returns false if the
// result is NaN/non-finite.
bool center_of_mass(const SegmentArray& segments, SkPoint* c) {
    SkScalar area = 0;
    SkPoint center = {0, 0};
    int count = segments.count();
    SkPoint p0 = {0, 0};
    if (count > 2) {
        // We translate the polygon so that the first point is at the origin.
        // This avoids some precision issues with small area polygons far away
        // from the origin.
        p0 = segments[0].endPt();
        SkPoint pi;
        SkPoint pj;
        // the first and last iteration of the below loop would compute
        // zeros since the starting / ending point is (0,0). So instead we start
        // at i=1 and make the last iteration i=count-2.
        pj = segments[1].endPt() - p0;
        for (int i = 1; i < count - 1; ++i) {
            pi = pj;
            pj = segments[i + 1].endPt() - p0;

            // Shoelace-style accumulation: t is twice the signed area of the
            // triangle (origin, pi, pj).
            SkScalar t = SkPoint::CrossProduct(pi, pj);
            area += t;
            center.fX += (pi.fX + pj.fX) * t;
            center.fY += (pi.fY + pj.fY) * t;
        }
    }

    // If the poly has no area then we instead return the average of
    // its points.
    if (SkScalarNearlyZero(area)) {
        SkPoint avg;
        avg.set(0, 0);
        for (int i = 0; i < count; ++i) {
            const SkPoint& pt = segments[i].endPt();
            avg.fX += pt.fX;
            avg.fY += pt.fY;
        }
        SkScalar denom = SK_Scalar1 / count;
        avg.scale(denom);
        *c = avg;
    } else {
        // Polygon centroid: divide the weighted sum by 3 * (2*signed area);
        // 'area' currently holds twice the signed area, hence the *= 3.
        area *= 3;
        area = SkScalarInvert(area);
        center.scale(area);
        // undo the translate of p0 to the origin.
        *c = center + p0;
    }
    return !SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY) && c->isFinite();
}
118
// Fills in the outward normals and corner bisectors for every segment, and
// totals the vertex/index counts that create_vertices() will later emit.
// Returns false if the fan point is non-finite or the totals overflow int32.
bool compute_vectors(SegmentArray* segments,
                     SkPoint* fanPt,
                     SkPathFirstDirection dir,
                     int* vCount,
                     int* iCount) {
    if (!center_of_mass(*segments, fanPt)) {
        return false;
    }
    int count = segments->count();

    // Make the normals point towards the outside
    SkPointPriv::Side normSide;
    if (dir == SkPathFirstDirection::kCCW) {
        normSide = SkPointPriv::kRight_Side;
    } else {
        normSide = SkPointPriv::kLeft_Side;
    }

    // Accumulate in 64 bits so pathological segment counts cannot overflow.
    int64_t vCount64 = 0;
    int64_t iCount64 = 0;
    // compute normals at all points
    for (int a = 0; a < count; ++a) {
        Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];

        const SkPoint* prevPt = &sega.endPt();
        int n = segb.countPoints();
        for (int p = 0; p < n; ++p) {
            // Normal of the chord from the previous point, rotated to face
            // outward per normSide.
            segb.fNorms[p] = segb.fPts[p] - *prevPt;
            segb.fNorms[p].normalize();
            segb.fNorms[p] = SkPointPriv::MakeOrthog(segb.fNorms[p], normSide);
            prevPt = &segb.fPts[p];
        }
        // Per-segment geometry budget (see create_vertices): lines emit 5
        // verts / 9 indices, quads 6 / 12.
        if (Segment::kLine == segb.fType) {
            vCount64 += 5;
            iCount64 += 9;
        } else {
            vCount64 += 6;
            iCount64 += 12;
        }
    }

    // compute mid-vectors where segments meet. TODO: Detect shallow corners
    // and leave out the wedges and close gaps by stitching segments together.
    for (int a = 0; a < count; ++a) {
        const Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];
        segb.fMid = segb.fNorms[0] + sega.endNorm();
        segb.fMid.normalize();
        // corner wedges
        vCount64 += 4;
        iCount64 += 6;
    }
    if (vCount64 > SK_MaxS32 || iCount64 > SK_MaxS32) {
        return false;
    }
    *vCount = vCount64;
    *iCount = iCount64;
    return true;
}
181
// Tracks whether the (device-space) path collapses to a point or a line.
// Such degenerate paths are rejected by get_segments() and drawn as nothing.
struct DegenerateTestData {
    DegenerateTestData() { fStage = kInitial; }
    bool isDegenerate() const { return kNonDegenerate != fStage; }
    // State machine: advances kInitial -> kPoint -> kLine -> kNonDegenerate
    // as incoming points break each degeneracy hypothesis in turn.
    enum {
        kInitial,
        kPoint,
        kLine,
        kNonDegenerate
    } fStage;
    SkPoint fFirstPoint;    // first point seen (valid from kPoint onward)
    SkVector fLineNormal;   // unit normal of the candidate line (kLine stage)
    SkScalar fLineC;        // line offset: fLineNormal.dot(p) + fLineC == 0 on the line
};
195
// Device-space distance tolerance shared by the degeneracy tests and quad
// flattening: points closer than this are treated as coincident/collinear.
static const SkScalar kClose = (SK_Scalar1 / 16);
static const SkScalar kCloseSqd = kClose * kClose;
198
// Feeds the next device-space path point into the degeneracy state machine.
// kNonDegenerate is sticky: once any point leaves the candidate line by more
// than kClose, the path is known to have area.
void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
    switch (data->fStage) {
        case DegenerateTestData::kInitial:
            data->fFirstPoint = pt;
            data->fStage = DegenerateTestData::kPoint;
            break;
        case DegenerateTestData::kPoint:
            if (SkPointPriv::DistanceToSqd(pt, data->fFirstPoint) > kCloseSqd) {
                // Two distinct points define a candidate line; store its unit
                // normal N and offset C so distance to the line is |N.p + C|.
                data->fLineNormal = pt - data->fFirstPoint;
                data->fLineNormal.normalize();
                data->fLineNormal = SkPointPriv::MakeOrthog(data->fLineNormal);
                data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
                data->fStage = DegenerateTestData::kLine;
            }
            break;
        case DegenerateTestData::kLine:
            if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
                data->fStage = DegenerateTestData::kNonDegenerate;
            }
            break;
        case DegenerateTestData::kNonDegenerate:
            break;
        default:
            SK_ABORT("Unexpected degenerate test stage.");
    }
}
225
get_direction(const SkPath & path,const SkMatrix & m,SkPathFirstDirection * dir)226 inline bool get_direction(const SkPath& path, const SkMatrix& m, SkPathFirstDirection* dir) {
227 // At this point, we've already returned true from canDraw(), which checked that the path's
228 // direction could be determined, so this should just be fetching the cached direction.
229 // However, if perspective is involved, we're operating on a transformed path, which may no
230 // longer have a computable direction.
231 *dir = SkPathPriv::ComputeFirstDirection(path);
232 if (*dir == SkPathFirstDirection::kUnknown) {
233 return false;
234 }
235
236 // check whether m reverses the orientation
237 SkASSERT(!m.hasPerspective());
238 SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
239 m.get(SkMatrix::kMSkewX) * m.get(SkMatrix::kMSkewY);
240 if (det2x2 < 0) {
241 *dir = SkPathPriv::OppositeFirstDirection(*dir);
242 }
243
244 return true;
245 }
246
add_line_to_segment(const SkPoint & pt,SegmentArray * segments)247 inline void add_line_to_segment(const SkPoint& pt, SegmentArray* segments) {
248 segments->push_back();
249 segments->back().fType = Segment::kLine;
250 segments->back().fPts[0] = pt;
251 }
252
add_quad_segment(const SkPoint pts[3],SegmentArray * segments)253 inline void add_quad_segment(const SkPoint pts[3], SegmentArray* segments) {
254 if (SkPointPriv::DistanceToLineSegmentBetweenSqd(pts[1], pts[0], pts[2]) < kCloseSqd) {
255 if (pts[0] != pts[2]) {
256 add_line_to_segment(pts[2], segments);
257 }
258 } else {
259 segments->push_back();
260 segments->back().fType = Segment::kQuad;
261 segments->back().fPts[0] = pts[1];
262 segments->back().fPts[1] = pts[2];
263 }
264 }
265
add_cubic_segments(const SkPoint pts[4],SkPathFirstDirection dir,SegmentArray * segments)266 inline void add_cubic_segments(const SkPoint pts[4],
267 SkPathFirstDirection dir,
268 SegmentArray* segments) {
269 SkSTArray<15, SkPoint, true> quads;
270 GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
271 int count = quads.count();
272 for (int q = 0; q < count; q += 3) {
273 add_quad_segment(&quads[q], segments);
274 }
275 }
276
// Walks the path's verbs (with implicit close), maps points to device space,
// feeds them to the degeneracy test, and converts each verb into Segments.
// Returns false for degenerate (point/line) paths or unknown direction;
// on kDone it finishes via compute_vectors().
bool get_segments(const SkPath& path,
                  const SkMatrix& m,
                  SegmentArray* segments,
                  SkPoint* fanPt,
                  int* vCount,
                  int* iCount) {
    SkPath::Iter iter(path, true);
    // This renderer over-emphasizes very thin path regions. We use the distance
    // to the path from the sample to compute coverage. Every pixel intersected
    // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
    // notice that the sample may be close to a very thin area of the path and
    // thus should be very light. This is particularly egregious for degenerate
    // line paths. We detect paths that are very close to a line (zero area) and
    // draw nothing.
    DegenerateTestData degenerateData;
    SkPathFirstDirection dir;
    if (!get_direction(path, m, &dir)) {
        return false;
    }

    for (;;) {
        SkPoint pts[4];
        SkPath::Verb verb = iter.next(pts);
        switch (verb) {
            case SkPath::kMove_Verb:
                // The move point seeds the degeneracy test but adds no segment.
                m.mapPoints(pts, 1);
                update_degenerate_test(&degenerateData, pts[0]);
                break;
            case SkPath::kLine_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 2)) {
                    // pts[0] was already mapped as the previous end point.
                    m.mapPoints(&pts[1], 1);
                    update_degenerate_test(&degenerateData, pts[1]);
                    add_line_to_segment(pts[1], segments);
                }
                break;
            }
            case SkPath::kQuad_Verb:
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    add_quad_segment(pts, segments);
                }
                break;
            case SkPath::kConic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    // Approximate the conic with quads before segmenting.
                    SkScalar weight = iter.conicWeight();
                    SkAutoConicToQuads converter;
                    const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f);
                    for (int i = 0; i < converter.countQuads(); ++i) {
                        update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
                        update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
                        add_quad_segment(quadPts + 2*i, segments);
                    }
                }
                break;
            }
            case SkPath::kCubic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 4)) {
                    m.mapPoints(pts, 4);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    update_degenerate_test(&degenerateData, pts[3]);
                    add_cubic_segments(pts, dir, segments);
                }
                break;
            }
            case SkPath::kDone_Verb:
                if (degenerateData.isDegenerate()) {
                    return false;
                } else {
                    return compute_vectors(segments, fanPt, dir, vCount, iCount);
                }
            default:
                break;
        }
    }
}
356
// Book-keeping for one mesh: how many vertices and indices it consumes.
// create_vertices() starts a new Draw whenever 16-bit indices would overflow.
// (Uses in-class initializers instead of a user-provided constructor.)
struct Draw {
    int fVertexCnt = 0;
    int fIndexCnt = 0;
};
362
363 typedef SkTArray<Draw, true> DrawArray;
364
// Emits vertex and index data for every segment. Each segment contributes a
// corner wedge (4 verts / 6 indices) plus either a line edge quad (5 / 9) or
// a quad-edge patch (6 / 12), matching the counts from compute_vectors().
// Vertex layout is (position, color, UV, D0, D1) per QuadEdgeEffect's
// attributes. Draws are split so 16-bit index values never overflow.
void create_vertices(const SegmentArray& segments,
                     const SkPoint& fanPt,
                     const GrVertexColor& color,
                     DrawArray* draws,
                     VertexWriter& verts,
                     uint16_t* idxs,
                     size_t vertexStride) {
    Draw* draw = &draws->push_back();
    // alias just to make vert/index assignments easier to read.
    int* v = &draw->fVertexCnt;
    int* i = &draw->fIndexCnt;

    int count = segments.count();
    for (int a = 0; a < count; ++a) {
        const Segment& sega = segments[a];
        int b = (a + 1) % count;
        const Segment& segb = segments[b];

        // Check whether adding the verts for this segment to the current draw would cause index
        // values to overflow.
        int vCount = 4;
        if (Segment::kLine == segb.fType) {
            vCount += 5;
        } else {
            vCount += 6;
        }
        if (draw->fVertexCnt + vCount > (1 << 16)) {
            // Start a new draw; index values restart relative to its vertices.
            idxs += *i;
            draw = &draws->push_back();
            v = &draw->fVertexCnt;
            i = &draw->fIndexCnt;
        }

        const SkScalar negOneDists[2] = { -SK_Scalar1, -SK_Scalar1 };

        // FIXME: These tris are inset in the 1 unit arc around the corner
        SkPoint p0 = sega.endPt();
        // Position, Color, UV, D0, D1
        verts << p0 << color << SkPoint{0, 0} << negOneDists;
        verts << (p0 + sega.endNorm()) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
        verts << (p0 + segb.fMid) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
        verts << (p0 + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;

        // Two triangles filling the wedge between the previous segment's end
        // normal and this segment's start normal.
        idxs[*i + 0] = *v + 0;
        idxs[*i + 1] = *v + 2;
        idxs[*i + 2] = *v + 1;
        idxs[*i + 3] = *v + 0;
        idxs[*i + 4] = *v + 3;
        idxs[*i + 5] = *v + 2;

        *v += 4;
        *i += 6;

        if (Segment::kLine == segb.fType) {
            // we draw the line edge as a degenerate quad (u is 0, v is the
            // signed distance to the edge)
            SkPoint v1Pos = sega.endPt();
            SkPoint v2Pos = segb.fPts[0];
            SkScalar dist = SkPointPriv::DistanceToLineBetween(fanPt, v1Pos, v2Pos);

            verts << fanPt << color << SkPoint{0, dist} << negOneDists;
            verts << v1Pos << color << SkPoint{0, 0} << negOneDists;
            verts << v2Pos << color << SkPoint{0, 0} << negOneDists;
            verts << (v1Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
            verts << (v2Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;

            // Antialiasing quad (two triangles) between the edge and its
            // outward 1-unit offset.
            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;

            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            *i += 6;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 5;
        } else {
            SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};

            // Plane offsets so D0/D1 measure signed distance to the lines
            // through the quad end points along their normals.
            SkScalar c0 = segb.fNorms[0].dot(qpts[0]);
            SkScalar c1 = segb.fNorms[1].dot(qpts[2]);

            // We must transform the positions into UV in cpu memory and then copy them to the gpu
            // buffer. If we write the position first into the gpu buffer then calculate the UVs, it
            // will cause us to read from the GPU buffer which can be very slow.
            struct PosAndUV {
                SkPoint fPos;
                SkPoint fUV;
            };
            PosAndUV posAndUVPoints[6];
            posAndUVPoints[0].fPos = fanPt;
            posAndUVPoints[1].fPos = qpts[0];
            posAndUVPoints[2].fPos = qpts[2];
            posAndUVPoints[3].fPos = qpts[0] + segb.fNorms[0];
            posAndUVPoints[4].fPos = qpts[2] + segb.fNorms[1];
            SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
            midVec.normalize();
            posAndUVPoints[5].fPos = qpts[1] + midVec;

            GrPathUtils::QuadUVMatrix toUV(qpts);
            toUV.apply(posAndUVPoints, 6, sizeof(PosAndUV), sizeof(SkPoint));

            verts << posAndUVPoints[0].fPos << color << posAndUVPoints[0].fUV
                  << (-segb.fNorms[0].dot(fanPt) + c0)
                  << (-segb.fNorms[1].dot(fanPt) + c1);

            verts << posAndUVPoints[1].fPos << color << posAndUVPoints[1].fUV
                  << 0.0f
                  << (-segb.fNorms[1].dot(qpts[0]) + c1);

            verts << posAndUVPoints[2].fPos << color << posAndUVPoints[2].fUV
                  << (-segb.fNorms[0].dot(qpts[2]) + c0)
                  << 0.0f;
            // We need a negative value that is very large that it won't effect results if it is
            // interpolated with. However, the value can't be too large of a negative that it
            // effects numerical precision on less powerful GPUs.
            static const SkScalar kStableLargeNegativeValue = -SK_ScalarMax/1000000;
            verts << posAndUVPoints[3].fPos << color << posAndUVPoints[3].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            verts << posAndUVPoints[4].fPos << color << posAndUVPoints[4].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            verts << posAndUVPoints[5].fPos << color << posAndUVPoints[5].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            // Outer antialiasing triangles along the curved edge.
            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;
            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            idxs[*i + 6] = *v + 5;
            idxs[*i + 7] = *v + 3;
            idxs[*i + 8] = *v + 4;

            *i += 9;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 6;
        }
    }
}
534
535 ///////////////////////////////////////////////////////////////////////////////
536
537 /*
538 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
539 * two components of the vertex attribute. Coverage is based on signed
540 * distance with negative being inside, positive outside. The edge is specified in
541 * window space (y-down). If either the third or fourth component of the interpolated
542 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
543 * attempt to trim to a portion of the infinite quad.
544 * Requires shader derivative instruction support.
545 */
546
// Geometry processor implementing the quad-edge AA scheme described in the
// block comment above: attributes are position, color, and a float4 quad-edge
// coord (u, v, d0, d1).
class QuadEdgeEffect : public GrGeometryProcessor {
public:
    // Arena-allocates the processor; GPs are owned by the arena, not ref-counted.
    static GrGeometryProcessor* Make(SkArenaAlloc* arena,
                                     const SkMatrix& localMatrix,
                                     bool usesLocalCoords,
                                     bool wideColor) {
        return arena->make([&](void* ptr) {
            return new (ptr) QuadEdgeEffect(localMatrix, usesLocalCoords, wideColor);
        });
    }

    ~QuadEdgeEffect() override {}

    const char* name() const override { return "QuadEdge"; }

    // The key must capture everything that changes the generated shader text.
    void addToKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
        b->addBool(fUsesLocalCoords, "usesLocalCoords");
        b->addBits(ProgramImpl::kMatrixKeyBits,
                   ProgramImpl::ComputeMatrixKey(caps, fLocalMatrix),
                   "localMatrixType");
    }

    std::unique_ptr<ProgramImpl> makeProgramImpl(const GrShaderCaps&) const override;

private:
    QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords, bool wideColor)
            : INHERITED(kQuadEdgeEffect_ClassID)
            , fLocalMatrix(localMatrix)
            , fUsesLocalCoords(usesLocalCoords) {
        fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
        fInColor = MakeColorAttribute("inColor", wideColor);
        // GL on iOS 14 needs more precision for the quadedge attributes
        fInQuadEdge = {"inQuadEdge", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
        this->setVertexAttributes(&fInPosition, 3);
    }

    Attribute fInPosition;
    Attribute fInColor;
    Attribute fInQuadEdge;

    SkMatrix fLocalMatrix;      // maps positions to local coords when needed
    bool fUsesLocalCoords;

    GR_DECLARE_GEOMETRY_PROCESSOR_TEST

    using INHERITED = GrGeometryProcessor;
};
594
// Builds the vertex/fragment program for QuadEdgeEffect. The fragment shader
// evaluates coverage from the implicit quad equation u^2 - v using
// screen-space derivatives, and clips against the d0/d1 half-planes.
std::unique_ptr<GrGeometryProcessor::ProgramImpl> QuadEdgeEffect::makeProgramImpl(
        const GrShaderCaps&) const {
    class Impl : public ProgramImpl {
    public:
        void setData(const GrGLSLProgramDataManager& pdman,
                     const GrShaderCaps& shaderCaps,
                     const GrGeometryProcessor& geomProc) override {
            const QuadEdgeEffect& qe = geomProc.cast<QuadEdgeEffect>();
            // Re-uploads the local matrix uniform only when it changed.
            SetTransform(pdman, shaderCaps, fLocalMatrixUniform, qe.fLocalMatrix, &fLocalMatrix);
        }

    private:
        void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
            const QuadEdgeEffect& qe = args.fGeomProc.cast<QuadEdgeEffect>();
            GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
            GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
            GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
            GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;

            // emit attributes
            varyingHandler->emitAttributes(qe);

            // GL on iOS 14 needs more precision for the quadedge attributes
            // We might as well enable it everywhere
            GrGLSLVarying v(kFloat4_GrSLType);
            varyingHandler->addVarying("QuadEdge", &v);
            vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge.name());

            // Setup pass through color
            fragBuilder->codeAppendf("half4 %s;", args.fOutputColor);
            varyingHandler->addPassThroughAttribute(qe.fInColor.asShaderVar(), args.fOutputColor);

            // Setup position
            WriteOutputPosition(vertBuilder, gpArgs, qe.fInPosition.name());
            if (qe.fUsesLocalCoords) {
                WriteLocalCoord(vertBuilder,
                                uniformHandler,
                                *args.fShaderCaps,
                                gpArgs,
                                qe.fInPosition.asShaderVar(),
                                qe.fLocalMatrix,
                                &fLocalMatrixUniform);
            }

            fragBuilder->codeAppendf("half edgeAlpha;");

            // keep the derivative instructions outside the conditional
            fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
            // today we know z and w are in device space. We could use derivatives
            fragBuilder->codeAppendf("edgeAlpha = half(min(min(%s.z, %s.w) + 0.5, 1.0));", v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf ("} else {");
            // Gradient of f(u,v) = u^2 - v, used to convert the implicit
            // function value into an approximate signed distance.
            fragBuilder->codeAppendf("half2 gF = half2(half(2.0*%s.x*duvdx.x - duvdx.y),"
                                     "               half(2.0*%s.x*duvdy.x - duvdy.y));",
                                     v.fsIn(), v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = half(%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = "
                                     "saturate(0.5 - edgeAlpha / length(gF));}");

            fragBuilder->codeAppendf("half4 %s = half4(edgeAlpha);", args.fOutputCoverage);
        }

    private:
        SkMatrix fLocalMatrix = SkMatrix::InvalidMatrix();

        UniformHandle fLocalMatrixUniform;
    };

    return std::make_unique<Impl>();
}
668
669 GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);
670
#if GR_TEST_UTILS
// Fuzz/unit-test factory: random local matrix and flags. Returns nullptr
// when the caps lack shader derivative support, which this effect requires.
GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
    SkMatrix localMatrix = GrTest::TestMatrix(d->fRandom);
    bool usesLocalCoords = d->fRandom->nextBool();
    bool wideColor = d->fRandom->nextBool();
    // Doesn't work without derivative instructions.
    return d->caps()->shaderCaps()->shaderDerivativeSupport()
                   ? QuadEdgeEffect::Make(d->allocator(), localMatrix, usesLocalCoords, wideColor)
                   : nullptr;
}
#endif
682
683 class AAConvexPathOp final : public GrMeshDrawOp {
684 private:
685 using Helper = GrSimpleMeshDrawOpHelperWithStencil;
686
687 public:
688 DEFINE_OP_CLASS_ID
689
Make(GrRecordingContext * context,GrPaint && paint,const SkMatrix & viewMatrix,const SkPath & path,const GrUserStencilSettings * stencilSettings)690 static GrOp::Owner Make(GrRecordingContext* context,
691 GrPaint&& paint,
692 const SkMatrix& viewMatrix,
693 const SkPath& path,
694 const GrUserStencilSettings* stencilSettings) {
695 return Helper::FactoryHelper<AAConvexPathOp>(context, std::move(paint), viewMatrix, path,
696 stencilSettings);
697 }
698
AAConvexPathOp(GrProcessorSet * processorSet,const SkPMColor4f & color,const SkMatrix & viewMatrix,const SkPath & path,const GrUserStencilSettings * stencilSettings)699 AAConvexPathOp(GrProcessorSet* processorSet, const SkPMColor4f& color,
700 const SkMatrix& viewMatrix, const SkPath& path,
701 const GrUserStencilSettings* stencilSettings)
702 : INHERITED(ClassID()), fHelper(processorSet, GrAAType::kCoverage, stencilSettings) {
703 fPaths.emplace_back(PathData{viewMatrix, path, color});
704 this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
705 IsHairline::kNo);
706 }
707
name() const708 const char* name() const override { return "AAConvexPathOp"; }
709
visitProxies(const GrVisitProxyFunc & func) const710 void visitProxies(const GrVisitProxyFunc& func) const override {
711 if (fProgramInfo) {
712 fProgramInfo->visitFPProxies(func);
713 } else {
714 fHelper.visitProxies(func);
715 }
716 }
717
fixedFunctionFlags() const718 FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
719
finalize(const GrCaps & caps,const GrAppliedClip * clip,GrClampType clampType)720 GrProcessorSet::Analysis finalize(const GrCaps& caps, const GrAppliedClip* clip,
721 GrClampType clampType) override {
722 return fHelper.finalizeProcessors(
723 caps, clip, clampType, GrProcessorAnalysisCoverage::kSingleChannel,
724 &fPaths.back().fColor, &fWideColor);
725 }
726
727 private:
programInfo()728 GrProgramInfo* programInfo() override { return fProgramInfo; }
729
onCreateProgramInfo(const GrCaps * caps,SkArenaAlloc * arena,const GrSurfaceProxyView & writeView,bool usesMSAASurface,GrAppliedClip && appliedClip,const GrDstProxyView & dstProxyView,GrXferBarrierFlags renderPassXferBarriers,GrLoadOp colorLoadOp)730 void onCreateProgramInfo(const GrCaps* caps,
731 SkArenaAlloc* arena,
732 const GrSurfaceProxyView& writeView,
733 bool usesMSAASurface,
734 GrAppliedClip&& appliedClip,
735 const GrDstProxyView& dstProxyView,
736 GrXferBarrierFlags renderPassXferBarriers,
737 GrLoadOp colorLoadOp) override {
738 SkMatrix invert;
739 if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) {
740 return;
741 }
742
743 GrGeometryProcessor* quadProcessor = QuadEdgeEffect::Make(arena, invert,
744 fHelper.usesLocalCoords(),
745 fWideColor);
746
747 fProgramInfo = fHelper.createProgramInfoWithStencil(caps, arena, writeView, usesMSAASurface,
748 std::move(appliedClip),
749 dstProxyView, quadProcessor,
750 GrPrimitiveType::kTriangles,
751 renderPassXferBarriers, colorLoadOp);
752 }
753
onPrepareDraws(GrMeshDrawTarget * target)754 void onPrepareDraws(GrMeshDrawTarget* target) override {
755 int instanceCount = fPaths.count();
756
757 if (!fProgramInfo) {
758 this->createProgramInfo(target);
759 if (!fProgramInfo) {
760 return;
761 }
762 }
763
764 const size_t kVertexStride = fProgramInfo->geomProc().vertexStride();
765
766 fDraws.reserve(instanceCount);
767
768 // TODO generate all segments for all paths and use one vertex buffer
769 for (int i = 0; i < instanceCount; i++) {
770 const PathData& args = fPaths[i];
771
772 // We use the fact that SkPath::transform path does subdivision based on
773 // perspective. Otherwise, we apply the view matrix when copying to the
774 // segment representation.
775 const SkMatrix* viewMatrix = &args.fViewMatrix;
776
777 // We avoid initializing the path unless we have to
778 const SkPath* pathPtr = &args.fPath;
779 SkTLazy<SkPath> tmpPath;
780 if (viewMatrix->hasPerspective()) {
781 SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
782 tmpPathPtr->setIsVolatile(true);
783 tmpPathPtr->transform(*viewMatrix);
784 viewMatrix = &SkMatrix::I();
785 pathPtr = tmpPathPtr;
786 }
787
788 int vertexCount;
789 int indexCount;
790 enum {
791 kPreallocSegmentCnt = 512 / sizeof(Segment),
792 kPreallocDrawCnt = 4,
793 };
794 SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
795 SkPoint fanPt;
796
797 if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
798 &indexCount)) {
799 continue;
800 }
801
802 sk_sp<const GrBuffer> vertexBuffer;
803 int firstVertex;
804
805 VertexWriter verts{target->makeVertexSpace(kVertexStride,
806 vertexCount,
807 &vertexBuffer,
808 &firstVertex)};
809
810 if (!verts) {
811 SkDebugf("Could not allocate vertices\n");
812 return;
813 }
814
815 sk_sp<const GrBuffer> indexBuffer;
816 int firstIndex;
817
818 uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
819 if (!idxs) {
820 SkDebugf("Could not allocate indices\n");
821 return;
822 }
823
824 SkSTArray<kPreallocDrawCnt, Draw, true> draws;
825 GrVertexColor color(args.fColor, fWideColor);
826 create_vertices(segments, fanPt, color, &draws, verts, idxs, kVertexStride);
827
828 GrSimpleMesh* meshes = target->allocMeshes(draws.count());
829 for (int j = 0; j < draws.count(); ++j) {
830 const Draw& draw = draws[j];
831 meshes[j].setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0,
832 draw.fVertexCnt - 1, GrPrimitiveRestart::kNo, vertexBuffer,
833 firstVertex);
834 firstIndex += draw.fIndexCnt;
835 firstVertex += draw.fVertexCnt;
836 }
837
838 fDraws.push_back({ meshes, draws.count() });
839 }
840 }
841
onExecute(GrOpFlushState * flushState,const SkRect & chainBounds)842 void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
843 if (!fProgramInfo || fDraws.isEmpty()) {
844 return;
845 }
846
847 flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
848 flushState->bindTextures(fProgramInfo->geomProc(), nullptr, fProgramInfo->pipeline());
849 for (int i = 0; i < fDraws.count(); ++i) {
850 for (int j = 0; j < fDraws[i].fMeshCount; ++j) {
851 flushState->drawMesh(fDraws[i].fMeshes[j]);
852 }
853 }
854 }
855
onCombineIfPossible(GrOp * t,SkArenaAlloc *,const GrCaps & caps)856 CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc*, const GrCaps& caps) override {
857 AAConvexPathOp* that = t->cast<AAConvexPathOp>();
858 if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
859 return CombineResult::kCannotCombine;
860 }
861 if (fHelper.usesLocalCoords() &&
862 !SkMatrixPriv::CheapEqual(fPaths[0].fViewMatrix, that->fPaths[0].fViewMatrix)) {
863 return CombineResult::kCannotCombine;
864 }
865
866 fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
867 fWideColor |= that->fWideColor;
868 return CombineResult::kMerged;
869 }
870
871 #if GR_TEST_UTILS
onDumpInfo() const872 SkString onDumpInfo() const override {
873 return SkStringPrintf("Count: %d\n%s", fPaths.count(), fHelper.dumpInfo().c_str());
874 }
875 #endif
876
877 struct PathData {
878 SkMatrix fViewMatrix;
879 SkPath fPath;
880 SkPMColor4f fColor;
881 };
882
883 Helper fHelper;
884 SkSTArray<1, PathData, true> fPaths;
885 bool fWideColor;
886
887 struct MeshDraw {
888 GrSimpleMesh* fMeshes;
889 int fMeshCount;
890 };
891
892 SkTDArray<MeshDraw> fDraws;
893 GrProgramInfo* fProgramInfo = nullptr;
894
895 using INHERITED = GrMeshDrawOp;
896 };
897
898 } // anonymous namespace
899
900 ///////////////////////////////////////////////////////////////////////////////
901
onCanDrawPath(const CanDrawPathArgs & args) const902 PathRenderer::CanDrawPath AAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
903 // This check requires convexity and known direction, since the direction is used to build
904 // the geometry segments. Degenerate convex paths will fall through to some other path renderer.
905 if (args.fCaps->shaderCaps()->shaderDerivativeSupport() &&
906 (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() &&
907 !args.fShape->inverseFilled() && args.fShape->knownToBeConvex() &&
908 args.fShape->knownDirection()) {
909 return CanDrawPath::kYes;
910 }
911 return CanDrawPath::kNo;
912 }
913
// Converts the shape to an SkPath and records an AAConvexPathOp for it.
// Always reports the path as handled (returns true).
bool AAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fContext->priv().auditTrail(),
                              "AAConvexPathRenderer::onDrawPath");
    // Coverage-AA path rendering; expected only on non-MSAA targets.
    SkASSERT(args.fSurfaceDrawContext->numSamples() <= 1);
    SkASSERT(!args.fShape->isEmpty());

    SkPath path;
    args.fShape->asPath(&path);

    GrOp::Owner op = AAConvexPathOp::Make(args.fContext, std::move(args.fPaint),
                                          *args.fViewMatrix,
                                          path, args.fUserStencilSettings);
    args.fSurfaceDrawContext->addDrawOp(args.fClip, std::move(op));
    return true;
}
929
930 } // namespace skgpu::v1
931
#if GR_TEST_UTILS

// Unit-test factory for the op: random invertible view matrix, random convex
// path, and random stencil settings.
GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
    SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
    const SkPath& path = GrTest::TestPathConvex(random);
    const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context);
    return skgpu::v1::AAConvexPathOp::Make(context, std::move(paint), viewMatrix, path,
                                           stencilSettings);
}

#endif
943