1 /*
2 * Copyright 2012 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "include/core/SkString.h"
9 #include "include/core/SkTypes.h"
10 #include "src/core/SkGeometry.h"
11 #include "src/core/SkPathPriv.h"
12 #include "src/core/SkPointPriv.h"
13 #include "src/gpu/GrAuditTrail.h"
14 #include "src/gpu/GrCaps.h"
15 #include "src/gpu/GrDrawOpTest.h"
16 #include "src/gpu/GrGeometryProcessor.h"
17 #include "src/gpu/GrProcessor.h"
18 #include "src/gpu/GrRenderTargetContext.h"
19 #include "src/gpu/GrVertexWriter.h"
20 #include "src/gpu/geometry/GrPathUtils.h"
21 #include "src/gpu/geometry/GrShape.h"
22 #include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
23 #include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
24 #include "src/gpu/glsl/GrGLSLProgramDataManager.h"
25 #include "src/gpu/glsl/GrGLSLUniformHandler.h"
26 #include "src/gpu/glsl/GrGLSLVarying.h"
27 #include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
28 #include "src/gpu/ops/GrAAConvexPathRenderer.h"
29 #include "src/gpu/ops/GrMeshDrawOp.h"
30 #include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
31
GrAAConvexPathRenderer()32 GrAAConvexPathRenderer::GrAAConvexPathRenderer() {
33 }
34
35 struct Segment {
36 enum {
37 // These enum values are assumed in member functions below.
38 kLine = 0,
39 kQuad = 1,
40 } fType;
41
42 // line uses one pt, quad uses 2 pts
43 SkPoint fPts[2];
44 // normal to edge ending at each pt
45 SkVector fNorms[2];
46 // is the corner where the previous segment meets this segment
47 // sharp. If so, fMid is a normalized bisector facing outward.
48 SkVector fMid;
49
countPointsSegment50 int countPoints() {
51 GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
52 return fType + 1;
53 }
endPtSegment54 const SkPoint& endPt() const {
55 GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
56 return fPts[fType];
57 }
endNormSegment58 const SkPoint& endNorm() const {
59 GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
60 return fNorms[fType];
61 }
62 };
63
64 typedef SkTArray<Segment, true> SegmentArray;
65
center_of_mass(const SegmentArray & segments,SkPoint * c)66 static bool center_of_mass(const SegmentArray& segments, SkPoint* c) {
67 SkScalar area = 0;
68 SkPoint center = {0, 0};
69 int count = segments.count();
70 SkPoint p0 = {0, 0};
71 if (count > 2) {
72 // We translate the polygon so that the first point is at the origin.
73 // This avoids some precision issues with small area polygons far away
74 // from the origin.
75 p0 = segments[0].endPt();
76 SkPoint pi;
77 SkPoint pj;
78 // the first and last iteration of the below loop would compute
79 // zeros since the starting / ending point is (0,0). So instead we start
80 // at i=1 and make the last iteration i=count-2.
81 pj = segments[1].endPt() - p0;
82 for (int i = 1; i < count - 1; ++i) {
83 pi = pj;
84 pj = segments[i + 1].endPt() - p0;
85
86 SkScalar t = SkPoint::CrossProduct(pi, pj);
87 area += t;
88 center.fX += (pi.fX + pj.fX) * t;
89 center.fY += (pi.fY + pj.fY) * t;
90 }
91 }
92
93 // If the poly has no area then we instead return the average of
94 // its points.
95 if (SkScalarNearlyZero(area)) {
96 SkPoint avg;
97 avg.set(0, 0);
98 for (int i = 0; i < count; ++i) {
99 const SkPoint& pt = segments[i].endPt();
100 avg.fX += pt.fX;
101 avg.fY += pt.fY;
102 }
103 SkScalar denom = SK_Scalar1 / count;
104 avg.scale(denom);
105 *c = avg;
106 } else {
107 area *= 3;
108 area = SkScalarInvert(area);
109 center.scale(area);
110 // undo the translate of p0 to the origin.
111 *c = center + p0;
112 }
113 return !SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY) && c->isFinite();
114 }
115
compute_vectors(SegmentArray * segments,SkPoint * fanPt,SkPathPriv::FirstDirection dir,int * vCount,int * iCount)116 static bool compute_vectors(SegmentArray* segments,
117 SkPoint* fanPt,
118 SkPathPriv::FirstDirection dir,
119 int* vCount,
120 int* iCount) {
121 if (!center_of_mass(*segments, fanPt)) {
122 return false;
123 }
124 int count = segments->count();
125
126 // Make the normals point towards the outside
127 SkPointPriv::Side normSide;
128 if (dir == SkPathPriv::kCCW_FirstDirection) {
129 normSide = SkPointPriv::kRight_Side;
130 } else {
131 normSide = SkPointPriv::kLeft_Side;
132 }
133
134 int64_t vCount64 = 0;
135 int64_t iCount64 = 0;
136 // compute normals at all points
137 for (int a = 0; a < count; ++a) {
138 Segment& sega = (*segments)[a];
139 int b = (a + 1) % count;
140 Segment& segb = (*segments)[b];
141
142 const SkPoint* prevPt = &sega.endPt();
143 int n = segb.countPoints();
144 for (int p = 0; p < n; ++p) {
145 segb.fNorms[p] = segb.fPts[p] - *prevPt;
146 segb.fNorms[p].normalize();
147 segb.fNorms[p] = SkPointPriv::MakeOrthog(segb.fNorms[p], normSide);
148 prevPt = &segb.fPts[p];
149 }
150 if (Segment::kLine == segb.fType) {
151 vCount64 += 5;
152 iCount64 += 9;
153 } else {
154 vCount64 += 6;
155 iCount64 += 12;
156 }
157 }
158
159 // compute mid-vectors where segments meet. TODO: Detect shallow corners
160 // and leave out the wedges and close gaps by stitching segments together.
161 for (int a = 0; a < count; ++a) {
162 const Segment& sega = (*segments)[a];
163 int b = (a + 1) % count;
164 Segment& segb = (*segments)[b];
165 segb.fMid = segb.fNorms[0] + sega.endNorm();
166 segb.fMid.normalize();
167 // corner wedges
168 vCount64 += 4;
169 iCount64 += 6;
170 }
171 if (vCount64 > SK_MaxS32 || iCount64 > SK_MaxS32) {
172 return false;
173 }
174 *vCount = vCount64;
175 *iCount = iCount64;
176 return true;
177 }
178
179 struct DegenerateTestData {
DegenerateTestDataDegenerateTestData180 DegenerateTestData() { fStage = kInitial; }
isDegenerateDegenerateTestData181 bool isDegenerate() const { return kNonDegenerate != fStage; }
182 enum {
183 kInitial,
184 kPoint,
185 kLine,
186 kNonDegenerate
187 } fStage;
188 SkPoint fFirstPoint;
189 SkVector fLineNormal;
190 SkScalar fLineC;
191 };
192
193 static const SkScalar kClose = (SK_Scalar1 / 16);
194 static const SkScalar kCloseSqd = kClose * kClose;
195
update_degenerate_test(DegenerateTestData * data,const SkPoint & pt)196 static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
197 switch (data->fStage) {
198 case DegenerateTestData::kInitial:
199 data->fFirstPoint = pt;
200 data->fStage = DegenerateTestData::kPoint;
201 break;
202 case DegenerateTestData::kPoint:
203 if (SkPointPriv::DistanceToSqd(pt, data->fFirstPoint) > kCloseSqd) {
204 data->fLineNormal = pt - data->fFirstPoint;
205 data->fLineNormal.normalize();
206 data->fLineNormal = SkPointPriv::MakeOrthog(data->fLineNormal);
207 data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
208 data->fStage = DegenerateTestData::kLine;
209 }
210 break;
211 case DegenerateTestData::kLine:
212 if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
213 data->fStage = DegenerateTestData::kNonDegenerate;
214 }
215 case DegenerateTestData::kNonDegenerate:
216 break;
217 default:
218 SK_ABORT("Unexpected degenerate test stage.");
219 }
220 }
221
get_direction(const SkPath & path,const SkMatrix & m,SkPathPriv::FirstDirection * dir)222 static inline bool get_direction(const SkPath& path, const SkMatrix& m,
223 SkPathPriv::FirstDirection* dir) {
224 if (!SkPathPriv::CheapComputeFirstDirection(path, dir)) {
225 return false;
226 }
227 // check whether m reverses the orientation
228 SkASSERT(!m.hasPerspective());
229 SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
230 m.get(SkMatrix::kMSkewX) * m.get(SkMatrix::kMSkewY);
231 if (det2x2 < 0) {
232 *dir = SkPathPriv::OppositeFirstDirection(*dir);
233 }
234 return true;
235 }
236
add_line_to_segment(const SkPoint & pt,SegmentArray * segments)237 static inline void add_line_to_segment(const SkPoint& pt,
238 SegmentArray* segments) {
239 segments->push_back();
240 segments->back().fType = Segment::kLine;
241 segments->back().fPts[0] = pt;
242 }
243
add_quad_segment(const SkPoint pts[3],SegmentArray * segments)244 static inline void add_quad_segment(const SkPoint pts[3],
245 SegmentArray* segments) {
246 if (SkPointPriv::DistanceToLineSegmentBetweenSqd(pts[1], pts[0], pts[2]) < kCloseSqd) {
247 if (pts[0] != pts[2]) {
248 add_line_to_segment(pts[2], segments);
249 }
250 } else {
251 segments->push_back();
252 segments->back().fType = Segment::kQuad;
253 segments->back().fPts[0] = pts[1];
254 segments->back().fPts[1] = pts[2];
255 }
256 }
257
add_cubic_segments(const SkPoint pts[4],SkPathPriv::FirstDirection dir,SegmentArray * segments)258 static inline void add_cubic_segments(const SkPoint pts[4],
259 SkPathPriv::FirstDirection dir,
260 SegmentArray* segments) {
261 SkSTArray<15, SkPoint, true> quads;
262 GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
263 int count = quads.count();
264 for (int q = 0; q < count; q += 3) {
265 add_quad_segment(&quads[q], segments);
266 }
267 }
268
get_segments(const SkPath & path,const SkMatrix & m,SegmentArray * segments,SkPoint * fanPt,int * vCount,int * iCount)269 static bool get_segments(const SkPath& path,
270 const SkMatrix& m,
271 SegmentArray* segments,
272 SkPoint* fanPt,
273 int* vCount,
274 int* iCount) {
275 SkPath::Iter iter(path, true);
276 // This renderer over-emphasizes very thin path regions. We use the distance
277 // to the path from the sample to compute coverage. Every pixel intersected
278 // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
279 // notice that the sample may be close to a very thin area of the path and
280 // thus should be very light. This is particularly egregious for degenerate
281 // line paths. We detect paths that are very close to a line (zero area) and
282 // draw nothing.
283 DegenerateTestData degenerateData;
284 SkPathPriv::FirstDirection dir;
285 // get_direction can fail for some degenerate paths.
286 if (!get_direction(path, m, &dir)) {
287 return false;
288 }
289
290 for (;;) {
291 SkPoint pts[4];
292 SkPath::Verb verb = iter.next(pts);
293 switch (verb) {
294 case SkPath::kMove_Verb:
295 m.mapPoints(pts, 1);
296 update_degenerate_test(°enerateData, pts[0]);
297 break;
298 case SkPath::kLine_Verb: {
299 if (!SkPathPriv::AllPointsEq(pts, 2)) {
300 m.mapPoints(&pts[1], 1);
301 update_degenerate_test(°enerateData, pts[1]);
302 add_line_to_segment(pts[1], segments);
303 }
304 break;
305 }
306 case SkPath::kQuad_Verb:
307 if (!SkPathPriv::AllPointsEq(pts, 3)) {
308 m.mapPoints(pts, 3);
309 update_degenerate_test(°enerateData, pts[1]);
310 update_degenerate_test(°enerateData, pts[2]);
311 add_quad_segment(pts, segments);
312 }
313 break;
314 case SkPath::kConic_Verb: {
315 if (!SkPathPriv::AllPointsEq(pts, 3)) {
316 m.mapPoints(pts, 3);
317 SkScalar weight = iter.conicWeight();
318 SkAutoConicToQuads converter;
319 const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f);
320 for (int i = 0; i < converter.countQuads(); ++i) {
321 update_degenerate_test(°enerateData, quadPts[2*i + 1]);
322 update_degenerate_test(°enerateData, quadPts[2*i + 2]);
323 add_quad_segment(quadPts + 2*i, segments);
324 }
325 }
326 break;
327 }
328 case SkPath::kCubic_Verb: {
329 if (!SkPathPriv::AllPointsEq(pts, 4)) {
330 m.mapPoints(pts, 4);
331 update_degenerate_test(°enerateData, pts[1]);
332 update_degenerate_test(°enerateData, pts[2]);
333 update_degenerate_test(°enerateData, pts[3]);
334 add_cubic_segments(pts, dir, segments);
335 }
336 break;
337 }
338 case SkPath::kDone_Verb:
339 if (degenerateData.isDegenerate()) {
340 return false;
341 } else {
342 return compute_vectors(segments, fanPt, dir, vCount, iCount);
343 }
344 default:
345 break;
346 }
347 }
348 }
349
350 struct Draw {
DrawDraw351 Draw() : fVertexCnt(0), fIndexCnt(0) {}
352 int fVertexCnt;
353 int fIndexCnt;
354 };
355
356 typedef SkTArray<Draw, true> DrawArray;
357
create_vertices(const SegmentArray & segments,const SkPoint & fanPt,const GrVertexColor & color,DrawArray * draws,GrVertexWriter & verts,uint16_t * idxs,size_t vertexStride)358 static void create_vertices(const SegmentArray& segments,
359 const SkPoint& fanPt,
360 const GrVertexColor& color,
361 DrawArray* draws,
362 GrVertexWriter& verts,
363 uint16_t* idxs,
364 size_t vertexStride) {
365 Draw* draw = &draws->push_back();
366 // alias just to make vert/index assignments easier to read.
367 int* v = &draw->fVertexCnt;
368 int* i = &draw->fIndexCnt;
369 const size_t uvOffset = sizeof(SkPoint) + color.size();
370
371 int count = segments.count();
372 for (int a = 0; a < count; ++a) {
373 const Segment& sega = segments[a];
374 int b = (a + 1) % count;
375 const Segment& segb = segments[b];
376
377 // Check whether adding the verts for this segment to the current draw would cause index
378 // values to overflow.
379 int vCount = 4;
380 if (Segment::kLine == segb.fType) {
381 vCount += 5;
382 } else {
383 vCount += 6;
384 }
385 if (draw->fVertexCnt + vCount > (1 << 16)) {
386 idxs += *i;
387 draw = &draws->push_back();
388 v = &draw->fVertexCnt;
389 i = &draw->fIndexCnt;
390 }
391
392 const SkScalar negOneDists[2] = { -SK_Scalar1, -SK_Scalar1 };
393
394 // FIXME: These tris are inset in the 1 unit arc around the corner
395 SkPoint p0 = sega.endPt();
396 // Position, Color, UV, D0, D1
397 verts.write(p0, color, SkPoint{0, 0}, negOneDists);
398 verts.write(p0 + sega.endNorm(), color, SkPoint{0, -SK_Scalar1}, negOneDists);
399 verts.write(p0 + segb.fMid, color, SkPoint{0, -SK_Scalar1}, negOneDists);
400 verts.write(p0 + segb.fNorms[0], color, SkPoint{0, -SK_Scalar1}, negOneDists);
401
402 idxs[*i + 0] = *v + 0;
403 idxs[*i + 1] = *v + 2;
404 idxs[*i + 2] = *v + 1;
405 idxs[*i + 3] = *v + 0;
406 idxs[*i + 4] = *v + 3;
407 idxs[*i + 5] = *v + 2;
408
409 *v += 4;
410 *i += 6;
411
412 if (Segment::kLine == segb.fType) {
413 // we draw the line edge as a degenerate quad (u is 0, v is the
414 // signed distance to the edge)
415 SkPoint v1Pos = sega.endPt();
416 SkPoint v2Pos = segb.fPts[0];
417 SkScalar dist = SkPointPriv::DistanceToLineBetween(fanPt, v1Pos, v2Pos);
418
419 verts.write(fanPt, color, SkPoint{0, dist}, negOneDists);
420 verts.write(v1Pos, color, SkPoint{0, 0}, negOneDists);
421 verts.write(v2Pos, color, SkPoint{0, 0}, negOneDists);
422 verts.write(v1Pos + segb.fNorms[0], color, SkPoint{0, -SK_Scalar1}, negOneDists);
423 verts.write(v2Pos + segb.fNorms[0], color, SkPoint{0, -SK_Scalar1}, negOneDists);
424
425 idxs[*i + 0] = *v + 3;
426 idxs[*i + 1] = *v + 1;
427 idxs[*i + 2] = *v + 2;
428
429 idxs[*i + 3] = *v + 4;
430 idxs[*i + 4] = *v + 3;
431 idxs[*i + 5] = *v + 2;
432
433 *i += 6;
434
435 // Draw the interior fan if it exists.
436 // TODO: Detect and combine colinear segments. This will ensure we catch every case
437 // with no interior, and that the resulting shared edge uses the same endpoints.
438 if (count >= 3) {
439 idxs[*i + 0] = *v + 0;
440 idxs[*i + 1] = *v + 2;
441 idxs[*i + 2] = *v + 1;
442
443 *i += 3;
444 }
445
446 *v += 5;
447 } else {
448 void* quadVertsBegin = verts.fPtr;
449
450 SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};
451
452 SkScalar c0 = segb.fNorms[0].dot(qpts[0]);
453 SkScalar c1 = segb.fNorms[1].dot(qpts[2]);
454 GrVertexWriter::Skip<SkPoint> skipUVs;
455
456 verts.write(fanPt,
457 color, skipUVs,
458 -segb.fNorms[0].dot(fanPt) + c0,
459 -segb.fNorms[1].dot(fanPt) + c1);
460
461 verts.write(qpts[0],
462 color, skipUVs,
463 0.0f,
464 -segb.fNorms[1].dot(qpts[0]) + c1);
465
466 verts.write(qpts[2],
467 color, skipUVs,
468 -segb.fNorms[0].dot(qpts[2]) + c0,
469 0.0f);
470
471 verts.write(qpts[0] + segb.fNorms[0],
472 color, skipUVs,
473 -SK_ScalarMax/100,
474 -SK_ScalarMax/100);
475
476 verts.write(qpts[2] + segb.fNorms[1],
477 color, skipUVs,
478 -SK_ScalarMax/100,
479 -SK_ScalarMax/100);
480
481 SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
482 midVec.normalize();
483
484 verts.write(qpts[1] + midVec,
485 color, skipUVs,
486 -SK_ScalarMax/100,
487 -SK_ScalarMax/100);
488
489 GrPathUtils::QuadUVMatrix toUV(qpts);
490 toUV.apply(quadVertsBegin, 6, vertexStride, uvOffset);
491
492 idxs[*i + 0] = *v + 3;
493 idxs[*i + 1] = *v + 1;
494 idxs[*i + 2] = *v + 2;
495 idxs[*i + 3] = *v + 4;
496 idxs[*i + 4] = *v + 3;
497 idxs[*i + 5] = *v + 2;
498
499 idxs[*i + 6] = *v + 5;
500 idxs[*i + 7] = *v + 3;
501 idxs[*i + 8] = *v + 4;
502
503 *i += 9;
504
505 // Draw the interior fan if it exists.
506 // TODO: Detect and combine colinear segments. This will ensure we catch every case
507 // with no interior, and that the resulting shared edge uses the same endpoints.
508 if (count >= 3) {
509 idxs[*i + 0] = *v + 0;
510 idxs[*i + 1] = *v + 2;
511 idxs[*i + 2] = *v + 1;
512
513 *i += 3;
514 }
515
516 *v += 6;
517 }
518 }
519 }
520
521 ///////////////////////////////////////////////////////////////////////////////
522
523 /*
524 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
525 * two components of the vertex attribute. Coverage is based on signed
526 * distance with negative being inside, positive outside. The edge is specified in
527 * window space (y-down). If either the third or fourth component of the interpolated
528 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
529 * attempt to trim to a portion of the infinite quad.
530 * Requires shader derivative instruction support.
531 */
532
533 class QuadEdgeEffect : public GrGeometryProcessor {
534 public:
Make(const SkMatrix & localMatrix,bool usesLocalCoords,bool wideColor)535 static sk_sp<GrGeometryProcessor> Make(const SkMatrix& localMatrix, bool usesLocalCoords,
536 bool wideColor) {
537 return sk_sp<GrGeometryProcessor>(
538 new QuadEdgeEffect(localMatrix, usesLocalCoords, wideColor));
539 }
540
~QuadEdgeEffect()541 ~QuadEdgeEffect() override {}
542
name() const543 const char* name() const override { return "QuadEdge"; }
544
545 class GLSLProcessor : public GrGLSLGeometryProcessor {
546 public:
GLSLProcessor()547 GLSLProcessor() {}
548
onEmitCode(EmitArgs & args,GrGPArgs * gpArgs)549 void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
550 const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>();
551 GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
552 GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
553 GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
554
555 // emit attributes
556 varyingHandler->emitAttributes(qe);
557
558 GrGLSLVarying v(kHalf4_GrSLType);
559 varyingHandler->addVarying("QuadEdge", &v);
560 vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge.name());
561
562 // Setup pass through color
563 varyingHandler->addPassThroughAttribute(qe.fInColor, args.fOutputColor);
564
565 GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
566
567 // Setup position
568 this->writeOutputPosition(vertBuilder, gpArgs, qe.fInPosition.name());
569
570 // emit transforms
571 this->emitTransforms(vertBuilder,
572 varyingHandler,
573 uniformHandler,
574 qe.fInPosition.asShaderVar(),
575 qe.fLocalMatrix,
576 args.fFPCoordTransformHandler);
577
578 fragBuilder->codeAppendf("half edgeAlpha;");
579
580 // keep the derivative instructions outside the conditional
581 fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));", v.fsIn());
582 fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));", v.fsIn());
583 fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
584 // today we know z and w are in device space. We could use derivatives
585 fragBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);", v.fsIn(),
586 v.fsIn());
587 fragBuilder->codeAppendf ("} else {");
588 fragBuilder->codeAppendf("half2 gF = half2(2.0*%s.x*duvdx.x - duvdx.y,"
589 " 2.0*%s.x*duvdy.x - duvdy.y);",
590 v.fsIn(), v.fsIn());
591 fragBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
592 v.fsIn());
593 fragBuilder->codeAppendf("edgeAlpha = "
594 "saturate(0.5 - edgeAlpha / length(gF));}");
595
596 fragBuilder->codeAppendf("%s = half4(edgeAlpha);", args.fOutputCoverage);
597 }
598
GenKey(const GrGeometryProcessor & gp,const GrShaderCaps &,GrProcessorKeyBuilder * b)599 static inline void GenKey(const GrGeometryProcessor& gp,
600 const GrShaderCaps&,
601 GrProcessorKeyBuilder* b) {
602 const QuadEdgeEffect& qee = gp.cast<QuadEdgeEffect>();
603 b->add32(SkToBool(qee.fUsesLocalCoords && qee.fLocalMatrix.hasPerspective()));
604 }
605
setData(const GrGLSLProgramDataManager & pdman,const GrPrimitiveProcessor & gp,FPCoordTransformIter && transformIter)606 void setData(const GrGLSLProgramDataManager& pdman,
607 const GrPrimitiveProcessor& gp,
608 FPCoordTransformIter&& transformIter) override {
609 const QuadEdgeEffect& qe = gp.cast<QuadEdgeEffect>();
610 this->setTransformDataHelper(qe.fLocalMatrix, pdman, &transformIter);
611 }
612
613 private:
614 typedef GrGLSLGeometryProcessor INHERITED;
615 };
616
getGLSLProcessorKey(const GrShaderCaps & caps,GrProcessorKeyBuilder * b) const617 void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
618 GLSLProcessor::GenKey(*this, caps, b);
619 }
620
createGLSLInstance(const GrShaderCaps &) const621 GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override {
622 return new GLSLProcessor();
623 }
624
625 private:
QuadEdgeEffect(const SkMatrix & localMatrix,bool usesLocalCoords,bool wideColor)626 QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords, bool wideColor)
627 : INHERITED(kQuadEdgeEffect_ClassID)
628 , fLocalMatrix(localMatrix)
629 , fUsesLocalCoords(usesLocalCoords) {
630 fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
631 fInColor = MakeColorAttribute("inColor", wideColor);
632 fInQuadEdge = {"inQuadEdge", kFloat4_GrVertexAttribType, kHalf4_GrSLType};
633 this->setVertexAttributes(&fInPosition, 3);
634 }
635
636 Attribute fInPosition;
637 Attribute fInColor;
638 Attribute fInQuadEdge;
639
640 SkMatrix fLocalMatrix;
641 bool fUsesLocalCoords;
642
643 GR_DECLARE_GEOMETRY_PROCESSOR_TEST
644
645 typedef GrGeometryProcessor INHERITED;
646 };
647
648 GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);
649
650 #if GR_TEST_UTILS
TestCreate(GrProcessorTestData * d)651 sk_sp<GrGeometryProcessor> QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
652 // Doesn't work without derivative instructions.
653 return d->caps()->shaderCaps()->shaderDerivativeSupport()
654 ? QuadEdgeEffect::Make(GrTest::TestMatrix(d->fRandom), d->fRandom->nextBool(),
655 d->fRandom->nextBool())
656 : nullptr;
657 }
658 #endif
659
660 ///////////////////////////////////////////////////////////////////////////////
661
662 GrPathRenderer::CanDrawPath
onCanDrawPath(const CanDrawPathArgs & args) const663 GrAAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
664 if (args.fCaps->shaderCaps()->shaderDerivativeSupport() &&
665 (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() &&
666 !args.fShape->inverseFilled() && args.fShape->knownToBeConvex()) {
667 return CanDrawPath::kYes;
668 }
669 return CanDrawPath::kNo;
670 }
671
672 namespace {
673
674 class AAConvexPathOp final : public GrMeshDrawOp {
675 private:
676 using Helper = GrSimpleMeshDrawOpHelperWithStencil;
677
678 public:
679 DEFINE_OP_CLASS_ID
680
Make(GrRecordingContext * context,GrPaint && paint,const SkMatrix & viewMatrix,const SkPath & path,const GrUserStencilSettings * stencilSettings)681 static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
682 GrPaint&& paint,
683 const SkMatrix& viewMatrix,
684 const SkPath& path,
685 const GrUserStencilSettings* stencilSettings) {
686 return Helper::FactoryHelper<AAConvexPathOp>(context, std::move(paint), viewMatrix, path,
687 stencilSettings);
688 }
689
AAConvexPathOp(const Helper::MakeArgs & helperArgs,const SkPMColor4f & color,const SkMatrix & viewMatrix,const SkPath & path,const GrUserStencilSettings * stencilSettings)690 AAConvexPathOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
691 const SkMatrix& viewMatrix, const SkPath& path,
692 const GrUserStencilSettings* stencilSettings)
693 : INHERITED(ClassID()), fHelper(helperArgs, GrAAType::kCoverage, stencilSettings) {
694 fPaths.emplace_back(PathData{viewMatrix, path, color});
695 this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes, IsZeroArea::kNo);
696 }
697
name() const698 const char* name() const override { return "AAConvexPathOp"; }
699
visitProxies(const VisitProxyFunc & func) const700 void visitProxies(const VisitProxyFunc& func) const override {
701 fHelper.visitProxies(func);
702 }
703
704 #ifdef SK_DEBUG
dumpInfo() const705 SkString dumpInfo() const override {
706 SkString string;
707 string.appendf("Count: %d\n", fPaths.count());
708 string += fHelper.dumpInfo();
709 string += INHERITED::dumpInfo();
710 return string;
711 }
712 #endif
713
fixedFunctionFlags() const714 FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
715
finalize(const GrCaps & caps,const GrAppliedClip * clip,bool hasMixedSampledCoverage,GrClampType clampType)716 GrProcessorSet::Analysis finalize(
717 const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
718 GrClampType clampType) override {
719 return fHelper.finalizeProcessors(
720 caps, clip, hasMixedSampledCoverage, clampType,
721 GrProcessorAnalysisCoverage::kSingleChannel, &fPaths.back().fColor, &fWideColor);
722 }
723
724 private:
onPrepareDraws(Target * target)725 void onPrepareDraws(Target* target) override {
726 int instanceCount = fPaths.count();
727
728 SkMatrix invert;
729 if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) {
730 return;
731 }
732
733 // Setup GrGeometryProcessor
734 sk_sp<GrGeometryProcessor> quadProcessor(
735 QuadEdgeEffect::Make(invert, fHelper.usesLocalCoords(), fWideColor));
736 const size_t kVertexStride = quadProcessor->vertexStride();
737
738 // TODO generate all segments for all paths and use one vertex buffer
739 for (int i = 0; i < instanceCount; i++) {
740 const PathData& args = fPaths[i];
741
742 // We use the fact that SkPath::transform path does subdivision based on
743 // perspective. Otherwise, we apply the view matrix when copying to the
744 // segment representation.
745 const SkMatrix* viewMatrix = &args.fViewMatrix;
746
747 // We avoid initializing the path unless we have to
748 const SkPath* pathPtr = &args.fPath;
749 SkTLazy<SkPath> tmpPath;
750 if (viewMatrix->hasPerspective()) {
751 SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
752 tmpPathPtr->setIsVolatile(true);
753 tmpPathPtr->transform(*viewMatrix);
754 viewMatrix = &SkMatrix::I();
755 pathPtr = tmpPathPtr;
756 }
757
758 int vertexCount;
759 int indexCount;
760 enum {
761 kPreallocSegmentCnt = 512 / sizeof(Segment),
762 kPreallocDrawCnt = 4,
763 };
764 SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
765 SkPoint fanPt;
766
767 if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
768 &indexCount)) {
769 continue;
770 }
771
772 sk_sp<const GrBuffer> vertexBuffer;
773 int firstVertex;
774
775 GrVertexWriter verts{target->makeVertexSpace(kVertexStride, vertexCount,
776 &vertexBuffer, &firstVertex)};
777
778 if (!verts.fPtr) {
779 SkDebugf("Could not allocate vertices\n");
780 return;
781 }
782
783 sk_sp<const GrBuffer> indexBuffer;
784 int firstIndex;
785
786 uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
787 if (!idxs) {
788 SkDebugf("Could not allocate indices\n");
789 return;
790 }
791
792 SkSTArray<kPreallocDrawCnt, Draw, true> draws;
793 GrVertexColor color(args.fColor, fWideColor);
794 create_vertices(segments, fanPt, color, &draws, verts, idxs, kVertexStride);
795
796 GrMesh* meshes = target->allocMeshes(draws.count());
797 for (int j = 0; j < draws.count(); ++j) {
798 const Draw& draw = draws[j];
799 meshes[j].setPrimitiveType(GrPrimitiveType::kTriangles);
800 meshes[j].setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0,
801 draw.fVertexCnt - 1, GrPrimitiveRestart::kNo);
802 meshes[j].setVertexData(vertexBuffer, firstVertex);
803 firstIndex += draw.fIndexCnt;
804 firstVertex += draw.fVertexCnt;
805 }
806 target->recordDraw(quadProcessor, meshes, draws.count());
807 }
808 }
809
onExecute(GrOpFlushState * flushState,const SkRect & chainBounds)810 void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
811 fHelper.executeDrawsAndUploads(this, flushState, chainBounds);
812 }
813
onCombineIfPossible(GrOp * t,const GrCaps & caps)814 CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
815 AAConvexPathOp* that = t->cast<AAConvexPathOp>();
816 if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
817 return CombineResult::kCannotCombine;
818 }
819 if (fHelper.usesLocalCoords() &&
820 !fPaths[0].fViewMatrix.cheapEqualTo(that->fPaths[0].fViewMatrix)) {
821 return CombineResult::kCannotCombine;
822 }
823
824 fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
825 fWideColor |= that->fWideColor;
826 return CombineResult::kMerged;
827 }
828
829 struct PathData {
830 SkMatrix fViewMatrix;
831 SkPath fPath;
832 SkPMColor4f fColor;
833 };
834
835 Helper fHelper;
836 SkSTArray<1, PathData, true> fPaths;
837 bool fWideColor;
838
839 typedef GrMeshDrawOp INHERITED;
840 };
841
842 } // anonymous namespace
843
onDrawPath(const DrawPathArgs & args)844 bool GrAAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
845 GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
846 "GrAAConvexPathRenderer::onDrawPath");
847 SkASSERT(args.fRenderTargetContext->numSamples() <= 1);
848 SkASSERT(!args.fShape->isEmpty());
849
850 SkPath path;
851 args.fShape->asPath(&path);
852
853 std::unique_ptr<GrDrawOp> op = AAConvexPathOp::Make(args.fContext, std::move(args.fPaint),
854 *args.fViewMatrix,
855 path, args.fUserStencilSettings);
856 args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
857 return true;
858 }
859
860 ///////////////////////////////////////////////////////////////////////////////////////////////////
861
862 #if GR_TEST_UTILS
863
GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp)864 GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
865 SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
866 SkPath path = GrTest::TestPathConvex(random);
867 const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context);
868 return AAConvexPathOp::Make(context, std::move(paint), viewMatrix, path, stencilSettings);
869 }
870
871 #endif
872