1 /*
2 * Copyright 2012 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "GrAAConvexPathRenderer.h"
9
10 #include "GrAAConvexTessellator.h"
11 #include "GrCaps.h"
12 #include "GrContext.h"
13 #include "GrDefaultGeoProcFactory.h"
14 #include "GrDrawOpTest.h"
15 #include "GrGeometryProcessor.h"
16 #include "GrOpFlushState.h"
17 #include "GrPathUtils.h"
18 #include "GrPipelineBuilder.h"
19 #include "GrProcessor.h"
20 #include "SkGeometry.h"
21 #include "SkPathPriv.h"
22 #include "SkString.h"
23 #include "SkTraceEvent.h"
24 #include "glsl/GrGLSLFragmentShaderBuilder.h"
25 #include "glsl/GrGLSLGeometryProcessor.h"
26 #include "glsl/GrGLSLProgramDataManager.h"
27 #include "glsl/GrGLSLUniformHandler.h"
28 #include "glsl/GrGLSLVarying.h"
29 #include "glsl/GrGLSLVertexShaderBuilder.h"
30 #include "ops/GrMeshDrawOp.h"
31
GrAAConvexPathRenderer()32 GrAAConvexPathRenderer::GrAAConvexPathRenderer() {
33 }
34
35 struct Segment {
36 enum {
37 // These enum values are assumed in member functions below.
38 kLine = 0,
39 kQuad = 1,
40 } fType;
41
42 // line uses one pt, quad uses 2 pts
43 SkPoint fPts[2];
44 // normal to edge ending at each pt
45 SkVector fNorms[2];
46 // is the corner where the previous segment meets this segment
47 // sharp. If so, fMid is a normalized bisector facing outward.
48 SkVector fMid;
49
countPointsSegment50 int countPoints() {
51 GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
52 return fType + 1;
53 }
endPtSegment54 const SkPoint& endPt() const {
55 GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
56 return fPts[fType];
57 }
endNormSegment58 const SkPoint& endNorm() const {
59 GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
60 return fNorms[fType];
61 }
62 };
63
64 typedef SkTArray<Segment, true> SegmentArray;
65
center_of_mass(const SegmentArray & segments,SkPoint * c)66 static void center_of_mass(const SegmentArray& segments, SkPoint* c) {
67 SkScalar area = 0;
68 SkPoint center = {0, 0};
69 int count = segments.count();
70 SkPoint p0 = {0, 0};
71 if (count > 2) {
72 // We translate the polygon so that the first point is at the origin.
73 // This avoids some precision issues with small area polygons far away
74 // from the origin.
75 p0 = segments[0].endPt();
76 SkPoint pi;
77 SkPoint pj;
78 // the first and last iteration of the below loop would compute
79 // zeros since the starting / ending point is (0,0). So instead we start
80 // at i=1 and make the last iteration i=count-2.
81 pj = segments[1].endPt() - p0;
82 for (int i = 1; i < count - 1; ++i) {
83 pi = pj;
84 pj = segments[i + 1].endPt() - p0;
85
86 SkScalar t = SkPoint::CrossProduct(pi, pj);
87 area += t;
88 center.fX += (pi.fX + pj.fX) * t;
89 center.fY += (pi.fY + pj.fY) * t;
90 }
91 }
92
93 // If the poly has no area then we instead return the average of
94 // its points.
95 if (SkScalarNearlyZero(area)) {
96 SkPoint avg;
97 avg.set(0, 0);
98 for (int i = 0; i < count; ++i) {
99 const SkPoint& pt = segments[i].endPt();
100 avg.fX += pt.fX;
101 avg.fY += pt.fY;
102 }
103 SkScalar denom = SK_Scalar1 / count;
104 avg.scale(denom);
105 *c = avg;
106 } else {
107 area *= 3;
108 area = SkScalarInvert(area);
109 center.scale(area);
110 // undo the translate of p0 to the origin.
111 *c = center + p0;
112 }
113 SkASSERT(!SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY));
114 }
115
compute_vectors(SegmentArray * segments,SkPoint * fanPt,SkPathPriv::FirstDirection dir,int * vCount,int * iCount)116 static void compute_vectors(SegmentArray* segments,
117 SkPoint* fanPt,
118 SkPathPriv::FirstDirection dir,
119 int* vCount,
120 int* iCount) {
121 center_of_mass(*segments, fanPt);
122 int count = segments->count();
123
124 // Make the normals point towards the outside
125 SkPoint::Side normSide;
126 if (dir == SkPathPriv::kCCW_FirstDirection) {
127 normSide = SkPoint::kRight_Side;
128 } else {
129 normSide = SkPoint::kLeft_Side;
130 }
131
132 *vCount = 0;
133 *iCount = 0;
134 // compute normals at all points
135 for (int a = 0; a < count; ++a) {
136 Segment& sega = (*segments)[a];
137 int b = (a + 1) % count;
138 Segment& segb = (*segments)[b];
139
140 const SkPoint* prevPt = &sega.endPt();
141 int n = segb.countPoints();
142 for (int p = 0; p < n; ++p) {
143 segb.fNorms[p] = segb.fPts[p] - *prevPt;
144 segb.fNorms[p].normalize();
145 segb.fNorms[p].setOrthog(segb.fNorms[p], normSide);
146 prevPt = &segb.fPts[p];
147 }
148 if (Segment::kLine == segb.fType) {
149 *vCount += 5;
150 *iCount += 9;
151 } else {
152 *vCount += 6;
153 *iCount += 12;
154 }
155 }
156
157 // compute mid-vectors where segments meet. TODO: Detect shallow corners
158 // and leave out the wedges and close gaps by stitching segments together.
159 for (int a = 0; a < count; ++a) {
160 const Segment& sega = (*segments)[a];
161 int b = (a + 1) % count;
162 Segment& segb = (*segments)[b];
163 segb.fMid = segb.fNorms[0] + sega.endNorm();
164 segb.fMid.normalize();
165 // corner wedges
166 *vCount += 4;
167 *iCount += 6;
168 }
169 }
170
// State machine that detects whether the path seen so far collapses to a
// single point or a single line (see update_degenerate_test below).
struct DegenerateTestData {
    DegenerateTestData() { fStage = kInitial; }
    // True until a point off the initial point/line has been observed.
    bool isDegenerate() const { return kNonDegenerate != fStage; }
    enum {
        kInitial,        // no points seen yet
        kPoint,          // all points so far coincide with fFirstPoint
        kLine,           // all points so far lie on (fLineNormal, fLineC)
        kNonDegenerate   // the path encloses real area
    } fStage;
    SkPoint fFirstPoint;
    // Implicit line through the first two distinct points:
    // fLineNormal . x + fLineC == 0.
    SkVector fLineNormal;
    SkScalar fLineC;
};
184
// Distance (and its square) under which points are treated as coincident /
// on-line by the degeneracy tests and by add_quad_segment.
static const SkScalar kClose = (SK_Scalar1 / 16);
static const SkScalar kCloseSqd = kClose * kClose;
187
update_degenerate_test(DegenerateTestData * data,const SkPoint & pt)188 static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
189 switch (data->fStage) {
190 case DegenerateTestData::kInitial:
191 data->fFirstPoint = pt;
192 data->fStage = DegenerateTestData::kPoint;
193 break;
194 case DegenerateTestData::kPoint:
195 if (pt.distanceToSqd(data->fFirstPoint) > kCloseSqd) {
196 data->fLineNormal = pt - data->fFirstPoint;
197 data->fLineNormal.normalize();
198 data->fLineNormal.setOrthog(data->fLineNormal);
199 data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
200 data->fStage = DegenerateTestData::kLine;
201 }
202 break;
203 case DegenerateTestData::kLine:
204 if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
205 data->fStage = DegenerateTestData::kNonDegenerate;
206 }
207 case DegenerateTestData::kNonDegenerate:
208 break;
209 default:
210 SkFAIL("Unexpected degenerate test stage.");
211 }
212 }
213
get_direction(const SkPath & path,const SkMatrix & m,SkPathPriv::FirstDirection * dir)214 static inline bool get_direction(const SkPath& path, const SkMatrix& m,
215 SkPathPriv::FirstDirection* dir) {
216 if (!SkPathPriv::CheapComputeFirstDirection(path, dir)) {
217 return false;
218 }
219 // check whether m reverses the orientation
220 SkASSERT(!m.hasPerspective());
221 SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
222 m.get(SkMatrix::kMSkewX) * m.get(SkMatrix::kMSkewY);
223 if (det2x2 < 0) {
224 *dir = SkPathPriv::OppositeFirstDirection(*dir);
225 }
226 return true;
227 }
228
add_line_to_segment(const SkPoint & pt,SegmentArray * segments)229 static inline void add_line_to_segment(const SkPoint& pt,
230 SegmentArray* segments) {
231 segments->push_back();
232 segments->back().fType = Segment::kLine;
233 segments->back().fPts[0] = pt;
234 }
235
add_quad_segment(const SkPoint pts[3],SegmentArray * segments)236 static inline void add_quad_segment(const SkPoint pts[3],
237 SegmentArray* segments) {
238 if (pts[0].distanceToSqd(pts[1]) < kCloseSqd || pts[1].distanceToSqd(pts[2]) < kCloseSqd) {
239 if (pts[0] != pts[2]) {
240 add_line_to_segment(pts[2], segments);
241 }
242 } else {
243 segments->push_back();
244 segments->back().fType = Segment::kQuad;
245 segments->back().fPts[0] = pts[1];
246 segments->back().fPts[1] = pts[2];
247 }
248 }
249
add_cubic_segments(const SkPoint pts[4],SkPathPriv::FirstDirection dir,SegmentArray * segments)250 static inline void add_cubic_segments(const SkPoint pts[4],
251 SkPathPriv::FirstDirection dir,
252 SegmentArray* segments) {
253 SkSTArray<15, SkPoint, true> quads;
254 GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
255 int count = quads.count();
256 for (int q = 0; q < count; q += 3) {
257 add_quad_segment(&quads[q], segments);
258 }
259 }
260
// Walks every verb of the (convex) path, mapping points through m, and builds
// the segment list. On success fills in the fan point plus the total vertex
// and index counts needed to draw. Returns false when the path is degenerate
// (collapses to a point or a line) or its direction cannot be computed.
static bool get_segments(const SkPath& path,
                         const SkMatrix& m,
                         SegmentArray* segments,
                         SkPoint* fanPt,
                         int* vCount,
                         int* iCount) {
    SkPath::Iter iter(path, true);
    // This renderer over-emphasizes very thin path regions. We use the distance
    // to the path from the sample to compute coverage. Every pixel intersected
    // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
    // notice that the sample may be close to a very thin area of the path and
    // thus should be very light. This is particularly egregious for degenerate
    // line paths. We detect paths that are very close to a line (zero area) and
    // draw nothing.
    DegenerateTestData degenerateData;
    SkPathPriv::FirstDirection dir;
    // get_direction can fail for some degenerate paths.
    if (!get_direction(path, m, &dir)) {
        return false;
    }

    for (;;) {
        SkPoint pts[4];
        SkPath::Verb verb = iter.next(pts, true, true);
        switch (verb) {
            case SkPath::kMove_Verb:
                m.mapPoints(pts, 1);
                update_degenerate_test(&degenerateData, pts[0]);
                break;
            case SkPath::kLine_Verb: {
                // pts[0] is the previous point; only the new endpoint needs
                // mapping.
                m.mapPoints(&pts[1], 1);
                update_degenerate_test(&degenerateData, pts[1]);
                add_line_to_segment(pts[1], segments);
                break;
            }
            case SkPath::kQuad_Verb:
                m.mapPoints(pts, 3);
                update_degenerate_test(&degenerateData, pts[1]);
                update_degenerate_test(&degenerateData, pts[2]);
                add_quad_segment(pts, segments);
                break;
            case SkPath::kConic_Verb: {
                // Approximate the conic with quads (tolerance 0.5) and feed
                // each quad through the degenerate test and segment builder.
                m.mapPoints(pts, 3);
                SkScalar weight = iter.conicWeight();
                SkAutoConicToQuads converter;
                const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.5f);
                for (int i = 0; i < converter.countQuads(); ++i) {
                    update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
                    update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
                    add_quad_segment(quadPts + 2*i, segments);
                }
                break;
            }
            case SkPath::kCubic_Verb: {
                m.mapPoints(pts, 4);
                update_degenerate_test(&degenerateData, pts[1]);
                update_degenerate_test(&degenerateData, pts[2]);
                update_degenerate_test(&degenerateData, pts[3]);
                add_cubic_segments(pts, dir, segments);
                break;
            };
            case SkPath::kDone_Verb:
                // End of path: reject degenerate paths, otherwise finish by
                // computing normals/bisectors and the geometry counts.
                if (degenerateData.isDegenerate()) {
                    return false;
                } else {
                    compute_vectors(segments, fanPt, dir, vCount, iCount);
                    return true;
                }
            default:
                break;
        }
    }
}
334
// Vertex layout consumed by QuadEdgeEffect: device-space position, canonical
// quad UV (u^2 - v == 0 on the curve), and two signed distances used to trim
// the infinite quad (see the shader comment above QuadEdgeEffect).
struct QuadVertex {
    SkPoint fPos;
    SkPoint fUV;
    SkScalar fD0;
    SkScalar fD1;
};
341
// Vertex/index tallies for one indexed draw; a new Draw is started whenever
// 16-bit index values would overflow.
struct Draw {
    Draw() {
        fVertexCnt = 0;
        fIndexCnt = 0;
    }
    int fVertexCnt;
    int fIndexCnt;
};
347
348 typedef SkTArray<Draw, true> DrawArray;
349
// Emits vertices and indices for all segments into verts/idxs (which must be
// sized per compute_vectors' counts). Splits the geometry into multiple Draws
// whenever a segment would push vertex indices past the 16-bit limit. Each
// segment contributes a corner wedge plus either a line quad or a curve patch,
// and (when count >= 3) a fan triangle for the interior.
static void create_vertices(const SegmentArray& segments,
                            const SkPoint& fanPt,
                            DrawArray* draws,
                            QuadVertex* verts,
                            uint16_t* idxs) {
    Draw* draw = &draws->push_back();
    // alias just to make vert/index assignments easier to read.
    int* v = &draw->fVertexCnt;
    int* i = &draw->fIndexCnt;

    int count = segments.count();
    for (int a = 0; a < count; ++a) {
        const Segment& sega = segments[a];
        int b = (a + 1) % count;
        const Segment& segb = segments[b];

        // Check whether adding the verts for this segment to the current draw would cause index
        // values to overflow.
        int vCount = 4;
        if (Segment::kLine == segb.fType) {
            vCount += 5;
        } else {
            vCount += 6;
        }
        if (draw->fVertexCnt + vCount > (1 << 16)) {
            // Start a new draw and advance the output pointers past the
            // geometry already emitted for the finished draw.
            verts += *v;
            idxs += *i;
            draw = &draws->push_back();
            v = &draw->fVertexCnt;
            i = &draw->fIndexCnt;
        }

        // Corner wedge where sega meets segb: the corner point plus three
        // points offset by the end normal, the bisector, and the start normal.
        // FIXME: These tris are inset in the 1 unit arc around the corner
        verts[*v + 0].fPos = sega.endPt();
        verts[*v + 1].fPos = verts[*v + 0].fPos + sega.endNorm();
        verts[*v + 2].fPos = verts[*v + 0].fPos + segb.fMid;
        verts[*v + 3].fPos = verts[*v + 0].fPos + segb.fNorms[0];
        verts[*v + 0].fUV.set(0,0);
        verts[*v + 1].fUV.set(0,-SK_Scalar1);
        verts[*v + 2].fUV.set(0,-SK_Scalar1);
        verts[*v + 3].fUV.set(0,-SK_Scalar1);
        verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
        verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
        verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
        verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;

        idxs[*i + 0] = *v + 0;
        idxs[*i + 1] = *v + 2;
        idxs[*i + 2] = *v + 1;
        idxs[*i + 3] = *v + 0;
        idxs[*i + 4] = *v + 3;
        idxs[*i + 5] = *v + 2;

        *v += 4;
        *i += 6;

        if (Segment::kLine == segb.fType) {
            // Line edge: fan point, the two edge endpoints, and the endpoints
            // pushed out along the edge normal.
            verts[*v + 0].fPos = fanPt;
            verts[*v + 1].fPos = sega.endPt();
            verts[*v + 2].fPos = segb.fPts[0];

            verts[*v + 3].fPos = verts[*v + 1].fPos + segb.fNorms[0];
            verts[*v + 4].fPos = verts[*v + 2].fPos + segb.fNorms[0];

            // we draw the line edge as a degenerate quad (u is 0, v is the
            // signed distance to the edge)
            SkScalar dist = fanPt.distanceToLineBetween(verts[*v + 1].fPos,
                                                        verts[*v + 2].fPos);
            verts[*v + 0].fUV.set(0, dist);
            verts[*v + 1].fUV.set(0, 0);
            verts[*v + 2].fUV.set(0, 0);
            verts[*v + 3].fUV.set(0, -SK_Scalar1);
            verts[*v + 4].fUV.set(0, -SK_Scalar1);

            verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
            verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
            verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
            verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
            verts[*v + 4].fD0 = verts[*v + 4].fD1 = -SK_Scalar1;

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;

            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            *i += 6;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 5;
        } else {
            // Quad edge: fan point, both curve endpoints, endpoints offset by
            // their normals, and the control point offset by the mid-normal.
            SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};

            SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
            midVec.normalize();

            verts[*v + 0].fPos = fanPt;
            verts[*v + 1].fPos = qpts[0];
            verts[*v + 2].fPos = qpts[2];
            verts[*v + 3].fPos = qpts[0] + segb.fNorms[0];
            verts[*v + 4].fPos = qpts[2] + segb.fNorms[1];
            verts[*v + 5].fPos = qpts[1] + midVec;

            // fD0/fD1 are signed distances to the lines through the curve's
            // endpoints; large negative values mark the outer vertices as
            // always "inside" the trim planes.
            SkScalar c = segb.fNorms[0].dot(qpts[0]);
            verts[*v + 0].fD0 =  -segb.fNorms[0].dot(fanPt) + c;
            verts[*v + 1].fD0 =  0.f;
            verts[*v + 2].fD0 =  -segb.fNorms[0].dot(qpts[2]) + c;
            verts[*v + 3].fD0 = -SK_ScalarMax/100;
            verts[*v + 4].fD0 = -SK_ScalarMax/100;
            verts[*v + 5].fD0 = -SK_ScalarMax/100;

            c = segb.fNorms[1].dot(qpts[2]);
            verts[*v + 0].fD1 =  -segb.fNorms[1].dot(fanPt) + c;
            verts[*v + 1].fD1 =  -segb.fNorms[1].dot(qpts[0]) + c;
            verts[*v + 2].fD1 =  0.f;
            verts[*v + 3].fD1 = -SK_ScalarMax/100;
            verts[*v + 4].fD1 = -SK_ScalarMax/100;
            verts[*v + 5].fD1 = -SK_ScalarMax/100;

            // Map positions into the quad's canonical (u, v) space.
            GrPathUtils::QuadUVMatrix toUV(qpts);
            toUV.apply<6, sizeof(QuadVertex), sizeof(SkPoint)>(verts + *v);

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;
            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            idxs[*i + 6] = *v + 5;
            idxs[*i + 7] = *v + 3;
            idxs[*i + 8] = *v + 4;

            *i += 9;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 6;
        }
    }
}
512
513 ///////////////////////////////////////////////////////////////////////////////
514
515 /*
516 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
517 * two components of the vertex attribute. Coverage is based on signed
518 * distance with negative being inside, positive outside. The edge is specified in
519 * window space (y-down). If either the third or fourth component of the interpolated
520 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
521 * attempt to trim to a portion of the infinite quad.
522 * Requires shader derivative instruction support.
523 */
524
// Geometry processor implementing the quad-edge coverage computation described
// in the comment block above. Attributes: vec2 position + vec4 quad edge
// (u, v, d0, d1). Color is a uniform; coverage comes from the edge equation.
class QuadEdgeEffect : public GrGeometryProcessor {
public:

    static sk_sp<GrGeometryProcessor> Make(GrColor color, const SkMatrix& localMatrix,
                                           bool usesLocalCoords) {
        return sk_sp<GrGeometryProcessor>(new QuadEdgeEffect(color, localMatrix, usesLocalCoords));
    }

    ~QuadEdgeEffect() override {}

    const char* name() const override { return "QuadEdge"; }

    const Attribute* inPosition() const { return fInPosition; }
    const Attribute* inQuadEdge() const { return fInQuadEdge; }
    GrColor color() const { return fColor; }
    const SkMatrix& localMatrix() const { return fLocalMatrix; }
    bool usesLocalCoords() const { return fUsesLocalCoords; }

    // GLSL implementation: forwards the quad-edge attribute to the fragment
    // shader and evaluates coverage from the implicit curve u^2 - v = 0.
    class GLSLProcessor : public GrGLSLGeometryProcessor {
    public:
        GLSLProcessor() : fColor(GrColor_ILLEGAL) {}

        void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
            const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>();
            GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
            GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
            GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;

            // emit attributes
            varyingHandler->emitAttributes(qe);

            GrGLSLVertToFrag v(kVec4f_GrSLType);
            varyingHandler->addVarying("QuadEdge", &v);
            vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.inQuadEdge()->fName);

            GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
            // Setup pass through color
            this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor,
                                    &fColorUniform);

            // Setup position
            this->setupPosition(vertBuilder, gpArgs, qe.inPosition()->fName);

            // emit transforms
            this->emitTransforms(vertBuilder,
                                 varyingHandler,
                                 uniformHandler,
                                 gpArgs->fPositionVar,
                                 qe.inPosition()->fName,
                                 qe.localMatrix(),
                                 args.fFPCoordTransformHandler);

            fragBuilder->codeAppendf("float edgeAlpha;");

            // keep the derivative instructions outside the conditional
            fragBuilder->codeAppendf("vec2 duvdx = dFdx(%s.xy);", v.fsIn());
            fragBuilder->codeAppendf("vec2 duvdy = dFdy(%s.xy);", v.fsIn());
            fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
            // today we know z and w are in device space. We could use derivatives
            fragBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);", v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf ("} else {");
            fragBuilder->codeAppendf("vec2 gF = vec2(2.0*%s.x*duvdx.x - duvdx.y,"
                                     "               2.0*%s.x*duvdy.x - duvdy.y);",
                                     v.fsIn(), v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = "
                                     "clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);}");

            fragBuilder->codeAppendf("%s = vec4(edgeAlpha);", args.fOutputCoverage);
        }

        static inline void GenKey(const GrGeometryProcessor& gp,
                                  const GrShaderCaps&,
                                  GrProcessorKeyBuilder* b) {
            // Only perspective local matrices affect the generated program.
            const QuadEdgeEffect& qee = gp.cast<QuadEdgeEffect>();
            b->add32(SkToBool(qee.usesLocalCoords() && qee.localMatrix().hasPerspective()));
        }

        void setData(const GrGLSLProgramDataManager& pdman,
                     const GrPrimitiveProcessor& gp,
                     FPCoordTransformIter&& transformIter) override {
            // Upload the color uniform only when it changed since last use.
            const QuadEdgeEffect& qe = gp.cast<QuadEdgeEffect>();
            if (qe.color() != fColor) {
                float c[4];
                GrColorToRGBAFloat(qe.color(), c);
                pdman.set4fv(fColorUniform, 1, c);
                fColor = qe.color();
            }
            this->setTransformDataHelper(qe.fLocalMatrix, pdman, &transformIter);
        }

    private:
        // Last color pushed to the uniform; GrColor_ILLEGAL forces first upload.
        GrColor fColor;
        UniformHandle fColorUniform;

        typedef GrGLSLGeometryProcessor INHERITED;
    };

    void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
        GLSLProcessor::GenKey(*this, caps, b);
    }

    GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override {
        return new GLSLProcessor();
    }

private:
    QuadEdgeEffect(GrColor color, const SkMatrix& localMatrix, bool usesLocalCoords)
        : fColor(color)
        , fLocalMatrix(localMatrix)
        , fUsesLocalCoords(usesLocalCoords) {
        this->initClassID<QuadEdgeEffect>();
        fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType);
        fInQuadEdge = &this->addVertexAttrib("inQuadEdge", kVec4f_GrVertexAttribType);
    }

    const Attribute* fInPosition;
    const Attribute* fInQuadEdge;
    GrColor fColor;
    SkMatrix fLocalMatrix;
    bool fUsesLocalCoords;

    GR_DECLARE_GEOMETRY_PROCESSOR_TEST;

    typedef GrGeometryProcessor INHERITED;
};
653
654 GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);
655
656 #if GR_TEST_UTILS
TestCreate(GrProcessorTestData * d)657 sk_sp<GrGeometryProcessor> QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
658 // Doesn't work without derivative instructions.
659 return d->caps()->shaderCaps()->shaderDerivativeSupport()
660 ? QuadEdgeEffect::Make(GrRandomColor(d->fRandom),
661 GrTest::TestMatrix(d->fRandom),
662 d->fRandom->nextBool())
663 : nullptr;
664 }
665 #endif
666
667 ///////////////////////////////////////////////////////////////////////////////
668
onCanDrawPath(const CanDrawPathArgs & args) const669 bool GrAAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
670 return (args.fShaderCaps->shaderDerivativeSupport() && (GrAAType::kCoverage == args.fAAType) &&
671 args.fShape->style().isSimpleFill() && !args.fShape->inverseFilled() &&
672 args.fShape->knownToBeConvex());
673 }
674
675 // extract the result vertices and indices from the GrAAConvexTessellator
extract_verts(const GrAAConvexTessellator & tess,void * vertices,size_t vertexStride,GrColor color,uint16_t * idxs,bool tweakAlphaForCoverage)676 static void extract_verts(const GrAAConvexTessellator& tess,
677 void* vertices,
678 size_t vertexStride,
679 GrColor color,
680 uint16_t* idxs,
681 bool tweakAlphaForCoverage) {
682 intptr_t verts = reinterpret_cast<intptr_t>(vertices);
683
684 for (int i = 0; i < tess.numPts(); ++i) {
685 *((SkPoint*)((intptr_t)verts + i * vertexStride)) = tess.point(i);
686 }
687
688 // Make 'verts' point to the colors
689 verts += sizeof(SkPoint);
690 for (int i = 0; i < tess.numPts(); ++i) {
691 if (tweakAlphaForCoverage) {
692 SkASSERT(SkScalarRoundToInt(255.0f * tess.coverage(i)) <= 255);
693 unsigned scale = SkScalarRoundToInt(255.0f * tess.coverage(i));
694 GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
695 *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
696 } else {
697 *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
698 *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) =
699 tess.coverage(i);
700 }
701 }
702
703 for (int i = 0; i < tess.numIndices(); ++i) {
704 idxs[i] = tess.index(i);
705 }
706 }
707
create_fill_gp(bool tweakAlphaForCoverage,const SkMatrix & viewMatrix,bool usesLocalCoords)708 static sk_sp<GrGeometryProcessor> create_fill_gp(bool tweakAlphaForCoverage,
709 const SkMatrix& viewMatrix,
710 bool usesLocalCoords) {
711 using namespace GrDefaultGeoProcFactory;
712
713 Coverage::Type coverageType;
714 if (tweakAlphaForCoverage) {
715 coverageType = Coverage::kSolid_Type;
716 } else {
717 coverageType = Coverage::kAttribute_Type;
718 }
719 LocalCoords::Type localCoordsType =
720 usesLocalCoords ? LocalCoords::kUsePosition_Type : LocalCoords::kUnused_Type;
721 return MakeForDeviceSpace(Color::kPremulGrColorAttribute_Type, coverageType, localCoordsType,
722 viewMatrix);
723 }
724
// Mesh draw op that renders one or more convex paths with analytic AA. Line-
// only paths take a tessellator fast path; general paths use the quad edge
// effect. Compatible ops are merged in onCombineIfPossible().
class AAConvexPathOp final : public GrMeshDrawOp {
public:
    DEFINE_OP_CLASS_ID
    static std::unique_ptr<GrMeshDrawOp> Make(GrColor color, const SkMatrix& viewMatrix,
                                              const SkPath& path) {
        return std::unique_ptr<GrMeshDrawOp>(new AAConvexPathOp(color, viewMatrix, path));
    }

    const char* name() const override { return "AAConvexPathOp"; }

    SkString dumpInfo() const override {
        SkString string;
        string.appendf("Color: 0x%08x, Count: %d\n", fColor, fPaths.count());
        string.append(DumpPipelineInfo(*this->pipeline()));
        string.append(INHERITED::dumpInfo());
        return string;
    }

private:
    AAConvexPathOp(GrColor color, const SkMatrix& viewMatrix, const SkPath& path)
            : INHERITED(ClassID()), fColor(color) {
        fPaths.emplace_back(PathData{viewMatrix, path});
        this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes, IsZeroArea::kNo);
    }

    void getFragmentProcessorAnalysisInputs(GrPipelineAnalysisColor* color,
                                            GrPipelineAnalysisCoverage* coverage) const override {
        color->setToConstant(fColor);
        *coverage = GrPipelineAnalysisCoverage::kSingleChannel;
    }

    void applyPipelineOptimizations(const GrPipelineOptimizations& optimizations) override {
        optimizations.getOverrideColorIfSet(&fColor);

        fUsesLocalCoords = optimizations.readsLocalCoords();
        // The tessellated fast path applies only when the first path has
        // nothing but line segments.
        fLinesOnly = SkPath::kLine_SegmentMask == fPaths[0].fPath.getSegmentMasks();
        fCanTweakAlphaForCoverage = optimizations.canTweakAlphaForCoverage();
    }

    // Fast path for line-only paths: tessellate with GrAAConvexTessellator and
    // emit position/color(/coverage) vertices via the default geo processor.
    void prepareLinesOnlyDraws(Target* target) const {
        bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();

        // Setup GrGeometryProcessor
        sk_sp<GrGeometryProcessor> gp(create_fill_gp(
                canTweakAlphaForCoverage, this->viewMatrix(), this->usesLocalCoords()));
        if (!gp) {
            SkDebugf("Could not create GrGeometryProcessor\n");
            return;
        }

        size_t vertexStride = gp->getVertexStride();

        SkASSERT(canTweakAlphaForCoverage ?
                 vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr) :
                 vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));

        GrAAConvexTessellator tess;

        int instanceCount = fPaths.count();

        for (int i = 0; i < instanceCount; i++) {
            tess.rewind();

            const PathData& args = fPaths[i];

            if (!tess.tessellate(args.fViewMatrix, args.fPath)) {
                continue;
            }

            const GrBuffer* vertexBuffer;
            int firstVertex;

            void* verts = target->makeVertexSpace(vertexStride, tess.numPts(), &vertexBuffer,
                                                  &firstVertex);
            if (!verts) {
                SkDebugf("Could not allocate vertices\n");
                return;
            }

            const GrBuffer* indexBuffer;
            int firstIndex;

            uint16_t* idxs = target->makeIndexSpace(tess.numIndices(), &indexBuffer, &firstIndex);
            if (!idxs) {
                SkDebugf("Could not allocate indices\n");
                return;
            }

            extract_verts(tess, verts, vertexStride, fColor, idxs, canTweakAlphaForCoverage);

            GrMesh mesh;
            mesh.initIndexed(kTriangles_GrPrimitiveType,
                             vertexBuffer, indexBuffer,
                             firstVertex, firstIndex,
                             tess.numPts(), tess.numIndices());
            target->draw(gp.get(), mesh);
        }
    }

    void onPrepareDraws(Target* target) const override {
#ifndef SK_IGNORE_LINEONLY_AA_CONVEX_PATH_OPTS
        if (this->linesOnly()) {
            this->prepareLinesOnlyDraws(target);
            return;
        }
#endif

        int instanceCount = fPaths.count();

        SkMatrix invert;
        if (this->usesLocalCoords() && !this->viewMatrix().invert(&invert)) {
            SkDebugf("Could not invert viewmatrix\n");
            return;
        }

        // Setup GrGeometryProcessor
        sk_sp<GrGeometryProcessor> quadProcessor(
                QuadEdgeEffect::Make(this->color(), invert, this->usesLocalCoords()));

        // TODO generate all segments for all paths and use one vertex buffer
        for (int i = 0; i < instanceCount; i++) {
            const PathData& args = fPaths[i];

            // We use the fact that SkPath::transform path does subdivision based on
            // perspective. Otherwise, we apply the view matrix when copying to the
            // segment representation.
            const SkMatrix* viewMatrix = &args.fViewMatrix;

            // We avoid initializing the path unless we have to
            const SkPath* pathPtr = &args.fPath;
            SkTLazy<SkPath> tmpPath;
            if (viewMatrix->hasPerspective()) {
                SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
                tmpPathPtr->setIsVolatile(true);
                tmpPathPtr->transform(*viewMatrix);
                viewMatrix = &SkMatrix::I();
                pathPtr = tmpPathPtr;
            }

            int vertexCount;
            int indexCount;
            enum {
                kPreallocSegmentCnt = 512 / sizeof(Segment),
                kPreallocDrawCnt = 4,
            };
            SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
            SkPoint fanPt;

            // get_segments rejects degenerate paths; skip them silently.
            if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
                              &indexCount)) {
                continue;
            }

            const GrBuffer* vertexBuffer;
            int firstVertex;

            size_t vertexStride = quadProcessor->getVertexStride();
            QuadVertex* verts = reinterpret_cast<QuadVertex*>(target->makeVertexSpace(
                    vertexStride, vertexCount, &vertexBuffer, &firstVertex));

            if (!verts) {
                SkDebugf("Could not allocate vertices\n");
                return;
            }

            const GrBuffer* indexBuffer;
            int firstIndex;

            uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
            if (!idxs) {
                SkDebugf("Could not allocate indices\n");
                return;
            }

            SkSTArray<kPreallocDrawCnt, Draw, true> draws;
            create_vertices(segments, fanPt, &draws, verts, idxs);

            GrMesh mesh;

            // create_vertices may have split the geometry into multiple draws
            // to keep indices within 16 bits; issue each one.
            for (int j = 0; j < draws.count(); ++j) {
                const Draw& draw = draws[j];
                mesh.initIndexed(kTriangles_GrPrimitiveType, vertexBuffer, indexBuffer,
                                 firstVertex, firstIndex, draw.fVertexCnt, draw.fIndexCnt);
                target->draw(quadProcessor.get(), mesh);
                firstVertex += draw.fVertexCnt;
                firstIndex += draw.fIndexCnt;
            }
        }
    }

    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
        AAConvexPathOp* that = t->cast<AAConvexPathOp>();
        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                    that->bounds(), caps)) {
            return false;
        }

        if (this->color() != that->color()) {
            return false;
        }

        SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
        if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
            return false;
        }

        if (this->linesOnly() != that->linesOnly()) {
            return false;
        }

        // In the event of two ops, one who can tweak, one who cannot, we just fall back to not
        // tweaking
        if (this->canTweakAlphaForCoverage() != that->canTweakAlphaForCoverage()) {
            fCanTweakAlphaForCoverage = false;
        }

        fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
        this->joinBounds(*that);
        return true;
    }

    GrColor color() const { return fColor; }
    bool linesOnly() const { return fLinesOnly; }
    bool usesLocalCoords() const { return fUsesLocalCoords; }
    bool canTweakAlphaForCoverage() const { return fCanTweakAlphaForCoverage; }
    // All batched paths share state; the first path's view matrix stands in
    // for the op (enforced by onCombineIfPossible).
    const SkMatrix& viewMatrix() const { return fPaths[0].fViewMatrix; }

    GrColor fColor;
    bool fUsesLocalCoords;
    bool fLinesOnly;
    bool fCanTweakAlphaForCoverage;

    struct PathData {
        SkMatrix fViewMatrix;
        SkPath fPath;
    };

    SkSTArray<1, PathData, true> fPaths;

    typedef GrMeshDrawOp INHERITED;
};
966
// Converts the shape to a path, wraps it in an AAConvexPathOp, and submits the
// op to the render target context. Always reports success (onCanDrawPath has
// already vetted the shape).
bool GrAAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
                              "GrAAConvexPathRenderer::onDrawPath");
    SkASSERT(!args.fRenderTargetContext->isUnifiedMultisampled());
    SkASSERT(!args.fShape->isEmpty());

    SkPath path;
    args.fShape->asPath(&path);

    std::unique_ptr<GrMeshDrawOp> op =
            AAConvexPathOp::Make(args.fPaint.getColor(), *args.fViewMatrix, path);

    GrPipelineBuilder pipelineBuilder(std::move(args.fPaint), args.fAAType);
    pipelineBuilder.setUserStencil(args.fUserStencilSettings);

    args.fRenderTargetContext->addMeshDrawOp(pipelineBuilder, *args.fClip, std::move(op));

    return true;

}
987
988 ///////////////////////////////////////////////////////////////////////////////////////////////////
989
990 #if GR_TEST_UTILS
991
DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
    // Fuzz with a random color, an invertible view matrix, and a random
    // convex path. NOTE(review): the sequential locals fix the order in which
    // randomness is consumed — do not inline them into the Make() call.
    GrColor color = GrRandomColor(random);
    SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
    SkPath path = GrTest::TestPathConvex(random);

    return AAConvexPathOp::Make(color, viewMatrix, path);
}
999
1000 #endif
1001