/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrAAConvexPathRenderer.h"

#include "GrAAConvexTessellator.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrDrawOpTest.h"
#include "GrGeometryProcessor.h"
#include "GrOpFlushState.h"
#include "GrPathUtils.h"
#include "GrProcessor.h"
#include "GrSimpleMeshDrawOpHelper.h"
#include "SkGeometry.h"
#include "SkPathPriv.h"
#include "SkString.h"
#include "SkTraceEvent.h"
#include "glsl/GrGLSLFragmentShaderBuilder.h"
#include "glsl/GrGLSLGeometryProcessor.h"
#include "glsl/GrGLSLProgramDataManager.h"
#include "glsl/GrGLSLUniformHandler.h"
#include "glsl/GrGLSLVarying.h"
#include "glsl/GrGLSLVertexShaderBuilder.h"
#include "ops/GrMeshDrawOp.h"

GrAAConvexPathRenderer::GrAAConvexPathRenderer() {
}

struct Segment {
    enum {
        // These enum values are assumed in member functions below.
        kLine = 0,
        kQuad = 1,
    } fType;

    // a line uses one pt, a quad uses two pts
    SkPoint fPts[2];
    // outward normal of the edge ending at each pt
    SkVector fNorms[2];
    // normalized, outward-facing bisector of the corner where the previous
    // segment meets this one
    SkVector fMid;

    int countPoints() {
        GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
        return fType + 1;
    }
    const SkPoint& endPt() const {
        GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
        return fPts[fType];
    }
    const SkPoint& endNorm() const {
        GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
        return fNorms[fType];
    }
};

typedef SkTArray<Segment, true> SegmentArray;

static void center_of_mass(const SegmentArray& segments, SkPoint* c) {
    SkScalar area = 0;
    SkPoint center = {0, 0};
    int count = segments.count();
    SkPoint p0 = {0, 0};
    if (count > 2) {
        // We translate the polygon so that the first point is at the origin.
        // This avoids some precision issues with small area polygons far away
        // from the origin.
        p0 = segments[0].endPt();
        SkPoint pi;
        SkPoint pj;
        // the first and last iteration of the loop below would compute
        // zeros since the starting / ending point is (0,0). So instead we start
        // at i=1 and make the last iteration i=count-2.
        pj = segments[1].endPt() - p0;
        for (int i = 1; i < count - 1; ++i) {
            pi = pj;
            pj = segments[i + 1].endPt() - p0;

            SkScalar t = SkPoint::CrossProduct(pi, pj);
            area += t;
            center.fX += (pi.fX + pj.fX) * t;
            center.fY += (pi.fY + pj.fY) * t;
        }
    }

    // If the poly has no area then we instead return the average of
    // its points.
    if (SkScalarNearlyZero(area)) {
        SkPoint avg;
        avg.set(0, 0);
        for (int i = 0; i < count; ++i) {
            const SkPoint& pt = segments[i].endPt();
            avg.fX += pt.fX;
            avg.fY += pt.fY;
        }
        SkScalar denom = SK_Scalar1 / count;
        avg.scale(denom);
        *c = avg;
    } else {
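        // A polygon's signed area is (1/2) * sum(cross(p[i], p[i+1])) and its centroid is
        // (1/(6*area)) * sum((p[i] + p[i+1]) * cross(p[i], p[i+1])). Both constant factors
        // were dropped in the loop above, so dividing the accumulated center by 3*area
        // recovers the centroid.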
        area *= 3;
        area = SkScalarInvert(area);
        center.scale(area);
        // undo the translate of p0 to the origin.
        *c = center + p0;
    }
    SkASSERT(!SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY));
}

static void compute_vectors(SegmentArray* segments,
                            SkPoint* fanPt,
                            SkPathPriv::FirstDirection dir,
                            int* vCount,
                            int* iCount) {
    center_of_mass(*segments, fanPt);
    int count = segments->count();

    // Make the normals point towards the outside
    SkPoint::Side normSide;
    if (dir == SkPathPriv::kCCW_FirstDirection) {
        normSide = SkPoint::kRight_Side;
    } else {
        normSide = SkPoint::kLeft_Side;
    }

    *vCount = 0;
    *iCount = 0;
    // compute normals at all points
    for (int a = 0; a < count; ++a) {
        Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];

        const SkPoint* prevPt = &sega.endPt();
        int n = segb.countPoints();
        for (int p = 0; p < n; ++p) {
            segb.fNorms[p] = segb.fPts[p] - *prevPt;
            segb.fNorms[p].normalize();
            segb.fNorms[p].setOrthog(segb.fNorms[p], normSide);
            prevPt = &segb.fPts[p];
        }
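        // Per-segment vertex/index budget (see create_vertices()): a line edge emits an AA
        // edge quad plus one interior fan triangle (5 verts / 9 indices); a quad edge emits
        // one extra outer vertex (6 verts / 12 indices). The corner wedges are budgeted in
        // the loop below.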
        if (Segment::kLine == segb.fType) {
            *vCount += 5;
            *iCount += 9;
        } else {
            *vCount += 6;
            *iCount += 12;
        }
    }

    // compute mid-vectors where segments meet. TODO: Detect shallow corners
    // and leave out the wedges and close gaps by stitching segments together.
    for (int a = 0; a < count; ++a) {
        const Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];
        segb.fMid = segb.fNorms[0] + sega.endNorm();
        segb.fMid.normalize();
        // corner wedges
        *vCount += 4;
        *iCount += 6;
    }
}

struct DegenerateTestData {
    DegenerateTestData() { fStage = kInitial; }
    bool isDegenerate() const { return kNonDegenerate != fStage; }
    enum {
        kInitial,
        kPoint,
        kLine,
        kNonDegenerate
    } fStage;
    SkPoint fFirstPoint;
    SkVector fLineNormal;
    SkScalar fLineC;
};

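// Points closer than this (in device space) are treated as coincident when detecting
// degenerate contours and when collapsing nearly-degenerate quads to lines.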
static const SkScalar kClose = (SK_Scalar1 / 16);
static const SkScalar kCloseSqd = kClose * kClose;

static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
    switch (data->fStage) {
        case DegenerateTestData::kInitial:
            data->fFirstPoint = pt;
            data->fStage = DegenerateTestData::kPoint;
            break;
        case DegenerateTestData::kPoint:
            if (pt.distanceToSqd(data->fFirstPoint) > kCloseSqd) {
                data->fLineNormal = pt - data->fFirstPoint;
                data->fLineNormal.normalize();
                data->fLineNormal.setOrthog(data->fLineNormal);
                data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
                data->fStage = DegenerateTestData::kLine;
            }
            break;
        case DegenerateTestData::kLine:
            if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
                data->fStage = DegenerateTestData::kNonDegenerate;
            }
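            // fall through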
        case DegenerateTestData::kNonDegenerate:
            break;
        default:
            SkFAIL("Unexpected degenerate test stage.");
    }
}

static inline bool get_direction(const SkPath& path, const SkMatrix& m,
                                 SkPathPriv::FirstDirection* dir) {
    if (!SkPathPriv::CheapComputeFirstDirection(path, dir)) {
        return false;
    }
    // check whether m reverses the orientation
    SkASSERT(!m.hasPerspective());
    SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
                      m.get(SkMatrix::kMSkewX) * m.get(SkMatrix::kMSkewY);
    if (det2x2 < 0) {
        *dir = SkPathPriv::OppositeFirstDirection(*dir);
    }
    return true;
}

static inline void add_line_to_segment(const SkPoint& pt,
                                       SegmentArray* segments) {
    segments->push_back();
    segments->back().fType = Segment::kLine;
    segments->back().fPts[0] = pt;
}

static inline void add_quad_segment(const SkPoint pts[3],
                                    SegmentArray* segments) {
    if (pts[0].distanceToSqd(pts[1]) < kCloseSqd || pts[1].distanceToSqd(pts[2]) < kCloseSqd) {
        if (pts[0] != pts[2]) {
            add_line_to_segment(pts[2], segments);
        }
    } else {
        segments->push_back();
        segments->back().fType = Segment::kQuad;
        segments->back().fPts[0] = pts[1];
        segments->back().fPts[1] = pts[2];
    }
}

static inline void add_cubic_segments(const SkPoint pts[4],
                                      SkPathPriv::FirstDirection dir,
                                      SegmentArray* segments) {
    SkSTArray<15, SkPoint, true> quads;
    GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
    int count = quads.count();
    for (int q = 0; q < count; q += 3) {
        add_quad_segment(&quads[q], segments);
    }
}

static bool get_segments(const SkPath& path,
                         const SkMatrix& m,
                         SegmentArray* segments,
                         SkPoint* fanPt,
                         int* vCount,
                         int* iCount) {
    SkPath::Iter iter(path, true);
    // This renderer over-emphasizes very thin path regions. Coverage is computed from the
    // distance of the sample to the path, so every pixel the path intersects is hit and the
    // maximum distance is sqrt(2)/2; we never notice that the sample may sit near a very thin
    // region of the path and therefore should be very light. This is particularly egregious
    // for degenerate line paths. To compensate, we detect paths that are very close to a line
    // (zero area) and draw nothing.
    DegenerateTestData degenerateData;
    SkPathPriv::FirstDirection dir;
    // get_direction can fail for some degenerate paths.
    if (!get_direction(path, m, &dir)) {
        return false;
    }

    for (;;) {
        SkPoint pts[4];
        SkPath::Verb verb = iter.next(pts, true, true);
        switch (verb) {
            case SkPath::kMove_Verb:
                m.mapPoints(pts, 1);
                update_degenerate_test(&degenerateData, pts[0]);
                break;
            case SkPath::kLine_Verb: {
                m.mapPoints(&pts[1], 1);
                update_degenerate_test(&degenerateData, pts[1]);
                add_line_to_segment(pts[1], segments);
                break;
            }
            case SkPath::kQuad_Verb:
                m.mapPoints(pts, 3);
                update_degenerate_test(&degenerateData, pts[1]);
                update_degenerate_test(&degenerateData, pts[2]);
                add_quad_segment(pts, segments);
                break;
            case SkPath::kConic_Verb: {
                m.mapPoints(pts, 3);
                SkScalar weight = iter.conicWeight();
                SkAutoConicToQuads converter;
                const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.5f);
                for (int i = 0; i < converter.countQuads(); ++i) {
                    update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
                    update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
                    add_quad_segment(quadPts + 2*i, segments);
                }
                break;
            }
            case SkPath::kCubic_Verb: {
                m.mapPoints(pts, 4);
                update_degenerate_test(&degenerateData, pts[1]);
                update_degenerate_test(&degenerateData, pts[2]);
                update_degenerate_test(&degenerateData, pts[3]);
                add_cubic_segments(pts, dir, segments);
                break;
            }
            case SkPath::kDone_Verb:
                if (degenerateData.isDegenerate()) {
                    return false;
                } else {
                    compute_vectors(segments, fanPt, dir, vCount, iCount);
                    return true;
                }
            default:
                break;
        }
    }
}

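// Vertex layout for the general (quad-containing) path. fUV, fD0, and fD1 together feed the
// vec4 "inQuadEdge" attribute of QuadEdgeEffect below: fUV holds the quad's canonical (u, v)
// coords (the curve is u^2 - v = 0) and fD0/fD1 are the edge-distance values used to trim the
// infinite quad.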
struct QuadVertex {
    SkPoint fPos;
    GrColor fColor;
    SkPoint fUV;
    SkScalar fD0;
    SkScalar fD1;
};

struct Draw {
    Draw() : fVertexCnt(0), fIndexCnt(0) {}
    int fVertexCnt;
    int fIndexCnt;
};

typedef SkTArray<Draw, true> DrawArray;

static void create_vertices(const SegmentArray& segments,
                            const SkPoint& fanPt,
                            GrColor color,
                            DrawArray* draws,
                            QuadVertex* verts,
                            uint16_t* idxs) {
    Draw* draw = &draws->push_back();
    // alias just to make vert/index assignments easier to read.
    int* v = &draw->fVertexCnt;
    int* i = &draw->fIndexCnt;

    int count = segments.count();
    for (int a = 0; a < count; ++a) {
        const Segment& sega = segments[a];
        int b = (a + 1) % count;
        const Segment& segb = segments[b];

        // Check whether adding the verts for this segment to the current draw would cause index
        // values to overflow.
        int vCount = 4;
        if (Segment::kLine == segb.fType) {
            vCount += 5;
        } else {
            vCount += 6;
        }
        if (draw->fVertexCnt + vCount > (1 << 16)) {
            verts += *v;
            idxs += *i;
            draw = &draws->push_back();
            v = &draw->fVertexCnt;
            i = &draw->fIndexCnt;
        }

        // FIXME: These tris are inset in the 1 unit arc around the corner
        verts[*v + 0].fPos = sega.endPt();
        verts[*v + 1].fPos = verts[*v + 0].fPos + sega.endNorm();
        verts[*v + 2].fPos = verts[*v + 0].fPos + segb.fMid;
        verts[*v + 3].fPos = verts[*v + 0].fPos + segb.fNorms[0];
        verts[*v + 0].fColor = color;
        verts[*v + 1].fColor = color;
        verts[*v + 2].fColor = color;
        verts[*v + 3].fColor = color;
        verts[*v + 0].fUV.set(0, 0);
        verts[*v + 1].fUV.set(0, -SK_Scalar1);
        verts[*v + 2].fUV.set(0, -SK_Scalar1);
        verts[*v + 3].fUV.set(0, -SK_Scalar1);
        verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
        verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
        verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
        verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;

        idxs[*i + 0] = *v + 0;
        idxs[*i + 1] = *v + 2;
        idxs[*i + 2] = *v + 1;
        idxs[*i + 3] = *v + 0;
        idxs[*i + 4] = *v + 3;
        idxs[*i + 5] = *v + 2;

        *v += 4;
        *i += 6;

        if (Segment::kLine == segb.fType) {
            verts[*v + 0].fPos = fanPt;
            verts[*v + 1].fPos = sega.endPt();
            verts[*v + 2].fPos = segb.fPts[0];

            verts[*v + 3].fPos = verts[*v + 1].fPos + segb.fNorms[0];
            verts[*v + 4].fPos = verts[*v + 2].fPos + segb.fNorms[0];

            verts[*v + 0].fColor = color;
            verts[*v + 1].fColor = color;
            verts[*v + 2].fColor = color;
            verts[*v + 3].fColor = color;
            verts[*v + 4].fColor = color;

            // we draw the line edge as a degenerate quad (u is 0, v is the
            // signed distance to the edge)
            SkScalar dist = fanPt.distanceToLineBetween(verts[*v + 1].fPos,
                                                        verts[*v + 2].fPos);
            verts[*v + 0].fUV.set(0, dist);
            verts[*v + 1].fUV.set(0, 0);
            verts[*v + 2].fUV.set(0, 0);
            verts[*v + 3].fUV.set(0, -SK_Scalar1);
            verts[*v + 4].fUV.set(0, -SK_Scalar1);

            verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
            verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
            verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
            verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
            verts[*v + 4].fD0 = verts[*v + 4].fD1 = -SK_Scalar1;

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;

            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            *i += 6;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 5;
        } else {
            SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};

            SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
            midVec.normalize();
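            // midVec is the outward bisector of the segment's two edge normals; it offsets
            // the quad's off-curve control point to form the outer AA hull vertex below.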

            verts[*v + 0].fPos = fanPt;
            verts[*v + 1].fPos = qpts[0];
            verts[*v + 2].fPos = qpts[2];
            verts[*v + 3].fPos = qpts[0] + segb.fNorms[0];
            verts[*v + 4].fPos = qpts[2] + segb.fNorms[1];
            verts[*v + 5].fPos = qpts[1] + midVec;

            verts[*v + 0].fColor = color;
            verts[*v + 1].fColor = color;
            verts[*v + 2].fColor = color;
            verts[*v + 3].fColor = color;
            verts[*v + 4].fColor = color;
            verts[*v + 5].fColor = color;

            SkScalar c = segb.fNorms[0].dot(qpts[0]);
            verts[*v + 0].fD0 = -segb.fNorms[0].dot(fanPt) + c;
            verts[*v + 1].fD0 = 0.f;
            verts[*v + 2].fD0 = -segb.fNorms[0].dot(qpts[2]) + c;
            verts[*v + 3].fD0 = -SK_ScalarMax/100;
            verts[*v + 4].fD0 = -SK_ScalarMax/100;
            verts[*v + 5].fD0 = -SK_ScalarMax/100;

            c = segb.fNorms[1].dot(qpts[2]);
            verts[*v + 0].fD1 = -segb.fNorms[1].dot(fanPt) + c;
            verts[*v + 1].fD1 = -segb.fNorms[1].dot(qpts[0]) + c;
            verts[*v + 2].fD1 = 0.f;
            verts[*v + 3].fD1 = -SK_ScalarMax/100;
            verts[*v + 4].fD1 = -SK_ScalarMax/100;
            verts[*v + 5].fD1 = -SK_ScalarMax/100;

            GrPathUtils::QuadUVMatrix toUV(qpts);
            toUV.apply<6, sizeof(QuadVertex), offsetof(QuadVertex, fUV)>(verts + *v);

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;
            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            idxs[*i + 6] = *v + 5;
            idxs[*i + 7] = *v + 3;
            idxs[*i + 8] = *v + 4;

            *i += 9;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 6;
        }
    }
}

///////////////////////////////////////////////////////////////////////////////

/*
 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
 * two components of the vertex attribute. Coverage is based on signed
 * distance with negative being inside, positive outside. The edge is specified in
 * window space (y-down). If either the third or fourth component of the interpolated
 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
 * attempt to trim to a portion of the infinite quad.
 * Requires shader derivative instruction support.
 */

class QuadEdgeEffect : public GrGeometryProcessor {
public:
    static sk_sp<GrGeometryProcessor> Make(const SkMatrix& localMatrix, bool usesLocalCoords) {
        return sk_sp<GrGeometryProcessor>(new QuadEdgeEffect(localMatrix, usesLocalCoords));
    }

    ~QuadEdgeEffect() override {}

    const char* name() const override { return "QuadEdge"; }

    class GLSLProcessor : public GrGLSLGeometryProcessor {
    public:
        GLSLProcessor() {}

        void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
            const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>();
            GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
            GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
            GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;

            // emit attributes
            varyingHandler->emitAttributes(qe);

            GrGLSLVertToFrag v(kVec4f_GrSLType);
            varyingHandler->addVarying("QuadEdge", &v);
            vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge->fName);

            // Setup pass through color
            varyingHandler->addPassThroughAttribute(qe.fInColor, args.fOutputColor);

            GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;

            // Setup position
            this->setupPosition(vertBuilder, gpArgs, qe.fInPosition->fName);

            // emit transforms
            this->emitTransforms(vertBuilder,
                                 varyingHandler,
                                 uniformHandler,
                                 gpArgs->fPositionVar,
                                 qe.fInPosition->fName,
                                 qe.fLocalMatrix,
                                 args.fFPCoordTransformHandler);

            fragBuilder->codeAppendf("float edgeAlpha;");

            // keep the derivative instructions outside the conditional
            fragBuilder->codeAppendf("vec2 duvdx = dFdx(%s.xy);", v.fsIn());
            fragBuilder->codeAppendf("vec2 duvdy = dFdy(%s.xy);", v.fsIn());
            fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
            // today we know z and w are in device space. We could use derivatives
            fragBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);", v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf("} else {");
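            // Signed-distance approximation to the curve f(u,v) = u^2 - v = 0: coverage is
            // 0.5 - f / |grad f|, with grad f pulled back to device space via the chain rule
            // using the duvdx/duvdy derivatives computed above.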
            fragBuilder->codeAppendf("vec2 gF = vec2(2.0*%s.x*duvdx.x - duvdx.y,"
                                     " 2.0*%s.x*duvdy.x - duvdy.y);",
                                     v.fsIn(), v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = "
                                     "clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);}");

            fragBuilder->codeAppendf("%s = vec4(edgeAlpha);", args.fOutputCoverage);
        }

        static inline void GenKey(const GrGeometryProcessor& gp,
                                  const GrShaderCaps&,
                                  GrProcessorKeyBuilder* b) {
            const QuadEdgeEffect& qee = gp.cast<QuadEdgeEffect>();
            b->add32(SkToBool(qee.fUsesLocalCoords && qee.fLocalMatrix.hasPerspective()));
        }

        void setData(const GrGLSLProgramDataManager& pdman,
                     const GrPrimitiveProcessor& gp,
                     FPCoordTransformIter&& transformIter) override {
            const QuadEdgeEffect& qe = gp.cast<QuadEdgeEffect>();
            this->setTransformDataHelper(qe.fLocalMatrix, pdman, &transformIter);
        }

    private:
        typedef GrGLSLGeometryProcessor INHERITED;
    };

    void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
        GLSLProcessor::GenKey(*this, caps, b);
    }

    GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override {
        return new GLSLProcessor();
    }

private:
    QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords)
            : fLocalMatrix(localMatrix), fUsesLocalCoords(usesLocalCoords) {
        this->initClassID<QuadEdgeEffect>();
        fInPosition = &this->addVertexAttrib("inPosition", kVec2f_GrVertexAttribType);
        fInColor = &this->addVertexAttrib("inColor", kVec4ub_GrVertexAttribType);
        fInQuadEdge = &this->addVertexAttrib("inQuadEdge", kVec4f_GrVertexAttribType);
    }

    const Attribute* fInPosition;
    const Attribute* fInQuadEdge;
    const Attribute* fInColor;
    SkMatrix fLocalMatrix;
    bool fUsesLocalCoords;

    GR_DECLARE_GEOMETRY_PROCESSOR_TEST

    typedef GrGeometryProcessor INHERITED;
};

GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);

#if GR_TEST_UTILS
sk_sp<GrGeometryProcessor> QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
    // Doesn't work without derivative instructions.
    return d->caps()->shaderCaps()->shaderDerivativeSupport()
                   ? QuadEdgeEffect::Make(GrTest::TestMatrix(d->fRandom), d->fRandom->nextBool())
                   : nullptr;
}
#endif

///////////////////////////////////////////////////////////////////////////////

bool GrAAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    return (args.fCaps->shaderCaps()->shaderDerivativeSupport() &&
            (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() &&
            !args.fShape->inverseFilled() && args.fShape->knownToBeConvex());
}

// extract the result vertices and indices from the GrAAConvexTessellator
static void extract_lines_only_verts(const GrAAConvexTessellator& tess,
                                     void* vertices,
                                     size_t vertexStride,
                                     GrColor color,
                                     uint16_t* idxs,
                                     bool tweakAlphaForCoverage) {
    intptr_t verts = reinterpret_cast<intptr_t>(vertices);

    for (int i = 0; i < tess.numPts(); ++i) {
        *((SkPoint*)((intptr_t)verts + i * vertexStride)) = tess.point(i);
    }

    // Make 'verts' point to the colors
    verts += sizeof(SkPoint);
    for (int i = 0; i < tess.numPts(); ++i) {
        if (tweakAlphaForCoverage) {
            SkASSERT(SkScalarRoundToInt(255.0f * tess.coverage(i)) <= 255);
            unsigned scale = SkScalarRoundToInt(255.0f * tess.coverage(i));
            GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
            *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
        } else {
            *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
            *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) =
                    tess.coverage(i);
        }
    }

    for (int i = 0; i < tess.numIndices(); ++i) {
        idxs[i] = tess.index(i);
    }
}

static sk_sp<GrGeometryProcessor> make_lines_only_gp(bool tweakAlphaForCoverage,
                                                     const SkMatrix& viewMatrix,
                                                     bool usesLocalCoords) {
    using namespace GrDefaultGeoProcFactory;

    Coverage::Type coverageType;
    if (tweakAlphaForCoverage) {
        coverageType = Coverage::kSolid_Type;
    } else {
        coverageType = Coverage::kAttribute_Type;
    }
    LocalCoords::Type localCoordsType =
            usesLocalCoords ? LocalCoords::kUsePosition_Type : LocalCoords::kUnused_Type;
    return MakeForDeviceSpace(Color::kPremulGrColorAttribute_Type, coverageType, localCoordsType,
                              viewMatrix);
}

namespace {

class AAConvexPathOp final : public GrMeshDrawOp {
private:
    using Helper = GrSimpleMeshDrawOpHelperWithStencil;

public:
    DEFINE_OP_CLASS_ID
    static std::unique_ptr<GrDrawOp> Make(GrPaint&& paint, const SkMatrix& viewMatrix,
                                          const SkPath& path,
                                          const GrUserStencilSettings* stencilSettings) {
        return Helper::FactoryHelper<AAConvexPathOp>(std::move(paint), viewMatrix, path,
                                                     stencilSettings);
    }

    AAConvexPathOp(const Helper::MakeArgs& helperArgs, GrColor color, const SkMatrix& viewMatrix,
                   const SkPath& path, const GrUserStencilSettings* stencilSettings)
            : INHERITED(ClassID()), fHelper(helperArgs, GrAAType::kCoverage, stencilSettings) {
        fPaths.emplace_back(PathData{viewMatrix, path, color});
        this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes, IsZeroArea::kNo);
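        // If the path contains only line verbs we can use the simpler GrAAConvexTessellator
        // path in onPrepareDraws().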
        fLinesOnly = SkPath::kLine_SegmentMask == path.getSegmentMasks();
    }

    const char* name() const override { return "AAConvexPathOp"; }

    SkString dumpInfo() const override {
        SkString string;
        string.appendf("Count: %d\n", fPaths.count());
        string += fHelper.dumpInfo();
        string += INHERITED::dumpInfo();
        return string;
    }

    FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }

    RequiresDstTexture finalize(const GrCaps& caps, const GrAppliedClip* clip) override {
        return fHelper.xpRequiresDstTexture(caps, clip, GrProcessorAnalysisCoverage::kSingleChannel,
                                            &fPaths.back().fColor);
    }

private:
    void prepareLinesOnlyDraws(Target* target) const {
        // Setup GrGeometryProcessor
        sk_sp<GrGeometryProcessor> gp(make_lines_only_gp(fHelper.compatibleWithAlphaAsCoverage(),
                                                         fPaths.back().fViewMatrix,
                                                         fHelper.usesLocalCoords()));
        if (!gp) {
            SkDebugf("Could not create GrGeometryProcessor\n");
            return;
        }

        size_t vertexStride = gp->getVertexStride();

        SkASSERT(fHelper.compatibleWithAlphaAsCoverage()
                         ? vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr)
                         : vertexStride ==
                                   sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));

        GrAAConvexTessellator tess;

        int instanceCount = fPaths.count();
        const GrPipeline* pipeline = fHelper.makePipeline(target);
        for (int i = 0; i < instanceCount; i++) {
            tess.rewind();

            const PathData& args = fPaths[i];

            if (!tess.tessellate(args.fViewMatrix, args.fPath)) {
                continue;
            }

            const GrBuffer* vertexBuffer;
            int firstVertex;

            void* verts = target->makeVertexSpace(vertexStride, tess.numPts(), &vertexBuffer,
                                                  &firstVertex);
            if (!verts) {
                SkDebugf("Could not allocate vertices\n");
                return;
            }

            const GrBuffer* indexBuffer;
            int firstIndex;

            uint16_t* idxs = target->makeIndexSpace(tess.numIndices(), &indexBuffer, &firstIndex);
            if (!idxs) {
                SkDebugf("Could not allocate indices\n");
                return;
            }

            extract_lines_only_verts(tess, verts, vertexStride, args.fColor, idxs,
                                     fHelper.compatibleWithAlphaAsCoverage());

            GrMesh mesh(GrPrimitiveType::kTriangles);
            mesh.setIndexed(indexBuffer, tess.numIndices(), firstIndex, 0, tess.numPts() - 1);
            mesh.setVertexData(vertexBuffer, firstVertex);
            target->draw(gp.get(), pipeline, mesh);
        }
    }

    void onPrepareDraws(Target* target) const override {
#ifndef SK_IGNORE_LINEONLY_AA_CONVEX_PATH_OPTS
        if (fLinesOnly) {
            this->prepareLinesOnlyDraws(target);
            return;
        }
#endif
        const GrPipeline* pipeline = fHelper.makePipeline(target);
        int instanceCount = fPaths.count();

        SkMatrix invert;
        if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) {
            SkDebugf("Could not invert viewmatrix\n");
            return;
        }

        // Setup GrGeometryProcessor
        sk_sp<GrGeometryProcessor> quadProcessor(
                QuadEdgeEffect::Make(invert, fHelper.usesLocalCoords()));

        // TODO generate all segments for all paths and use one vertex buffer
        for (int i = 0; i < instanceCount; i++) {
            const PathData& args = fPaths[i];

            // We rely on SkPath::transform() to subdivide the path when the matrix has
            // perspective. Otherwise, we apply the view matrix when copying to the segment
            // representation.
            const SkMatrix* viewMatrix = &args.fViewMatrix;

            // We avoid initializing the path unless we have to
            const SkPath* pathPtr = &args.fPath;
            SkTLazy<SkPath> tmpPath;
            if (viewMatrix->hasPerspective()) {
                SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
                tmpPathPtr->setIsVolatile(true);
                tmpPathPtr->transform(*viewMatrix);
                viewMatrix = &SkMatrix::I();
                pathPtr = tmpPathPtr;
            }

            int vertexCount;
            int indexCount;
            enum {
                kPreallocSegmentCnt = 512 / sizeof(Segment),
                kPreallocDrawCnt = 4,
            };
            SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
            SkPoint fanPt;

            if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
                              &indexCount)) {
                continue;
            }

            const GrBuffer* vertexBuffer;
            int firstVertex;

            size_t vertexStride = quadProcessor->getVertexStride();
            QuadVertex* verts = reinterpret_cast<QuadVertex*>(target->makeVertexSpace(
                    vertexStride, vertexCount, &vertexBuffer, &firstVertex));

            if (!verts) {
                SkDebugf("Could not allocate vertices\n");
                return;
            }

            const GrBuffer* indexBuffer;
            int firstIndex;

            uint16_t* idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
            if (!idxs) {
                SkDebugf("Could not allocate indices\n");
                return;
            }

            SkSTArray<kPreallocDrawCnt, Draw, true> draws;
            create_vertices(segments, fanPt, args.fColor, &draws, verts, idxs);

            GrMesh mesh(GrPrimitiveType::kTriangles);

            for (int j = 0; j < draws.count(); ++j) {
                const Draw& draw = draws[j];
                mesh.setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0, draw.fVertexCnt - 1);
                mesh.setVertexData(vertexBuffer, firstVertex);
                target->draw(quadProcessor.get(), pipeline, mesh);
                firstIndex += draw.fIndexCnt;
                firstVertex += draw.fVertexCnt;
            }
        }
    }

    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
        AAConvexPathOp* that = t->cast<AAConvexPathOp>();
        if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
            return false;
        }
        if (fHelper.usesLocalCoords() &&
            !fPaths[0].fViewMatrix.cheapEqualTo(that->fPaths[0].fViewMatrix)) {
            return false;
        }

        if (fLinesOnly != that->fLinesOnly) {
            return false;
        }

        fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
        this->joinBounds(*that);
        return true;
    }

    struct PathData {
        SkMatrix fViewMatrix;
        SkPath fPath;
        GrColor fColor;
    };

    Helper fHelper;
    SkSTArray<1, PathData, true> fPaths;
    bool fLinesOnly;

    typedef GrMeshDrawOp INHERITED;
};

}  // anonymous namespace

bool GrAAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
                              "GrAAConvexPathRenderer::onDrawPath");
    SkASSERT(GrFSAAType::kUnifiedMSAA != args.fRenderTargetContext->fsaaType());
    SkASSERT(!args.fShape->isEmpty());

    SkPath path;
    args.fShape->asPath(&path);

    std::unique_ptr<GrDrawOp> op = AAConvexPathOp::Make(std::move(args.fPaint), *args.fViewMatrix,
                                                        path, args.fUserStencilSettings);
    args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
    return true;
}

///////////////////////////////////////////////////////////////////////////////////////////////////

#if GR_TEST_UTILS

GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
    SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
    SkPath path = GrTest::TestPathConvex(random);
    const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context);
    return AAConvexPathOp::Make(std::move(paint), viewMatrix, path, stencilSettings);
}

#endif