1 /*
2 * Copyright 2016 The Android Open Source Project
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "SkAnalyticEdge.h"
9 #include "SkAntiRun.h"
10 #include "SkAutoMalloc.h"
11 #include "SkBlitter.h"
12 #include "SkEdge.h"
13 #include "SkEdgeBuilder.h"
14 #include "SkGeometry.h"
15 #include "SkPath.h"
16 #include "SkQuadClipper.h"
17 #include "SkRasterClip.h"
18 #include "SkRegion.h"
19 #include "SkScan.h"
20 #include "SkScanPriv.h"
21 #include "SkTSort.h"
22 #include "SkTemplates.h"
23 #include "SkTo.h"
24 #include "SkUTF.h"
25
26 #include <utility>
27
28 #if defined(SK_DISABLE_AAA)
AAAFillPath(const SkPath & path,SkBlitter * blitter,const SkIRect & ir,const SkIRect & clipBounds,bool forceRLE)29 void SkScan::AAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
30 const SkIRect& clipBounds, bool forceRLE) {
31 SkDEBUGFAIL("AAA Disabled");
32 return;
33 }
34 #else
35 ///////////////////////////////////////////////////////////////////////////////
36
37 /*
38
39 The following is a high-level overview of our analytic anti-aliasing
40 algorithm. We consider a path as a collection of line segments, as
41 quadratic/cubic curves are converted to small line segments. Without loss of
42 generality, let's assume that the draw region is [0, W] x [0, H].
43
44 Our algorithm is based on horizontal scan lines (y = c_i) as the previous
45 sampling-based algorithm did. However, our algorithm uses non-equal-spaced
46 scan lines, while the previous method always uses equal-spaced scan lines,
47 such as (y = 1/2 + 0, 1/2 + 1, 1/2 + 2, ...) in the previous non-AA algorithm,
48 and (y = 1/8 + 1/4, 1/8 + 2/4, 1/8 + 3/4, ...) in the previous
49 16-supersampling AA algorithm.
50
51 Our algorithm contains scan lines y = c_i for c_i that is either:
52
53 1. an integer between [0, H]
54
55 2. the y value of a line segment endpoint
56
57 3. the y value of an intersection of two line segments
58
For two consecutive scan lines y = c_i, y = c_{i+1}, we analytically compute
the coverage of this horizontal strip of our path on each pixel. This can be
done very efficiently because the strip of our path now only consists of
trapezoids whose top and bottom edges are y = c_i, y = c_{i+1} (this includes
rectangles and triangles as special cases).
64
We now describe how the coverage of a single pixel is computed against such a
trapezoid. That coverage is essentially the intersection area of a rectangle
(e.g., [0, 1] x [c_i, c_{i+1}]) and our trapezoid. However, that intersection
could be complicated, as shown in the example region A below:
69
70 +-----------\----+
71 | \ C|
72 | \ |
73 \ \ |
74 |\ A \|
75 | \ \
76 | \ |
77 | B \ |
78 +----\-----------+
79
80 However, we don't have to compute the area of A directly. Instead, we can
81 compute the excluded area, which are B and C, quite easily, because they're
82 just triangles. In fact, we can prove that an excluded region (take B as an
83 example) is either itself a simple trapezoid (including rectangles, triangles,
84 and empty regions), or its opposite (the opposite of B is A + C) is a simple
85 trapezoid. In any case, we can compute its area efficiently.
86
In summary, our algorithm has a higher quality because it generates ground-
truth coverages analytically. It is also faster because it has many fewer
unnecessary horizontal scan lines. For example, given a triangle path, the
number of scan lines in our algorithm is only about 3 + H while the
16-supersampling algorithm has about 4H scan lines.
92
93 */
94
95 ///////////////////////////////////////////////////////////////////////////////
96
addAlpha(SkAlpha * alpha,SkAlpha delta)97 static inline void addAlpha(SkAlpha* alpha, SkAlpha delta) {
98 SkASSERT(*alpha + (int)delta <= 256);
99 *alpha = SkAlphaRuns::CatchOverflow(*alpha + (int)delta);
100 }
101
safelyAddAlpha(SkAlpha * alpha,SkAlpha delta)102 static inline void safelyAddAlpha(SkAlpha* alpha, SkAlpha delta) {
103 *alpha = SkTMin(0xFF, *alpha + (int)delta);
104 }
105
106 class AdditiveBlitter : public SkBlitter {
107 public:
~AdditiveBlitter()108 ~AdditiveBlitter() override {}
109
110 virtual SkBlitter* getRealBlitter(bool forceRealBlitter = false) = 0;
111
112 virtual void blitAntiH(int x, int y, const SkAlpha antialias[], int len) = 0;
113 virtual void blitAntiH(int x, int y, const SkAlpha alpha) = 0;
114 virtual void blitAntiH(int x, int y, int width, const SkAlpha alpha) = 0;
115
blitAntiH(int x,int y,const SkAlpha antialias[],const int16_t runs[])116 void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
117 SkDEBUGFAIL("Please call real blitter's blitAntiH instead.");
118 }
119
blitV(int x,int y,int height,SkAlpha alpha)120 void blitV(int x, int y, int height, SkAlpha alpha) override {
121 SkDEBUGFAIL("Please call real blitter's blitV instead.");
122 }
123
blitH(int x,int y,int width)124 void blitH(int x, int y, int width) override {
125 SkDEBUGFAIL("Please call real blitter's blitH instead.");
126 }
127
blitRect(int x,int y,int width,int height)128 void blitRect(int x, int y, int width, int height) override {
129 SkDEBUGFAIL("Please call real blitter's blitRect instead.");
130 }
131
blitAntiRect(int x,int y,int width,int height,SkAlpha leftAlpha,SkAlpha rightAlpha)132 void blitAntiRect(int x, int y, int width, int height,
133 SkAlpha leftAlpha, SkAlpha rightAlpha) override {
134 SkDEBUGFAIL("Please call real blitter's blitAntiRect instead.");
135 }
136
137 virtual int getWidth() = 0;
138
139 // Flush the additive alpha cache if floor(y) and floor(nextY) is different
140 // (i.e., we'll start working on a new pixel row).
141 virtual void flush_if_y_changed(SkFixed y, SkFixed nextY) = 0;
142 };
143
144 // We need this mask blitter because it significantly accelerates small path filling.
145 class MaskAdditiveBlitter : public AdditiveBlitter {
146 public:
147 MaskAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
148 bool isInverse);
~MaskAdditiveBlitter()149 ~MaskAdditiveBlitter() override {
150 fRealBlitter->blitMask(fMask, fClipRect);
151 }
152
153 // Most of the time, we still consider this mask blitter as the real blitter
154 // so we can accelerate blitRect and others. But sometimes we want to return
155 // the absolute real blitter (e.g., when we fall back to the old code path).
getRealBlitter(bool forceRealBlitter)156 SkBlitter* getRealBlitter(bool forceRealBlitter) override {
157 return forceRealBlitter ? fRealBlitter : this;
158 }
159
160 // Virtual function is slow. So don't use this. Directly add alpha to the mask instead.
161 void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
162
163 // Allowing following methods are used to blit rectangles during aaa_walk_convex_edges
164 // Since there aren't many rectangles, we can still bear the slow speed of virtual functions.
165 void blitAntiH(int x, int y, const SkAlpha alpha) override;
166 void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
167 void blitV(int x, int y, int height, SkAlpha alpha) override;
168 void blitRect(int x, int y, int width, int height) override;
169 void blitAntiRect(int x, int y, int width, int height,
170 SkAlpha leftAlpha, SkAlpha rightAlpha) override;
171
172 // The flush is only needed for RLE (RunBasedAdditiveBlitter)
flush_if_y_changed(SkFixed y,SkFixed nextY)173 void flush_if_y_changed(SkFixed y, SkFixed nextY) override {}
174
getWidth()175 int getWidth() override { return fClipRect.width(); }
176
canHandleRect(const SkIRect & bounds)177 static bool canHandleRect(const SkIRect& bounds) {
178 int width = bounds.width();
179 if (width > MaskAdditiveBlitter::kMAX_WIDTH) {
180 return false;
181 }
182 int64_t rb = SkAlign4(width);
183 // use 64bits to detect overflow
184 int64_t storage = rb * bounds.height();
185
186 return (width <= MaskAdditiveBlitter::kMAX_WIDTH) &&
187 (storage <= MaskAdditiveBlitter::kMAX_STORAGE);
188 }
189
190 // Return a pointer where pointer[x] corresonds to the alpha of (x, y)
getRow(int y)191 inline uint8_t* getRow(int y) {
192 if (y != fY) {
193 fY = y;
194 fRow = fMask.fImage + (y - fMask.fBounds.fTop) * fMask.fRowBytes - fMask.fBounds.fLeft;
195 }
196 return fRow;
197 }
198
199 private:
200 // so we don't try to do very wide things, where the RLE blitter would be faster
201 static const int kMAX_WIDTH = 32;
202 static const int kMAX_STORAGE = 1024;
203
204 SkBlitter* fRealBlitter;
205 SkMask fMask;
206 SkIRect fClipRect;
207 // we add 2 because we can write 1 extra byte at either end due to precision error
208 uint32_t fStorage[(kMAX_STORAGE >> 2) + 2];
209
210 uint8_t* fRow;
211 int fY;
212 };
213
MaskAdditiveBlitter(SkBlitter * realBlitter,const SkIRect & ir,const SkIRect & clipBounds,bool isInverse)214 MaskAdditiveBlitter::MaskAdditiveBlitter(
215 SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds, bool isInverse) {
216 SkASSERT(canHandleRect(ir));
217 SkASSERT(!isInverse);
218
219 fRealBlitter = realBlitter;
220
221 fMask.fImage = (uint8_t*)fStorage + 1; // There's 1 extra byte at either end of fStorage
222 fMask.fBounds = ir;
223 fMask.fRowBytes = ir.width();
224 fMask.fFormat = SkMask::kA8_Format;
225
226 fY = ir.fTop - 1;
227 fRow = nullptr;
228
229 fClipRect = ir;
230 if (!fClipRect.intersect(clipBounds)) {
231 SkASSERT(0);
232 fClipRect.setEmpty();
233 }
234
235 memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 2);
236 }
237
blitAntiH(int x,int y,const SkAlpha antialias[],int len)238 void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
239 SK_ABORT("Don't use this; directly add alphas to the mask.");
240 }
241
blitAntiH(int x,int y,const SkAlpha alpha)242 void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
243 SkASSERT(x >= fMask.fBounds.fLeft -1);
244 addAlpha(&this->getRow(y)[x], alpha);
245 }
246
blitAntiH(int x,int y,int width,const SkAlpha alpha)247 void MaskAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
248 SkASSERT(x >= fMask.fBounds.fLeft -1);
249 uint8_t* row = this->getRow(y);
250 for (int i = 0; i < width; ++i) {
251 addAlpha(&row[x + i], alpha);
252 }
253 }
254
blitV(int x,int y,int height,SkAlpha alpha)255 void MaskAdditiveBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
256 if (alpha == 0) {
257 return;
258 }
259 SkASSERT(x >= fMask.fBounds.fLeft -1);
260 // This must be called as if this is a real blitter.
261 // So we directly set alpha rather than adding it.
262 uint8_t* row = this->getRow(y);
263 for (int i = 0; i < height; ++i) {
264 row[x] = alpha;
265 row += fMask.fRowBytes;
266 }
267 }
268
blitRect(int x,int y,int width,int height)269 void MaskAdditiveBlitter::blitRect(int x, int y, int width, int height) {
270 SkASSERT(x >= fMask.fBounds.fLeft -1);
271 // This must be called as if this is a real blitter.
272 // So we directly set alpha rather than adding it.
273 uint8_t* row = this->getRow(y);
274 for (int i = 0; i < height; ++i) {
275 memset(row + x, 0xFF, width);
276 row += fMask.fRowBytes;
277 }
278 }
279
blitAntiRect(int x,int y,int width,int height,SkAlpha leftAlpha,SkAlpha rightAlpha)280 void MaskAdditiveBlitter::blitAntiRect(int x, int y, int width, int height,
281 SkAlpha leftAlpha, SkAlpha rightAlpha) {
282 blitV(x, y, height, leftAlpha);
283 blitV(x + 1 + width, y, height, rightAlpha);
284 blitRect(x + 1, y, width, height);
285 }
286
287 class RunBasedAdditiveBlitter : public AdditiveBlitter {
288 public:
289 RunBasedAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
290 bool isInverse);
291 ~RunBasedAdditiveBlitter() override;
292
293 SkBlitter* getRealBlitter(bool forceRealBlitter) override;
294
295 void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
296 void blitAntiH(int x, int y, const SkAlpha alpha) override;
297 void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
298
299 int getWidth() override;
300
flush_if_y_changed(SkFixed y,SkFixed nextY)301 void flush_if_y_changed(SkFixed y, SkFixed nextY) override {
302 if (SkFixedFloorToInt(y) != SkFixedFloorToInt(nextY)) {
303 this->flush();
304 }
305 }
306
307 protected:
308 SkBlitter* fRealBlitter;
309
310 /// Current y coordinate
311 int fCurrY;
312 /// Widest row of region to be blitted
313 int fWidth;
314 /// Leftmost x coordinate in any row
315 int fLeft;
316 /// Initial y coordinate (top of bounds).
317 int fTop;
318
319 // The next three variables are used to track a circular buffer that
320 // contains the values used in SkAlphaRuns. These variables should only
321 // ever be updated in advanceRuns(), and fRuns should always point to
322 // a valid SkAlphaRuns...
323 int fRunsToBuffer;
324 void* fRunsBuffer;
325 int fCurrentRun;
326 SkAlphaRuns fRuns;
327
328 int fOffsetX;
329
check(int x,int width) const330 inline bool check(int x, int width) const {
331 #ifdef SK_DEBUG
332 if (x < 0 || x + width > fWidth) {
333 // SkDebugf("Ignore x = %d, width = %d\n", x, width);
334 }
335 #endif
336 return (x >= 0 && x + width <= fWidth);
337 }
338
339 // extra one to store the zero at the end
getRunsSz() const340 inline int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }
341
342 // This function updates the fRuns variable to point to the next buffer space
343 // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
344 // and resets fRuns to point to an empty scanline.
advanceRuns()345 inline void advanceRuns() {
346 const size_t kRunsSz = this->getRunsSz();
347 fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
348 fRuns.fRuns = reinterpret_cast<int16_t*>(
349 reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
350 fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
351 fRuns.reset(fWidth);
352 }
353
354 // Blitting 0xFF and 0 is much faster so we snap alphas close to them
snapAlpha(SkAlpha alpha)355 inline SkAlpha snapAlpha(SkAlpha alpha) {
356 return alpha > 247 ? 0xFF : alpha < 8 ? 0 : alpha;
357 }
358
flush()359 inline void flush() {
360 if (fCurrY >= fTop) {
361 SkASSERT(fCurrentRun < fRunsToBuffer);
362 for (int x = 0; fRuns.fRuns[x]; x += fRuns.fRuns[x]) {
363 // It seems that blitting 255 or 0 is much faster than blitting 254 or 1
364 fRuns.fAlpha[x] = snapAlpha(fRuns.fAlpha[x]);
365 }
366 if (!fRuns.empty()) {
367 // SkDEBUGCODE(fRuns.dump();)
368 fRealBlitter->blitAntiH(fLeft, fCurrY, fRuns.fAlpha, fRuns.fRuns);
369 this->advanceRuns();
370 fOffsetX = 0;
371 }
372 fCurrY = fTop - 1;
373 }
374 }
375
checkY(int y)376 inline void checkY(int y) {
377 if (y != fCurrY) {
378 this->flush();
379 fCurrY = y;
380 }
381 }
382 };
383
RunBasedAdditiveBlitter(SkBlitter * realBlitter,const SkIRect & ir,const SkIRect & clipBounds,bool isInverse)384 RunBasedAdditiveBlitter::RunBasedAdditiveBlitter(
385 SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds, bool isInverse) {
386 fRealBlitter = realBlitter;
387
388 SkIRect sectBounds;
389 if (isInverse) {
390 // We use the clip bounds instead of the ir, since we may be asked to
391 //draw outside of the rect when we're a inverse filltype
392 sectBounds = clipBounds;
393 } else {
394 if (!sectBounds.intersect(ir, clipBounds)) {
395 sectBounds.setEmpty();
396 }
397 }
398
399 const int left = sectBounds.left();
400 const int right = sectBounds.right();
401
402 fLeft = left;
403 fWidth = right - left;
404 fTop = sectBounds.top();
405 fCurrY = fTop - 1;
406
407 fRunsToBuffer = realBlitter->requestRowsPreserved();
408 fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
409 fCurrentRun = -1;
410
411 this->advanceRuns();
412
413 fOffsetX = 0;
414 }
415
~RunBasedAdditiveBlitter()416 RunBasedAdditiveBlitter::~RunBasedAdditiveBlitter() {
417 this->flush();
418 }
419
getRealBlitter(bool forceRealBlitter)420 SkBlitter* RunBasedAdditiveBlitter::getRealBlitter(bool forceRealBlitter) {
421 return fRealBlitter;
422 }
423
blitAntiH(int x,int y,const SkAlpha antialias[],int len)424 void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
425 checkY(y);
426 x -= fLeft;
427
428 if (x < 0) {
429 len += x;
430 antialias -= x;
431 x = 0;
432 }
433 len = SkTMin(len, fWidth - x);
434 SkASSERT(check(x, len));
435
436 if (x < fOffsetX) {
437 fOffsetX = 0;
438 }
439
440 fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
441 for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
442 for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
443 fRuns.fRuns[x + i + j] = 1;
444 fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
445 }
446 fRuns.fRuns[x + i] = 1;
447 }
448 for (int i = 0; i < len; ++i) {
449 addAlpha(&fRuns.fAlpha[x + i], antialias[i]);
450 }
451 }
blitAntiH(int x,int y,const SkAlpha alpha)452 void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
453 checkY(y);
454 x -= fLeft;
455
456 if (x < fOffsetX) {
457 fOffsetX = 0;
458 }
459
460 if (this->check(x, 1)) {
461 fOffsetX = fRuns.add(x, 0, 1, 0, alpha, fOffsetX);
462 }
463 }
464
blitAntiH(int x,int y,int width,const SkAlpha alpha)465 void RunBasedAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
466 checkY(y);
467 x -= fLeft;
468
469 if (x < fOffsetX) {
470 fOffsetX = 0;
471 }
472
473 if (this->check(x, width)) {
474 fOffsetX = fRuns.add(x, 0, width, 0, alpha, fOffsetX);
475 }
476 }
477
getWidth()478 int RunBasedAdditiveBlitter::getWidth() { return fWidth; }
479
480 // This exists specifically for concave path filling.
481 // In those cases, we can easily accumulate alpha greater than 0xFF.
482 class SafeRLEAdditiveBlitter : public RunBasedAdditiveBlitter {
483 public:
SafeRLEAdditiveBlitter(SkBlitter * realBlitter,const SkIRect & ir,const SkIRect & clipBounds,bool isInverse)484 SafeRLEAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
485 bool isInverse) : RunBasedAdditiveBlitter(realBlitter, ir, clipBounds, isInverse) {}
486
487 void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
488 void blitAntiH(int x, int y, const SkAlpha alpha) override;
489 void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
490 };
491
blitAntiH(int x,int y,const SkAlpha antialias[],int len)492 void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
493 checkY(y);
494 x -= fLeft;
495
496 if (x < 0) {
497 len += x;
498 antialias -= x;
499 x = 0;
500 }
501 len = SkTMin(len, fWidth - x);
502 SkASSERT(check(x, len));
503
504 if (x < fOffsetX) {
505 fOffsetX = 0;
506 }
507
508 fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
509 for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
510 for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
511 fRuns.fRuns[x + i + j] = 1;
512 fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
513 }
514 fRuns.fRuns[x + i] = 1;
515 }
516 for (int i = 0; i < len; ++i) {
517 safelyAddAlpha(&fRuns.fAlpha[x + i], antialias[i]);
518 }
519 }
520
blitAntiH(int x,int y,const SkAlpha alpha)521 void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
522 checkY(y);
523 x -= fLeft;
524
525 if (x < fOffsetX) {
526 fOffsetX = 0;
527 }
528
529 if (check(x, 1)) {
530 // Break the run
531 fOffsetX = fRuns.add(x, 0, 1, 0, 0, fOffsetX);
532 safelyAddAlpha(&fRuns.fAlpha[x], alpha);
533 }
534 }
535
blitAntiH(int x,int y,int width,const SkAlpha alpha)536 void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
537 checkY(y);
538 x -= fLeft;
539
540 if (x < fOffsetX) {
541 fOffsetX = 0;
542 }
543
544 if (check(x, width)) {
545 // Break the run
546 fOffsetX = fRuns.add(x, 0, width, 0, 0, fOffsetX);
547 for(int i = x; i < x + width; i += fRuns.fRuns[i]) {
548 safelyAddAlpha(&fRuns.fAlpha[i], alpha);
549 }
550 }
551 }
552
553 ///////////////////////////////////////////////////////////////////////////////
554
555 // Return the alpha of a trapezoid whose height is 1
trapezoidToAlpha(SkFixed l1,SkFixed l2)556 static inline SkAlpha trapezoidToAlpha(SkFixed l1, SkFixed l2) {
557 SkASSERT(l1 >= 0 && l2 >= 0);
558 return (l1 + l2) >> 9;
559 }
560
561 // The alpha of right-triangle (a, a*b), in 16 bits
partialTriangleToAlpha16(SkFixed a,SkFixed b)562 static inline SkFixed partialTriangleToAlpha16(SkFixed a, SkFixed b) {
563 SkASSERT(a <= SK_Fixed1);
564 // SkFixedMul(SkFixedMul(a, a), b) >> 1
565 // return ((((a >> 8) * (a >> 8)) >> 8) * (b >> 8)) >> 1;
566 return (a >> 11) * (a >> 11) * (b >> 11);
567 }
568
569 // The alpha of right-triangle (a, a*b)
partialTriangleToAlpha(SkFixed a,SkFixed b)570 static inline SkAlpha partialTriangleToAlpha(SkFixed a, SkFixed b) {
571 return partialTriangleToAlpha16(a, b) >> 8;
572 }
573
getPartialAlpha(SkAlpha alpha,SkFixed partialHeight)574 static inline SkAlpha getPartialAlpha(SkAlpha alpha, SkFixed partialHeight) {
575 return SkToU8(SkFixedRoundToInt(alpha * partialHeight));
576 }
577
getPartialAlpha(SkAlpha alpha,SkAlpha fullAlpha)578 static inline SkAlpha getPartialAlpha(SkAlpha alpha, SkAlpha fullAlpha) {
579 return ((uint16_t)alpha * fullAlpha) >> 8;
580 }
581
582 // For SkFixed that's close to SK_Fixed1, we can't convert it to alpha by just shifting right.
583 // For example, when f = SK_Fixed1, right shifting 8 will get 256, but we need 255.
584 // This is rarely the problem so we'll only use this for blitting rectangles.
f2a(SkFixed f)585 static inline SkAlpha f2a(SkFixed f) {
586 SkASSERT(f <= SK_Fixed1);
587 return getPartialAlpha(0xFF, f);
588 }
589
590 // Suppose that line (l1, y)-(r1, y+1) intersects with (l2, y)-(r2, y+1),
591 // approximate (very coarsely) the x coordinate of the intersection.
approximateIntersection(SkFixed l1,SkFixed r1,SkFixed l2,SkFixed r2)592 static inline SkFixed approximateIntersection(SkFixed l1, SkFixed r1, SkFixed l2, SkFixed r2) {
593 if (l1 > r1) { using std::swap; swap(l1, r1); }
594 if (l2 > r2) { using std::swap; swap(l2, r2); }
595 return (SkTMax(l1, l2) + SkTMin(r1, r2)) >> 1;
596 }
597
598 // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
computeAlphaAboveLine(SkAlpha * alphas,SkFixed l,SkFixed r,SkFixed dY,SkAlpha fullAlpha)599 static inline void computeAlphaAboveLine(SkAlpha* alphas, SkFixed l, SkFixed r,
600 SkFixed dY, SkAlpha fullAlpha) {
601 SkASSERT(l <= r);
602 SkASSERT(l >> 16 == 0);
603 int R = SkFixedCeilToInt(r);
604 if (R == 0) {
605 return;
606 } else if (R == 1) {
607 alphas[0] = getPartialAlpha(((R << 17) - l - r) >> 9, fullAlpha);
608 } else {
609 SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle
610 SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle
611 SkFixed firstH = SkFixedMul(first, dY); // vertical edge of the left-most triangle
612 alphas[0] = SkFixedMul(first, firstH) >> 9; // triangle alpha
613 SkFixed alpha16 = firstH + (dY >> 1); // rectangle plus triangle
614 for (int i = 1; i < R - 1; ++i) {
615 alphas[i] = alpha16 >> 8;
616 alpha16 += dY;
617 }
618 alphas[R - 1] = fullAlpha - partialTriangleToAlpha(last, dY);
619 }
620 }
621
622 // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
computeAlphaBelowLine(SkAlpha * alphas,SkFixed l,SkFixed r,SkFixed dY,SkAlpha fullAlpha)623 static inline void computeAlphaBelowLine(
624 SkAlpha* alphas, SkFixed l, SkFixed r, SkFixed dY, SkAlpha fullAlpha) {
625 SkASSERT(l <= r);
626 SkASSERT(l >> 16 == 0);
627 int R = SkFixedCeilToInt(r);
628 if (R == 0) {
629 return;
630 } else if (R == 1) {
631 alphas[0] = getPartialAlpha(trapezoidToAlpha(l, r), fullAlpha);
632 } else {
633 SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle
634 SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle
635 SkFixed lastH = SkFixedMul(last, dY); // vertical edge of the right-most triangle
636 alphas[R-1] = SkFixedMul(last, lastH) >> 9; // triangle alpha
637 SkFixed alpha16 = lastH + (dY >> 1); // rectangle plus triangle
638 for (int i = R - 2; i > 0; i--) {
639 alphas[i] = alpha16 >> 8;
640 alpha16 += dY;
641 }
642 alphas[0] = fullAlpha - partialTriangleToAlpha(first, dY);
643 }
644 }
645
646 // Note that if fullAlpha != 0xFF, we'll multiply alpha by fullAlpha
blit_single_alpha(AdditiveBlitter * blitter,int y,int x,SkAlpha alpha,SkAlpha fullAlpha,SkAlpha * maskRow,bool isUsingMask,bool noRealBlitter,bool needSafeCheck)647 static SK_ALWAYS_INLINE void blit_single_alpha(AdditiveBlitter* blitter, int y, int x,
648 SkAlpha alpha, SkAlpha fullAlpha, SkAlpha* maskRow,
649 bool isUsingMask, bool noRealBlitter, bool needSafeCheck) {
650 if (isUsingMask) {
651 if (fullAlpha == 0xFF && !noRealBlitter) { // noRealBlitter is needed for concave paths
652 maskRow[x] = alpha;
653 } else if (needSafeCheck) {
654 safelyAddAlpha(&maskRow[x], getPartialAlpha(alpha, fullAlpha));
655 } else {
656 addAlpha(&maskRow[x], getPartialAlpha(alpha, fullAlpha));
657 }
658 } else {
659 if (fullAlpha == 0xFF && !noRealBlitter) {
660 blitter->getRealBlitter()->blitV(x, y, 1, alpha);
661 } else {
662 blitter->blitAntiH(x, y, getPartialAlpha(alpha, fullAlpha));
663 }
664 }
665 }
666
blit_two_alphas(AdditiveBlitter * blitter,int y,int x,SkAlpha a1,SkAlpha a2,SkAlpha fullAlpha,SkAlpha * maskRow,bool isUsingMask,bool noRealBlitter,bool needSafeCheck)667 static SK_ALWAYS_INLINE void blit_two_alphas(AdditiveBlitter* blitter, int y, int x,
668 SkAlpha a1, SkAlpha a2, SkAlpha fullAlpha, SkAlpha* maskRow,
669 bool isUsingMask, bool noRealBlitter, bool needSafeCheck) {
670 if (isUsingMask) {
671 if (needSafeCheck) {
672 safelyAddAlpha(&maskRow[x], a1);
673 safelyAddAlpha(&maskRow[x + 1], a2);
674 } else {
675 addAlpha(&maskRow[x], a1);
676 addAlpha(&maskRow[x + 1], a2);
677 }
678 } else {
679 if (fullAlpha == 0xFF && !noRealBlitter) {
680 blitter->getRealBlitter()->blitAntiH2(x, y, a1, a2);
681 } else {
682 blitter->blitAntiH(x, y, a1);
683 blitter->blitAntiH(x + 1, y, a2);
684 }
685 }
686 }
687
688 // It's important that this is inline. Otherwise it'll be much slower.
blit_full_alpha(AdditiveBlitter * blitter,int y,int x,int len,SkAlpha fullAlpha,SkAlpha * maskRow,bool isUsingMask,bool noRealBlitter,bool needSafeCheck)689 static SK_ALWAYS_INLINE void blit_full_alpha(AdditiveBlitter* blitter, int y, int x, int len,
690 SkAlpha fullAlpha, SkAlpha* maskRow, bool isUsingMask,
691 bool noRealBlitter, bool needSafeCheck) {
692 if (isUsingMask) {
693 for (int i = 0; i < len; ++i) {
694 if (needSafeCheck) {
695 safelyAddAlpha(&maskRow[x + i], fullAlpha);
696 } else {
697 addAlpha(&maskRow[x + i], fullAlpha);
698 }
699 }
700 } else {
701 if (fullAlpha == 0xFF && !noRealBlitter) {
702 blitter->getRealBlitter()->blitH(x, y, len);
703 } else {
704 blitter->blitAntiH(x, y, len, fullAlpha);
705 }
706 }
707 }
708
blit_aaa_trapezoid_row(AdditiveBlitter * blitter,int y,SkFixed ul,SkFixed ur,SkFixed ll,SkFixed lr,SkFixed lDY,SkFixed rDY,SkAlpha fullAlpha,SkAlpha * maskRow,bool isUsingMask,bool noRealBlitter,bool needSafeCheck)709 static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
710 SkFixed ul, SkFixed ur, SkFixed ll, SkFixed lr,
711 SkFixed lDY, SkFixed rDY, SkAlpha fullAlpha, SkAlpha* maskRow,
712 bool isUsingMask, bool noRealBlitter, bool needSafeCheck) {
713 int L = SkFixedFloorToInt(ul), R = SkFixedCeilToInt(lr);
714 int len = R - L;
715
716 if (len == 1) {
717 SkAlpha alpha = trapezoidToAlpha(ur - ul, lr - ll);
718 blit_single_alpha(blitter, y, L, alpha, fullAlpha, maskRow, isUsingMask, noRealBlitter,
719 needSafeCheck);
720 return;
721 }
722
723 // SkDebugf("y = %d, len = %d, ul = %f, ur = %f, ll = %f, lr = %f\n", y, len,
724 // SkFixedToFloat(ul), SkFixedToFloat(ur), SkFixedToFloat(ll), SkFixedToFloat(lr));
725
726 const int kQuickLen = 31;
727 // This is faster than SkAutoSMalloc<1024>
728 char quickMemory[(sizeof(SkAlpha) * 2 + sizeof(int16_t)) * (kQuickLen + 1)];
729 SkAlpha* alphas;
730
731 if (len <= kQuickLen) {
732 alphas = (SkAlpha*)quickMemory;
733 } else {
734 alphas = new SkAlpha[(len + 1) * (sizeof(SkAlpha) * 2 + sizeof(int16_t))];
735 }
736
737 SkAlpha* tempAlphas = alphas + len + 1;
738 int16_t* runs = (int16_t*)(alphas + (len + 1) * 2);
739
740 for (int i = 0; i < len; ++i) {
741 runs[i] = 1;
742 alphas[i] = fullAlpha;
743 }
744 runs[len] = 0;
745
746 int uL = SkFixedFloorToInt(ul);
747 int lL = SkFixedCeilToInt(ll);
748 if (uL + 2 == lL) { // We only need to compute two triangles, accelerate this special case
749 SkFixed first = SkIntToFixed(uL) + SK_Fixed1 - ul;
750 SkFixed second = ll - ul - first;
751 SkAlpha a1 = fullAlpha - partialTriangleToAlpha(first, lDY);
752 SkAlpha a2 = partialTriangleToAlpha(second, lDY);
753 alphas[0] = alphas[0] > a1 ? alphas[0] - a1 : 0;
754 alphas[1] = alphas[1] > a2 ? alphas[1] - a2 : 0;
755 } else {
756 computeAlphaBelowLine(tempAlphas + uL - L, ul - SkIntToFixed(uL), ll - SkIntToFixed(uL),
757 lDY, fullAlpha);
758 for (int i = uL; i < lL; ++i) {
759 if (alphas[i - L] > tempAlphas[i - L]) {
760 alphas[i - L] -= tempAlphas[i - L];
761 } else {
762 alphas[i - L] = 0;
763 }
764 }
765 }
766
767 int uR = SkFixedFloorToInt(ur);
768 int lR = SkFixedCeilToInt(lr);
769 if (uR + 2 == lR) { // We only need to compute two triangles, accelerate this special case
770 SkFixed first = SkIntToFixed(uR) + SK_Fixed1 - ur;
771 SkFixed second = lr - ur - first;
772 SkAlpha a1 = partialTriangleToAlpha(first, rDY);
773 SkAlpha a2 = fullAlpha - partialTriangleToAlpha(second, rDY);
774 alphas[len-2] = alphas[len-2] > a1 ? alphas[len-2] - a1 : 0;
775 alphas[len-1] = alphas[len-1] > a2 ? alphas[len-1] - a2 : 0;
776 } else {
777 computeAlphaAboveLine(tempAlphas + uR - L, ur - SkIntToFixed(uR), lr - SkIntToFixed(uR),
778 rDY, fullAlpha);
779 for (int i = uR; i < lR; ++i) {
780 if (alphas[i - L] > tempAlphas[i - L]) {
781 alphas[i - L] -= tempAlphas[i - L];
782 } else {
783 alphas[i - L] = 0;
784 }
785 }
786 }
787
788 if (isUsingMask) {
789 for (int i = 0; i < len; ++i) {
790 if (needSafeCheck) {
791 safelyAddAlpha(&maskRow[L + i], alphas[i]);
792 } else {
793 addAlpha(&maskRow[L + i], alphas[i]);
794 }
795 }
796 } else {
797 if (fullAlpha == 0xFF && !noRealBlitter) {
798 // Real blitter is faster than RunBasedAdditiveBlitter
799 blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs);
800 } else {
801 blitter->blitAntiH(L, y, alphas, len);
802 }
803 }
804
805 if (len > kQuickLen) {
806 delete [] alphas;
807 }
808 }
809
// Blit one scan-line row of a trapezoid. The top edge of the trapezoid spans
// [ul, ur] and the bottom edge spans [ll, lr] (SkFixed x coordinates). lDY and
// rDY are the absolute dy of the left/right edges; they are used to compute
// partial triangle coverage for boundary pixels. fullAlpha is the alpha of a
// fully covered pixel in this row (less than 0xFF when the row has fractional
// height). If isUsingMask is true, coverage is accumulated directly into
// maskRow; otherwise it goes through the additive blitter. noRealBlitter
// forbids the bypass to the real blitter; needSafeCheck requests saturating
// alpha accumulation to guard against overflow.
static SK_ALWAYS_INLINE void blit_trapezoid_row(AdditiveBlitter* blitter, int y,
        SkFixed ul, SkFixed ur, SkFixed ll, SkFixed lr,
        SkFixed lDY, SkFixed rDY, SkAlpha fullAlpha,
        SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter = false,
        bool needSafeCheck = false) {
    SkASSERT(lDY >= 0 && rDY >= 0); // We should only send in the absolute value

    // Top edge must not be reversed; bail out if precision errors produced one.
    if (ul > ur) {
#ifdef SK_DEBUG
        // SkDebugf("ul = %f > ur = %f!\n", SkFixedToFloat(ul), SkFixedToFloat(ur));
#endif
        return;
    }

    // Edge crosses. Approximate it. This should only happend due to precision limit,
    // so the approximation could be very coarse.
    if (ll > lr) {
#ifdef SK_DEBUG
        // SkDebugf("approximate intersection: %d %f %f\n", y,
        //          SkFixedToFloat(ll), SkFixedToFloat(lr));
#endif
        ll = lr = approximateIntersection(ul, ll, ur, lr);
    }

    if (ul == ur && ll == lr) {
        return; // empty trapzoid
    }

    // We're going to use the left line ul-ll and the rite line ur-lr
    // to exclude the area that's not covered by the path.
    // Swapping (ul, ll) or (ur, lr) won't affect that exclusion
    // so we'll do that for simplicity.
    if (ul > ll) { using std::swap; swap(ul, ll); }
    if (ur > lr) { using std::swap; swap(ur, lr); }

    // joinLeft..joinRite is the (pixel-aligned) span that is fully covered
    // vertically by both edges; outside it, pixels get partial coverage.
    SkFixed joinLeft = SkFixedCeilToFixed(ll);
    SkFixed joinRite = SkFixedFloorToFixed(ur);
    if (joinLeft <= joinRite) { // There's a rect from joinLeft to joinRite that we can blit
        // Left boundary region [ul, joinLeft): 1 or 2 partial pixels get closed
        // forms (trapezoid/triangle areas); longer runs go to the generic
        // trapezoid blitter.
        if (ul < joinLeft) {
            int len = SkFixedCeilToInt(joinLeft - ul);
            if (len == 1) {
                SkAlpha alpha = trapezoidToAlpha(joinLeft - ul, joinLeft - ll);
                blit_single_alpha(blitter, y, ul >> 16, alpha, fullAlpha, maskRow, isUsingMask,
                        noRealBlitter, needSafeCheck);
            } else if (len == 2) {
                SkFixed first = joinLeft - SK_Fixed1 - ul;
                SkFixed second = ll - ul - first;
                SkAlpha a1 = partialTriangleToAlpha(first, lDY);
                SkAlpha a2 = fullAlpha - partialTriangleToAlpha(second, lDY);
                blit_two_alphas(blitter, y, ul >> 16, a1, a2, fullAlpha, maskRow, isUsingMask,
                        noRealBlitter, needSafeCheck);
            } else {
                blit_aaa_trapezoid_row(blitter, y, ul, joinLeft, ll, joinLeft, lDY, SK_MaxS32,
                        fullAlpha, maskRow, isUsingMask, noRealBlitter,
                        needSafeCheck);
            }
        }
        // SkAAClip requires that we blit from left to right.
        // Hence we must blit [ul, joinLeft] before blitting [joinLeft, joinRite]
        if (joinLeft < joinRite) {
            blit_full_alpha(blitter, y, SkFixedFloorToInt(joinLeft),
                    SkFixedFloorToInt(joinRite - joinLeft),
                    fullAlpha, maskRow, isUsingMask, noRealBlitter, needSafeCheck);
        }
        // Right boundary region (joinRite, lr]: mirror of the left boundary.
        if (lr > joinRite) {
            int len = SkFixedCeilToInt(lr - joinRite);
            if (len == 1) {
                SkAlpha alpha = trapezoidToAlpha(ur - joinRite, lr - joinRite);
                blit_single_alpha(blitter, y, joinRite >> 16, alpha, fullAlpha, maskRow,
                        isUsingMask, noRealBlitter, needSafeCheck);
            } else if (len == 2) {
                SkFixed first = joinRite + SK_Fixed1 - ur;
                SkFixed second = lr - ur - first;
                SkAlpha a1 = fullAlpha - partialTriangleToAlpha(first, rDY);
                SkAlpha a2 = partialTriangleToAlpha(second, rDY);
                blit_two_alphas(blitter, y, joinRite >> 16, a1, a2, fullAlpha, maskRow,
                        isUsingMask, noRealBlitter, needSafeCheck);
            } else {
                blit_aaa_trapezoid_row(blitter, y, joinRite, ur, joinRite, lr, SK_MaxS32, rDY,
                        fullAlpha, maskRow, isUsingMask, noRealBlitter,
                        needSafeCheck);
            }
        }
    } else {
        // No fully covered middle span; hand the whole row to the generic path.
        blit_aaa_trapezoid_row(blitter, y, ul, ur, ll, lr, lDY, rDY, fullAlpha, maskRow,
                isUsingMask, noRealBlitter, needSafeCheck);
    }
}
898
899 ///////////////////////////////////////////////////////////////////////////////
900
operator <(const SkAnalyticEdge & a,const SkAnalyticEdge & b)901 static bool operator<(const SkAnalyticEdge& a, const SkAnalyticEdge& b) {
902 int valuea = a.fUpperY;
903 int valueb = b.fUpperY;
904
905 if (valuea == valueb) {
906 valuea = a.fX;
907 valueb = b.fX;
908 }
909
910 if (valuea == valueb) {
911 valuea = a.fDX;
912 valueb = b.fDX;
913 }
914
915 return valuea < valueb;
916 }
917
sort_edges(SkAnalyticEdge * list[],int count,SkAnalyticEdge ** last)918 static SkAnalyticEdge* sort_edges(SkAnalyticEdge* list[], int count, SkAnalyticEdge** last) {
919 SkTQSort(list, list + count - 1);
920
921 // now make the edges linked in sorted order
922 for (int i = 1; i < count; ++i) {
923 list[i - 1]->fNext = list[i];
924 list[i]->fPrev = list[i - 1];
925 }
926
927 *last = list[count - 1];
928 return list[0];
929 }
930
#ifdef SK_DEBUG
// Debug-only: walk the linked list and assert that edges are sorted by
// non-decreasing fUpperY and that each edge passes its own validate().
static void validate_sort(const SkAnalyticEdge* edge) {
    // Start below any possible fUpperY so the first comparison always holds.
    SkFixed y = SkIntToFixed(-32768);

    // The tail sentinel has fUpperY == SK_MaxS32, which terminates the walk.
    while (edge->fUpperY != SK_MaxS32) {
        edge->validate();
        SkASSERT(y <= edge->fUpperY);

        y = edge->fUpperY;
        edge = (SkAnalyticEdge*)edge->fNext;
    }
}
#else
// No-op in release builds.
#define validate_sort(edge)
#endif
946
947 // For an edge, we consider it smooth if the Dx doesn't change much, and Dy is large enough
948 // For curves that are updating, the Dx is not changing much if fQDx/fCDx and fQDy/fCDy are
949 // relatively large compared to fQDDx/QCDDx and fQDDy/fCDDy
// Decide whether thisEdge is "smooth" at the current position: its dx is not
// changing much (second difference small relative to first difference) and
// its dy is at least one pixel. For curve edges we inspect the stored
// quadratic/cubic difference terms; for line segments we compare against the
// next edge's slope.
static inline bool isSmoothEnough(SkAnalyticEdge* thisEdge, SkAnalyticEdge* nextEdge, int stop_y) {
    if (thisEdge->fCurveCount < 0) {
        // Cubic edge: fCurveCount < 0 marks cubics.
        const SkCubicEdge& cEdge = static_cast<SkAnalyticCubicEdge*>(thisEdge)->fCEdge;
        int ddshift = cEdge.fCurveShift;
        // |D| / 2 >= |DD| >> ddshift, for both x and y: slope change is small.
        return SkAbs32(cEdge.fCDx) >> 1 >= SkAbs32(cEdge.fCDDx) >> ddshift &&
                SkAbs32(cEdge.fCDy) >> 1 >= SkAbs32(cEdge.fCDDy) >> ddshift &&
                // current Dy is (fCDy - (fCDDy >> ddshift)) >> dshift
                (cEdge.fCDy - (cEdge.fCDDy >> ddshift)) >> cEdge.fCubicDShift >= SK_Fixed1;
    } else if (thisEdge->fCurveCount > 0) {
        // Quadratic edge: fCurveCount > 0 marks quads.
        const SkQuadraticEdge& qEdge = static_cast<SkAnalyticQuadraticEdge*>(thisEdge)->fQEdge;
        return SkAbs32(qEdge.fQDx) >> 1 >= SkAbs32(qEdge.fQDDx) &&
                SkAbs32(qEdge.fQDy) >> 1 >= SkAbs32(qEdge.fQDDy) &&
                // current Dy is (fQDy - fQDDy) >> shift
                (qEdge.fQDy - qEdge.fQDDy) >> qEdge.fCurveShift
                >= SK_Fixed1;
    }
    // Line segment: compare slope against the following edge's slope.
    return SkAbs32(nextEdge->fDX - thisEdge->fDX) <= SK_Fixed1 && // DDx should be small
            nextEdge->fLowerY - nextEdge->fUpperY >= SK_Fixed1;   // Dy should be large
}
969
970 // Check if the leftE and riteE are changing smoothly in terms of fDX.
971 // If yes, we can later skip the fractional y and directly jump to integer y.
isSmoothEnough(SkAnalyticEdge * leftE,SkAnalyticEdge * riteE,SkAnalyticEdge * currE,int stop_y)972 static inline bool isSmoothEnough(SkAnalyticEdge* leftE, SkAnalyticEdge* riteE,
973 SkAnalyticEdge* currE, int stop_y) {
974 if (currE->fUpperY >= SkLeftShift(stop_y, 16)) {
975 return false; // We're at the end so we won't skip anything
976 }
977 if (leftE->fLowerY + SK_Fixed1 < riteE->fLowerY) {
978 return isSmoothEnough(leftE, currE, stop_y); // Only leftE is changing
979 } else if (leftE->fLowerY > riteE->fLowerY + SK_Fixed1) {
980 return isSmoothEnough(riteE, currE, stop_y); // Only riteE is changing
981 }
982
983 // Now both edges are changing, find the second next edge
984 SkAnalyticEdge* nextCurrE = currE->fNext;
985 if (nextCurrE->fUpperY >= stop_y << 16) { // Check if we're at the end
986 return false;
987 }
988 // Ensure that currE is the next left edge and nextCurrE is the next right edge. Swap if not.
989 if (nextCurrE->fUpperX < currE->fUpperX) {
990 using std::swap;
991 swap(currE, nextCurrE);
992 }
993 return isSmoothEnough(leftE, currE, stop_y) && isSmoothEnough(riteE, nextCurrE, stop_y);
994 }
995
// Walk the edges of a convex path. Convexity means at any scan line there is
// exactly one left edge and one right edge, so we can keep a (leftE, riteE)
// pair and advance each independently, blitting either axis-aligned rects
// (both slopes zero) or trapezoid rows. y advances in SkFixed so scan lines
// may be fractional (stopping at edge endpoints), except when isSmoothEnough
// lets us snap to integer y.
static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead,
        AdditiveBlitter* blitter, int start_y, int stop_y, SkFixed leftBound, SkFixed riteBound,
        bool isUsingMask) {
    validate_sort((SkAnalyticEdge*)prevHead->fNext);

    SkAnalyticEdge* leftE = (SkAnalyticEdge*) prevHead->fNext;
    SkAnalyticEdge* riteE = (SkAnalyticEdge*) leftE->fNext;
    SkAnalyticEdge* currE = (SkAnalyticEdge*) riteE->fNext;

    // Start at the lower of the two top endpoints (both edges must be live).
    SkFixed y = SkTMax(leftE->fUpperY, riteE->fUpperY);

#ifdef SK_DEBUG
    int frac_y_cnt = 0;
    int total_y_cnt = 0;
#endif

    for (;;) {
        // We have to check fLowerY first because some edges might be alone (e.g., there's only
        // a left edge but no right edge in a given y scan line) due to precision limit.
        while (leftE->fLowerY <= y) { // Due to smooth jump, we may pass multiple short edges
            if (!leftE->update(y)) {
                // leftE is exhausted; take the next edge from the list.
                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
                    goto END_WALK;
                }
                leftE = currE;
                currE = (SkAnalyticEdge*)currE->fNext;
            }
        }
        while (riteE->fLowerY <= y) { // Due to smooth jump, we may pass multiple short edges
            if (!riteE->update(y)) {
                // riteE is exhausted; take the next edge from the list.
                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
                    goto END_WALK;
                }
                riteE = currE;
                currE = (SkAnalyticEdge*)currE->fNext;
            }
        }

        SkASSERT(leftE);
        SkASSERT(riteE);

        // check our bottom clip
        if (SkFixedFloorToInt(y) >= stop_y) {
            break;
        }

        SkASSERT(SkFixedFloorToInt(leftE->fUpperY) <= stop_y);
        SkASSERT(SkFixedFloorToInt(riteE->fUpperY) <= stop_y);

        // Advance both edges' x to the current y.
        leftE->goY(y);
        riteE->goY(y);

        // Precision errors may have crossed the edges; keep leftE on the left.
        if (leftE->fX > riteE->fX || (leftE->fX == riteE->fX &&
                leftE->fDX > riteE->fDX)) {
            using std::swap;
            swap(leftE, riteE);
        }

        // Walk down to whichever edge ends first...
        SkFixed local_bot_fixed = SkMin32(leftE->fLowerY, riteE->fLowerY);
        if (isSmoothEnough(leftE, riteE, currE, stop_y)) {
            // ...and snap to integer y when the slopes change smoothly enough.
            local_bot_fixed = SkFixedCeilToFixed(local_bot_fixed);
        }
        local_bot_fixed = SkMin32(local_bot_fixed, SkIntToFixed(stop_y));

        SkFixed left = SkTMax(leftBound, leftE->fX);
        SkFixed dLeft = leftE->fDX;
        SkFixed rite = SkTMin(riteBound, riteE->fX);
        SkFixed dRite = riteE->fDX;
        if (0 == (dLeft | dRite)) {
            // Both edges vertical: this span is an axis-aligned rectangle.
            // Split into partial top row, full middle rows, partial bottom row,
            // and partial left/right columns.
            int fullLeft = SkFixedCeilToInt(left);
            int fullRite = SkFixedFloorToInt(rite);
            SkFixed partialLeft = SkIntToFixed(fullLeft) - left;
            SkFixed partialRite = rite - SkIntToFixed(fullRite);
            int fullTop = SkFixedCeilToInt(y);
            int fullBot = SkFixedFloorToInt(local_bot_fixed);
            SkFixed partialTop = SkIntToFixed(fullTop) - y;
            SkFixed partialBot = local_bot_fixed - SkIntToFixed(fullBot);
            if (fullTop > fullBot) { // The rectangle is within one pixel height...
                partialTop -= (SK_Fixed1 - partialBot);
                partialBot = 0;
            }

            if (fullRite >= fullLeft) {
                if (partialTop > 0) { // blit first partial row
                    if (partialLeft > 0) {
                        blitter->blitAntiH(fullLeft - 1, fullTop - 1,
                                f2a(SkFixedMul(partialTop, partialLeft)));
                    }
                    blitter->blitAntiH(fullLeft, fullTop - 1, fullRite - fullLeft,
                            f2a(partialTop));
                    if (partialRite > 0) {
                        blitter->blitAntiH(fullRite, fullTop - 1,
                                f2a(SkFixedMul(partialTop, partialRite)));
                    }
                    blitter->flush_if_y_changed(y, y + partialTop);
                }

                // Blit all full-height rows from fullTop to fullBot
                if (fullBot > fullTop &&
                        // SkAAClip cannot handle the empty rect so check the non-emptiness here
                        // (bug chromium:662800)
                        (fullRite > fullLeft || f2a(partialLeft) > 0 || f2a(partialRite) > 0)) {
                    blitter->getRealBlitter()->blitAntiRect(fullLeft - 1, fullTop,
                            fullRite - fullLeft, fullBot - fullTop,
                            f2a(partialLeft), f2a(partialRite));
                }

                if (partialBot > 0) { // blit last partial row
                    if (partialLeft > 0) {
                        blitter->blitAntiH(fullLeft - 1, fullBot,
                                f2a(SkFixedMul(partialBot, partialLeft)));
                    }
                    blitter->blitAntiH(fullLeft, fullBot, fullRite - fullLeft, f2a(partialBot));
                    if (partialRite > 0) {
                        blitter->blitAntiH(fullRite, fullBot,
                                f2a(SkFixedMul(partialBot, partialRite)));
                    }
                }
            } else { // left and rite are within the same pixel
                if (partialTop > 0) {
                    blitter->blitAntiH(fullLeft - 1, fullTop - 1, 1,
                            f2a(SkFixedMul(partialTop, rite - left)));
                    blitter->flush_if_y_changed(y, y + partialTop);
                }
                if (fullBot > fullTop) {
                    blitter->getRealBlitter()->blitV(fullLeft - 1, fullTop, fullBot - fullTop,
                            f2a(rite - left));
                }
                if (partialBot > 0) {
                    blitter->blitAntiH(fullLeft - 1, fullBot, 1,
                            f2a(SkFixedMul(partialBot, rite - left)));
                }
            }

            y = local_bot_fixed;
        } else {
            // The following constant are used to snap X
            // We snap X mainly for speedup (no tiny triangle) and
            // avoiding edge cases caused by precision errors
            const SkFixed kSnapDigit = SK_Fixed1 >> 4;
            const SkFixed kSnapHalf = kSnapDigit >> 1;
            const SkFixed kSnapMask = (-1 ^ (kSnapDigit - 1));
            left += kSnapHalf; rite += kSnapHalf; // For fast rounding

            // Number of blit_trapezoid_row calls we'll have
            int count = SkFixedCeilToInt(local_bot_fixed) - SkFixedFloorToInt(y);
#ifdef SK_DEBUG
            total_y_cnt += count;
            frac_y_cnt += ((int)(y & 0xFFFF0000) != y);
            if ((int)(y & 0xFFFF0000) != y) {
                // SkDebugf("frac_y = %f\n", SkFixedToFloat(y));
            }
#endif

            // If we're using mask blitter, we advance the mask row in this function
            // to save some "if" condition checks.
            SkAlpha* maskRow = nullptr;
            if (isUsingMask) {
                maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
            }

            // Instead of writing one loop that handles both partial-row blit_trapezoid_row
            // and full-row trapezoid_row together, we use the following 3-stage flow to
            // handle partial-row blit and full-row blit separately. It will save us much time
            // on changing y, left, and rite.
            if (count > 1) {
                if ((int)(y & 0xFFFF0000) != y) { // There's a partial-row on the top
                    count--;
                    SkFixed nextY = SkFixedCeilToFixed(y + 1);
                    SkFixed dY = nextY - y;
                    SkFixed nextLeft = left + SkFixedMul(dLeft, dY);
                    SkFixed nextRite = rite + SkFixedMul(dRite, dY);
                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                            (nextLeft & kSnapMask) >= leftBound &&
                            (nextRite & kSnapMask) <= riteBound);
                    blit_trapezoid_row(blitter, y >> 16, left & kSnapMask, rite & kSnapMask,
                            nextLeft & kSnapMask, nextRite & kSnapMask, leftE->fDY, riteE->fDY,
                            getPartialAlpha(0xFF, dY), maskRow, isUsingMask);
                    blitter->flush_if_y_changed(y, nextY);
                    left = nextLeft; rite = nextRite; y = nextY;
                }

                while (count > 1) { // Full rows in the middle
                    count--;
                    if (isUsingMask) {
                        maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
                    }
                    SkFixed nextY = y + SK_Fixed1, nextLeft = left + dLeft, nextRite = rite + dRite;
                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                            (nextLeft & kSnapMask) >= leftBound &&
                            (nextRite & kSnapMask) <= riteBound);
                    blit_trapezoid_row(blitter, y >> 16, left & kSnapMask, rite & kSnapMask,
                            nextLeft & kSnapMask, nextRite & kSnapMask,
                            leftE->fDY, riteE->fDY, 0xFF, maskRow, isUsingMask);
                    blitter->flush_if_y_changed(y, nextY);
                    left = nextLeft; rite = nextRite; y = nextY;
                }
            }

            if (isUsingMask) {
                maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
            }

            SkFixed dY = local_bot_fixed - y; // partial-row on the bottom
            SkASSERT(dY <= SK_Fixed1);
            // Smooth jumping to integer y may make the last nextLeft/nextRite out of bound.
            // Take them back into the bound here.
            // Note that we substract kSnapHalf later so we have to add them to leftBound/riteBound
            SkFixed nextLeft = SkTMax(left + SkFixedMul(dLeft, dY), leftBound + kSnapHalf);
            SkFixed nextRite = SkTMin(rite + SkFixedMul(dRite, dY), riteBound + kSnapHalf);
            SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                    (nextLeft & kSnapMask) >= leftBound && (nextRite & kSnapMask) <= riteBound);
            blit_trapezoid_row(blitter, y >> 16, left & kSnapMask, rite & kSnapMask,
                    nextLeft & kSnapMask, nextRite & kSnapMask, leftE->fDY, riteE->fDY,
                    getPartialAlpha(0xFF, dY), maskRow, isUsingMask);
            blitter->flush_if_y_changed(y, local_bot_fixed);
            left = nextLeft; rite = nextRite; y = local_bot_fixed;
            left -= kSnapHalf; rite -= kSnapHalf;
        }

        // Record the current positions back on the edges for the next iteration.
        leftE->fX = left;
        riteE->fX = rite;
        leftE->fY = riteE->fY = y;
    }

END_WALK:
    ;
#ifdef SK_DEBUG
    // SkDebugf("frac_y_cnt = %d, total_y_cnt = %d\n", frac_y_cnt, total_y_cnt);
#endif
}
1227
1228 ///////////////////////////////////////////////////////////////////////////////
1229
updateNextNextY(SkFixed y,SkFixed nextY,SkFixed * nextNextY)1230 static inline void updateNextNextY(SkFixed y, SkFixed nextY, SkFixed* nextNextY) {
1231 *nextNextY = y > nextY && y < *nextNextY ? y : *nextNextY;
1232 }
1233
checkIntersection(const SkAnalyticEdge * edge,SkFixed nextY,SkFixed * nextNextY)1234 static inline void checkIntersection(const SkAnalyticEdge* edge, SkFixed nextY, SkFixed* nextNextY)
1235 {
1236 if (edge->fPrev->fPrev && edge->fPrev->fX + edge->fPrev->fDX > edge->fX + edge->fDX) {
1237 *nextNextY = nextY + (SK_Fixed1 >> SkAnalyticEdge::kDefaultAccuracy);
1238 }
1239 }
1240
// Insert all edges starting at or above y (beginning at newEdge) into the
// active list in x-sorted order, updating *nextNextY with each inserted
// edge's fLowerY, any detected intersections, and the first not-yet-active
// edge's fUpperY.
static void insert_new_edges(SkAnalyticEdge* newEdge, SkFixed y, SkFixed* nextNextY) {
    if (newEdge->fUpperY > y) {
        // Nothing becomes active yet; just record when the next edge starts.
        updateNextNextY(newEdge->fUpperY, y, nextNextY);
        return;
    }
    SkAnalyticEdge* prev = newEdge->fPrev;
    if (prev->fX <= newEdge->fX) {
        // Fast path: the new edges are already in x order relative to prev,
        // so no relinking is needed.
        while (newEdge->fUpperY <= y) {
            checkIntersection(newEdge, y, nextNextY);
            updateNextNextY(newEdge->fLowerY, y, nextNextY);
            newEdge = newEdge->fNext;
        }
        updateNextNextY(newEdge->fUpperY, y, nextNextY);
        return;
    }
    // find first x pos to insert
    SkAnalyticEdge* start = backward_insert_start(prev, newEdge->fX);
    //insert the lot, fixing up the links as we go
    do {
        SkAnalyticEdge* next = newEdge->fNext;
        // Scan forward from start for the correct x-sorted slot for newEdge.
        do {
            if (start->fNext == newEdge) {
                // Already in position; no relink needed.
                goto nextEdge;
            }
            SkAnalyticEdge* after = start->fNext;
            if (after->fX >= newEdge->fX) {
                break;
            }
            SkASSERT(start != after);
            start = after;
        } while (true);
        remove_edge(newEdge);
        insert_edge_after(newEdge, start);
nextEdge:
        checkIntersection(newEdge, y, nextNextY);
        updateNextNextY(newEdge->fLowerY, y, nextNextY);
        start = newEdge;
        newEdge = next;
    } while (newEdge->fUpperY <= y);
    updateNextNextY(newEdge->fUpperY, y, nextNextY);
}
1282
// Debug-only: assert the active edge list is well-linked and x-sorted for all
// edges live at scan line y. Compiles to nothing in release builds.
static void validate_edges_for_y(const SkAnalyticEdge* edge, SkFixed y) {
#ifdef SK_DEBUG
    while (edge->fUpperY <= y) {
        SkASSERT(edge->fPrev && edge->fNext);
        SkASSERT(edge->fPrev->fNext == edge);
        SkASSERT(edge->fNext->fPrev == edge);
        SkASSERT(edge->fUpperY <= edge->fLowerY);
        // Head sentinel (fPrev->fPrev == nullptr) is exempt from x ordering.
        SkASSERT(edge->fPrev->fPrev == nullptr || edge->fPrev->fX <= edge->fX);
        edge = edge->fNext;
    }
#endif
}
1295
1296 // Return true if prev->fX, next->fX are too close in the current pixel row.
edges_too_close(SkAnalyticEdge * prev,SkAnalyticEdge * next,SkFixed lowerY)1297 static inline bool edges_too_close(SkAnalyticEdge* prev, SkAnalyticEdge* next, SkFixed lowerY) {
1298 // When next->fDX == 0, prev->fX >= next->fX - SkAbs32(next->fDX) would be false
1299 // even if prev->fX and next->fX are close and within one pixel (e.g., prev->fX == 0.1,
1300 // next->fX == 0.9). Adding SLACK = 1 to the formula would guarantee it to be true if two
1301 // edges prev and next are within one pixel.
1302 constexpr SkFixed SLACK = SK_Fixed1;
1303
1304 // Note that even if the following test failed, the edges might still be very close to each
1305 // other at some point within the current pixel row because of prev->fDX and next->fDX.
1306 // However, to handle that case, we have to sacrafice more performance.
1307 // I think the current quality is good enough (mainly by looking at Nebraska-StateSeal.svg)
1308 // so I'll ignore fDX for performance tradeoff.
1309 return next && prev && next->fUpperY < lowerY && prev->fX + SLACK >=
1310 next->fX - SkAbs32(next->fDX);
1311 // The following is more accurate but also slower.
1312 // return (prev && prev->fPrev && next && next->fNext != nullptr && next->fUpperY < lowerY &&
1313 // prev->fX + SkAbs32(prev->fDX) + SLACK >= next->fX - SkAbs32(next->fDX));
1314 }
1315
1316 // This function exists for the case where the previous rite edge is removed because
1317 // its fLowerY <= nextY
edges_too_close(int prevRite,SkFixed ul,SkFixed ll)1318 static inline bool edges_too_close(int prevRite, SkFixed ul, SkFixed ll) {
1319 return prevRite > SkFixedFloorToInt(ul) || prevRite > SkFixedFloorToInt(ll);
1320 }
1321
// Blit the trapezoid that was deferred on leftE (between leftE->fSavedY and
// lowerY). lowerLeft/lowerRite are the bottom-edge x positions; leftE->fRiteE
// holds the matching right edge and is cleared once the blit is emitted.
static inline void blit_saved_trapezoid(SkAnalyticEdge* leftE, SkFixed lowerY,
        SkFixed lowerLeft, SkFixed lowerRite,
        AdditiveBlitter* blitter, SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter,
        SkFixed leftClip, SkFixed rightClip) {
    SkAnalyticEdge* riteE = leftE->fRiteE;
    SkASSERT(riteE);
    SkASSERT(riteE->fNext == nullptr || leftE->fSavedY == riteE->fSavedY);
    SkASSERT(SkFixedFloorToInt(lowerY - 1) == SkFixedFloorToInt(leftE->fSavedY));
    int y = SkFixedFloorToInt(leftE->fSavedY);
    // Instead of using f2a(lowerY - leftE->fSavedY), we use the following fullAlpha
    // to elimiate cumulative error: if there are many fractional y scan lines within the
    // same row, the former may accumulate the rounding error while the later won't.
    SkAlpha fullAlpha = f2a(lowerY - SkIntToFixed(y)) - f2a(leftE->fSavedY - SkIntToFixed(y));
    // We need fSavedDY because the (quad or cubic) edge might be updated
    blit_trapezoid_row(blitter, y,
            SkTMax(leftE->fSavedX, leftClip), SkTMin(riteE->fSavedX, rightClip),
            SkTMax(lowerLeft, leftClip), SkTMin(lowerRite, rightClip),
            leftE->fSavedDY, riteE->fSavedDY, fullAlpha, maskRow, isUsingMask,
            // Force the slow path if neighboring edges are too close, since
            // the fast real-blitter path requires strict left-to-right order.
            noRealBlitter ||
                    (fullAlpha == 0xFF && (edges_too_close(leftE->fPrev, leftE, lowerY)
                            || edges_too_close(riteE, riteE->fNext, lowerY))),
            true);
    leftE->fRiteE = nullptr;
}
1346
// Deferred blitting: instead of blitting each fractional scan line, save the
// (leftE, riteE) trapezoid start on leftE and flush it only when the pairing
// changes, an edge ends, or nextY is integral. This merges many fractional
// slices within one pixel row into a single trapezoid blit.
static inline void deferred_blit(SkAnalyticEdge* leftE, SkAnalyticEdge* riteE,
        SkFixed left, SkFixed leftDY, // don't save leftE->fX/fDY as they may have been updated
        SkFixed y, SkFixed nextY, bool isIntegralNextY, bool leftEnds, bool riteEnds,
        AdditiveBlitter* blitter, SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter,
        SkFixed leftClip, SkFixed rightClip, int yShift) {
    if (leftE->fRiteE && leftE->fRiteE != riteE) {
        // leftE's right edge changed. Blit the saved trapezoid.
        SkASSERT(leftE->fRiteE->fNext == nullptr || leftE->fRiteE->fY == y);
        blit_saved_trapezoid(leftE, y, left, leftE->fRiteE->fX,
                blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
    }
    if (!leftE->fRiteE) {
        // Save and defer blitting the trapezoid
        SkASSERT(riteE->fRiteE == nullptr);
        SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
        SkASSERT(riteE->fNext == nullptr || riteE->fY == y);
        leftE->saveXY(left, y, leftDY);
        riteE->saveXY(riteE->fX, y, riteE->fDY);
        leftE->fRiteE = riteE;
    }
    SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
    riteE->goY(nextY, yShift);
    // Always blit when edges end or nextY is integral
    if (isIntegralNextY || leftEnds || riteEnds) {
        blit_saved_trapezoid(leftE, nextY, leftE->fX, riteE->fX,
                blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
    }
}
1375
// General (possibly concave, possibly inverse-filled) edge walker. Maintains
// the active edge list between sentinel edges prevHead/nextTail, advances y
// through both integer scan lines and the fractional scan lines recorded in
// nextNextY (edge endpoints and intersections), and accumulates winding to
// decide which spans are inside. fillType supplies even-odd/winding and
// inverse-ness; useDeferred enables the deferred trapezoid merging above.
static void aaa_walk_edges(SkAnalyticEdge* prevHead, SkAnalyticEdge* nextTail,
        SkPath::FillType fillType, AdditiveBlitter* blitter, int start_y, int stop_y,
        SkFixed leftClip, SkFixed rightClip, bool isUsingMask, bool forceRLE, bool useDeferred,
        bool skipIntersect) {
    // Sentinels sit at the clip bounds so they sort outside all real edges.
    prevHead->fX = prevHead->fUpperX = leftClip;
    nextTail->fX = nextTail->fUpperX = rightClip;
    SkFixed y = SkTMax(prevHead->fNext->fUpperY, SkIntToFixed(start_y));
    SkFixed nextNextY = SK_MaxS32;

    {
        // Bring every edge that starts at or above y to y, and seed nextNextY.
        SkAnalyticEdge* edge;
        for(edge = prevHead->fNext; edge->fUpperY <= y; edge = edge->fNext) {
            edge->goY(y);
            updateNextNextY(edge->fLowerY, y, &nextNextY);
        }
        updateNextNextY(edge->fUpperY, y, &nextNextY);
    }

    // returns 1 for evenodd, -1 for winding, regardless of inverse-ness
    int windingMask = (fillType & 1) ? 1 : -1;

    bool isInverse = SkPath::IsInverseFillType(fillType);

    if (isInverse && SkIntToFixed(start_y) != y) {
        // Inverse fill: everything above the first edge is inside; blit the
        // full rows, plus the partial row up to the (fractional) first y.
        int width = SkFixedFloorToInt(rightClip - leftClip);
        if (SkFixedFloorToInt(y) != start_y) {
            blitter->getRealBlitter()->blitRect(SkFixedFloorToInt(leftClip), start_y,
                    width, SkFixedFloorToInt(y) - start_y);
            start_y = SkFixedFloorToInt(y);
        }
        SkAlpha* maskRow = isUsingMask ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(start_y)
                                       : nullptr;
        blit_full_alpha(blitter, start_y, SkFixedFloorToInt(leftClip), width,
                f2a(y - SkIntToFixed(start_y)), maskRow, isUsingMask, false, false);
    }

    while (true) {
        int w = 0;                      // winding accumulator
        bool in_interval = isInverse;   // inverse fill starts "inside"
        SkFixed prevX = prevHead->fX;
        // Next scan line: the nearest recorded event, capped at the next
        // integer y.
        SkFixed nextY = SkTMin(nextNextY, SkFixedCeilToFixed(y + 1));
        bool isIntegralNextY = (nextY & (SK_Fixed1 - 1)) == 0;
        SkAnalyticEdge* currE = prevHead->fNext;
        SkAnalyticEdge* leftE = prevHead;
        SkFixed left = leftClip;
        SkFixed leftDY = 0;
        bool leftEnds = false;
        int prevRite = SkFixedFloorToInt(leftClip);

        nextNextY = SK_MaxS32;

        // Scan lines are quantized to quarter pixels (kDefaultAccuracy);
        // yShift tells goY how far below full accuracy this step is.
        SkASSERT((nextY & ((SK_Fixed1 >> 2) - 1)) == 0);
        int yShift = 0;
        if ((nextY - y) & (SK_Fixed1 >> 2)) {
            yShift = 2;
            nextY = y + (SK_Fixed1 >> 2);
        } else if ((nextY - y) & (SK_Fixed1 >> 1)) {
            yShift = 1;
            SkASSERT(nextY == y + (SK_Fixed1 >> 1));
        }

        SkAlpha fullAlpha = f2a(nextY - y);

        // If we're using mask blitter, we advance the mask row in this function
        // to save some "if" condition checks.
        SkAlpha* maskRow = nullptr;
        if (isUsingMask) {
            maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(SkFixedFloorToInt(y));
        }

        SkASSERT(currE->fPrev == prevHead);
        validate_edges_for_y(currE, y);

        // Even if next - y == SK_Fixed1, we can still break the left-to-right order requirement
        // of the SKAAClip: |\| (two trapezoids with overlapping middle wedges)
        bool noRealBlitter = forceRLE; // forceRLE && (nextY - y != SK_Fixed1);

        // Walk the active edges left-to-right, toggling in/out by winding.
        while (currE->fUpperY <= y) {
            SkASSERT(currE->fLowerY >= nextY);
            SkASSERT(currE->fY == y);

            w += currE->fWinding;
            bool prev_in_interval = in_interval;
            in_interval = !(w & windingMask) == isInverse;

            bool isLeft = in_interval && !prev_in_interval;
            bool isRite = !in_interval && prev_in_interval;
            bool currEnds = currE->fLowerY == nextY;

            if (useDeferred) {
                if (currE->fRiteE && !isLeft) {
                    // currE is a left edge previously, but now it's not.
                    // Blit the trapezoid between fSavedY and y.
                    SkASSERT(currE->fRiteE->fY == y);
                    blit_saved_trapezoid(currE, y, currE->fX, currE->fRiteE->fX,
                            blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
                }
                if (leftE->fRiteE == currE && !isRite) {
                    // currE is a right edge previously, but now it's not.
                    // Moreover, its corresponding leftE doesn't change (otherwise we'll handle it
                    // in the previous if clause). Hence we blit the trapezoid.
                    blit_saved_trapezoid(leftE, y, left, currE->fX,
                            blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
                }
            }

            if (isRite) {
                if (useDeferred) {
                    deferred_blit(leftE, currE, left, leftDY, y, nextY, isIntegralNextY,
                            leftEnds, currEnds, blitter, maskRow, isUsingMask, noRealBlitter,
                            leftClip, rightClip, yShift);
                } else {
                    // Immediate blit of the (left, currE) trapezoid slice.
                    SkFixed rite = currE->fX;
                    currE->goY(nextY, yShift);
                    SkFixed nextLeft = SkTMax(leftClip, leftE->fX);
                    rite = SkTMin(rightClip, rite);
                    SkFixed nextRite = SkTMin(rightClip, currE->fX);
                    blit_trapezoid_row(blitter, y >> 16, left, rite, nextLeft, nextRite,
                            leftDY, currE->fDY, fullAlpha, maskRow, isUsingMask,
                            noRealBlitter || (fullAlpha == 0xFF && (
                                    edges_too_close(prevRite, left, leftE->fX) ||
                                    edges_too_close(currE, currE->fNext, nextY)
                                    )),
                            true);
                    prevRite = SkFixedCeilToInt(SkTMax(rite, currE->fX));
                }
            } else {
                if (isLeft) {
                    // Remember this edge as the current span's left boundary.
                    left = SkTMax(currE->fX, leftClip);
                    leftDY = currE->fDY;
                    leftE = currE;
                    leftEnds = leftE->fLowerY == nextY;
                }
                currE->goY(nextY, yShift);
            }


            SkAnalyticEdge* next = currE->fNext;
            SkFixed newX;

            // If currE ends before nextY, advance its curve segments until one
            // reaches past nextY (or the curve is exhausted).
            while (currE->fLowerY <= nextY) {
                if (currE->fCurveCount < 0) {
                    SkAnalyticCubicEdge* cubicEdge = (SkAnalyticCubicEdge*)currE;
                    cubicEdge->keepContinuous();
                    if (!cubicEdge->updateCubic()) {
                        break;
                    }
                } else if (currE->fCurveCount > 0) {
                    SkAnalyticQuadraticEdge* quadEdge = (SkAnalyticQuadraticEdge*)currE;
                    quadEdge->keepContinuous();
                    if (!quadEdge->updateQuadratic()) {
                        break;
                    }
                } else {
                    break;
                }
            }
            SkASSERT(currE->fY == nextY);

            if (currE->fLowerY <= nextY) {
                // Edge is fully consumed; drop it from the active list.
                remove_edge(currE);
            } else {
                updateNextNextY(currE->fLowerY, nextY, &nextNextY);
                newX = currE->fX;
                SkASSERT(currE->fLowerY > nextY);
                if (newX < prevX) { // ripple currE backwards until it is x-sorted
                    // If the crossing edge is a right edge, blit the saved trapezoid.
                    if (leftE->fRiteE == currE && useDeferred) {
                        SkASSERT(leftE->fY == nextY && currE->fY == nextY);
                        blit_saved_trapezoid(leftE, nextY, leftE->fX, currE->fX,
                                blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
                    }
                    backward_insert_edge_based_on_x(currE);
                } else {
                    prevX = newX;
                }
                if (!skipIntersect) {
                    checkIntersection(currE, nextY, &nextNextY);
                }
            }

            currE = next;
            SkASSERT(currE);
        }

        // was our right-edge culled away?
        if (in_interval) {
            // Still inside at the right clip: close the span against rightClip.
            if (useDeferred) {
                deferred_blit(leftE, nextTail, left, leftDY, y, nextY, isIntegralNextY,
                        leftEnds, false, blitter, maskRow, isUsingMask, noRealBlitter,
                        leftClip, rightClip, yShift);
            } else {
                blit_trapezoid_row(blitter, y >> 16,
                        left, rightClip,
                        SkTMax(leftClip, leftE->fX), rightClip,
                        leftDY, 0, fullAlpha, maskRow, isUsingMask,
                        noRealBlitter ||
                                (fullAlpha == 0xFF && edges_too_close(leftE->fPrev, leftE, nextY)),
                        true);
            }
        }

        if (forceRLE) {
            ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY);
        }

        y = nextY;
        if (y >= SkIntToFixed(stop_y)) {
            break;
        }

        // now currE points to the first edge with a fUpperY larger than the previous y
        insert_new_edges(currE, y, &nextNextY);
    }
}
1591
aaa_fill_path(const SkPath & path,const SkIRect & clipRect,AdditiveBlitter * blitter,int start_y,int stop_y,bool pathContainedInClip,bool isUsingMask,bool forceRLE)1592 static SK_ALWAYS_INLINE void aaa_fill_path(const SkPath& path, const SkIRect& clipRect,
1593 AdditiveBlitter* blitter, int start_y, int stop_y, bool pathContainedInClip,
1594 bool isUsingMask, bool forceRLE) { // forceRLE implies that SkAAClip is calling us
1595 SkASSERT(blitter);
1596
1597 SkAnalyticEdgeBuilder builder;
1598 int count = builder.buildEdges(path, pathContainedInClip ? nullptr : &clipRect);
1599 SkAnalyticEdge** list = builder.analyticEdgeList();
1600
1601 SkIRect rect = clipRect;
1602 if (0 == count) {
1603 if (path.isInverseFillType()) {
1604 /*
1605 * Since we are in inverse-fill, our caller has already drawn above
1606 * our top (start_y) and will draw below our bottom (stop_y). Thus
1607 * we need to restrict our drawing to the intersection of the clip
1608 * and those two limits.
1609 */
1610 if (rect.fTop < start_y) {
1611 rect.fTop = start_y;
1612 }
1613 if (rect.fBottom > stop_y) {
1614 rect.fBottom = stop_y;
1615 }
1616 if (!rect.isEmpty()) {
1617 blitter->getRealBlitter()->blitRect(rect.fLeft, rect.fTop,
1618 rect.width(), rect.height());
1619 }
1620 }
1621 return;
1622 }
1623
1624 SkAnalyticEdge headEdge, tailEdge, *last;
1625 // this returns the first and last edge after they're sorted into a dlink list
1626 SkAnalyticEdge* edge = sort_edges(list, count, &last);
1627
1628 headEdge.fRiteE = nullptr;
1629 headEdge.fPrev = nullptr;
1630 headEdge.fNext = edge;
1631 headEdge.fUpperY = headEdge.fLowerY = SK_MinS32;
1632 headEdge.fX = SK_MinS32;
1633 headEdge.fDX = 0;
1634 headEdge.fDY = SK_MaxS32;
1635 headEdge.fUpperX = SK_MinS32;
1636 edge->fPrev = &headEdge;
1637
1638 tailEdge.fRiteE = nullptr;
1639 tailEdge.fPrev = last;
1640 tailEdge.fNext = nullptr;
1641 tailEdge.fUpperY = tailEdge.fLowerY = SK_MaxS32;
1642 tailEdge.fX = SK_MaxS32;
1643 tailEdge.fDX = 0;
1644 tailEdge.fDY = SK_MaxS32;
1645 tailEdge.fUpperX = SK_MaxS32;
1646 last->fNext = &tailEdge;
1647
1648 // now edge is the head of the sorted linklist
1649
1650 if (!pathContainedInClip && start_y < clipRect.fTop) {
1651 start_y = clipRect.fTop;
1652 }
1653 if (!pathContainedInClip && stop_y > clipRect.fBottom) {
1654 stop_y = clipRect.fBottom;
1655 }
1656
1657 SkFixed leftBound = SkIntToFixed(rect.fLeft);
1658 SkFixed rightBound = SkIntToFixed(rect.fRight);
1659 if (isUsingMask) {
1660 // If we're using mask, then we have to limit the bound within the path bounds.
1661 // Otherwise, the edge drift may access an invalid address inside the mask.
1662 SkIRect ir;
1663 path.getBounds().roundOut(&ir);
1664 leftBound = SkTMax(leftBound, SkIntToFixed(ir.fLeft));
1665 rightBound = SkTMin(rightBound, SkIntToFixed(ir.fRight));
1666 }
1667
1668 if (!path.isInverseFillType() && path.isConvex() && count >= 2) {
1669 aaa_walk_convex_edges(&headEdge, blitter, start_y, stop_y,
1670 leftBound, rightBound, isUsingMask);
1671 } else {
1672 // Only use deferred blitting if there are many edges.
1673 bool useDeferred = count >
1674 (SkFixedFloorToInt(tailEdge.fPrev->fLowerY - headEdge.fNext->fUpperY) + 1) * 4;
1675
1676 // We skip intersection computation if there are many points which probably already
1677 // give us enough fractional scan lines.
1678 bool skipIntersect = path.countPoints() > (stop_y - start_y) * 2;
1679
1680 aaa_walk_edges(&headEdge, &tailEdge, path.getFillType(), blitter, start_y, stop_y,
1681 leftBound, rightBound, isUsingMask, forceRLE, useDeferred, skipIntersect);
1682 }
1683 }
1684
1685 ///////////////////////////////////////////////////////////////////////////////
1686
AAAFillPath(const SkPath & path,SkBlitter * blitter,const SkIRect & ir,const SkIRect & clipBounds,bool forceRLE)1687 void SkScan::AAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
1688 const SkIRect& clipBounds, bool forceRLE) {
1689 bool containedInClip = clipBounds.contains(ir);
1690 bool isInverse = path.isInverseFillType();
1691
1692 // The mask blitter (where we store intermediate alpha values directly in a mask, and then call
1693 // the real blitter once in the end to blit the whole mask) is faster than the RLE blitter when
1694 // the blit region is small enough (i.e., canHandleRect(ir)). When isInverse is true, the blit
1695 // region is no longer the rectangle ir so we won't use the mask blitter. The caller may also
1696 // use the forceRLE flag to force not using the mask blitter. Also, when the path is a simple
1697 // rect, preparing a mask and blitting it might have too much overhead. Hence we'll use
1698 // blitFatAntiRect to avoid the mask and its overhead.
1699 if (MaskAdditiveBlitter::canHandleRect(ir) && !isInverse && !forceRLE) {
1700 // blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
1701 // Hence only tryBlitFatAntiRect when MaskAdditiveBlitter would have been used.
1702 if (!TryBlitFatAntiRect(blitter, path, clipBounds)) {
1703 MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
1704 aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
1705 containedInClip, true, forceRLE);
1706 }
1707 } else if (!isInverse && path.isConvex()) {
1708 // If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler
1709 // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
1710 // SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
1711 // RunBasedAdditiveBlitter would suffice.
1712 RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
1713 aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
1714 containedInClip, false, forceRLE);
1715 } else {
1716 // If the filling area might not be convex, the more involved aaa_walk_edges would
1717 // be called and we have to clamp the alpha downto 255. The SafeRLEAdditiveBlitter
1718 // does that at a cost of performance.
1719 SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
1720 aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
1721 containedInClip, false, forceRLE);
1722 }
1723 }
1724 #endif //defined(SK_DISABLE_AAA)
1725