1 /*
2 * Copyright 2016 The Android Open Source Project
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "include/core/SkColor.h"
9 #include "include/core/SkPath.h"
10 #include "include/core/SkPathTypes.h"
11 #include "include/core/SkRect.h"
12 #include "include/private/base/SkAlign.h"
13 #include "include/private/base/SkAssert.h"
14 #include "include/private/base/SkDebug.h"
15 #include "include/private/base/SkFixed.h"
16 #include "include/private/base/SkMath.h"
17 #include "include/private/base/SkSafe32.h"
18 #include "include/private/base/SkTo.h"
19 #include "src/base/SkTSort.h"
20 #include "src/core/SkAlphaRuns.h"
21 #include "src/core/SkAnalyticEdge.h"
22 #include "src/core/SkBlitter.h"
23 #include "src/core/SkEdge.h"
24 #include "src/core/SkEdgeBuilder.h"
25 #include "src/core/SkMask.h"
26 #include "src/core/SkScan.h"
27 #include "src/core/SkScanPriv.h"
28
29 #include <algorithm>
30 #include <cstdint>
31 #include <cstring>
32
33 /*
34
35 The following is a high-level overview of our analytic anti-aliasing
36 algorithm. We consider a path as a collection of line segments, as
37 quadratic/cubic curves are converted to small line segments. Without loss of
38 generality, let's assume that the draw region is [0, W] x [0, H].
39
40 Our algorithm is based on horizontal scan lines (y = c_i) as the previous
41 sampling-based algorithm did. However, our algorithm uses non-equal-spaced
42 scan lines, while the previous method always uses equal-spaced scan lines,
43 such as (y = 1/2 + 0, 1/2 + 1, 1/2 + 2, ...) in the previous non-AA algorithm,
44 and (y = 1/8 + 1/4, 1/8 + 2/4, 1/8 + 3/4, ...) in the previous
45 16-supersampling AA algorithm.
46
47 Our algorithm contains scan lines y = c_i for c_i that is either:
48
49 1. an integer between [0, H]
50
51 2. the y value of a line segment endpoint
52
53 3. the y value of an intersection of two line segments
54
55 For two consecutive scan lines y = c_i, y = c_{i+1}, we analytically compute
56 the coverage of this horizontal strip of our path on each pixel. This can be
57 done very efficiently because the strip of our path now only consists of
58 trapezoids whose top and bottom edges are y = c_i, y = c_{i+1} (this includes
59 rectangles and triangles as special cases).
60
61 We now describe how the coverage of a single pixel is computed against such a
62 trapezoid. That coverage is essentially the intersection area of a rectangle
63 (e.g., [0, 1] x [c_i, c_{i+1}]) and our trapezoid. However, that intersection
64 could be complicated, as shown in the example region A below:
65
66 +-----------\----+
67 | \ C|
68 | \ |
69 \ \ |
70 |\ A \|
71 | \ \
72 | \ |
73 | B \ |
74 +----\-----------+
75
76 However, we don't have to compute the area of A directly. Instead, we can
77 compute the excluded area, which are B and C, quite easily, because they're
78 just triangles. In fact, we can prove that an excluded region (take B as an
79 example) is either itself a simple trapezoid (including rectangles, triangles,
80 and empty regions), or its opposite (the opposite of B is A + C) is a simple
81 trapezoid. In any case, we can compute its area efficiently.
82
83 In summary, our algorithm has a higher quality because it generates ground-
84 truth coverages analytically. It is also faster because it has far fewer
85 unnecessary horizontal scan lines. For example, given a triangle path, the
86 number of scan lines in our algorithm is only about 3 + H while the
87 16-supersampling algorithm has about 4H scan lines.
88
89 */
90
add_alpha(SkAlpha * alpha,SkAlpha delta)91 static void add_alpha(SkAlpha* alpha, SkAlpha delta) {
92 SkASSERT(*alpha + delta <= 256);
93 *alpha = SkAlphaRuns::CatchOverflow(*alpha + delta);
94 }
95
safely_add_alpha(SkAlpha * alpha,SkAlpha delta)96 static void safely_add_alpha(SkAlpha* alpha, SkAlpha delta) {
97 *alpha = std::min(0xFF, *alpha + delta);
98 }
99
// Abstract blitter whose blitAntiH entry points accumulate (add) coverage per
// pixel rather than overwrite it, so multiple trapezoid strips touching the
// same pixel row can each contribute partial coverage before the total is
// forwarded to a real SkBlitter.
class AdditiveBlitter : public SkBlitter {
public:
    ~AdditiveBlitter() override {}

    // Return the underlying (non-additive) blitter. A subclass may return
    // itself when forceRealBlitter is false if it can act as the real blitter.
    virtual SkBlitter* getRealBlitter(bool forceRealBlitter = false) = 0;

    // Additive entry points: add coverage at row y (array / single / constant run).
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[], int len) = 0;
    virtual void blitAntiH(int x, int y, SkAlpha alpha) = 0;
    virtual void blitAntiH(int x, int y, int width, SkAlpha alpha) = 0;

    // The plain SkBlitter entry points below must not be called on an additive
    // blitter; they exist only to satisfy the SkBlitter interface.
    void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
        SkDEBUGFAIL("Please call real blitter's blitAntiH instead.");
    }

    void blitV(int x, int y, int height, SkAlpha alpha) override {
        SkDEBUGFAIL("Please call real blitter's blitV instead.");
    }

    void blitH(int x, int y, int width) override {
        SkDEBUGFAIL("Please call real blitter's blitH instead.");
    }

    void blitRect(int x, int y, int width, int height) override {
        SkDEBUGFAIL("Please call real blitter's blitRect instead.");
    }

    void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
            override {
        SkDEBUGFAIL("Please call real blitter's blitAntiRect instead.");
    }

    // Widest row of the region to be blitted.
    virtual int getWidth() = 0;

    // Flush the additive alpha cache if floor(y) and floor(nextY) is different
    // (i.e., we'll start working on a new pixel row).
    virtual void flush_if_y_changed(SkFixed y, SkFixed nextY) = 0;
};
137
138 // We need this mask blitter because it significantly accelerates small path filling.
139 class MaskAdditiveBlitter : public AdditiveBlitter {
140 public:
141 MaskAdditiveBlitter(SkBlitter* realBlitter,
142 const SkIRect& ir,
143 const SkIRect& clipBounds,
144 bool isInverse);
~MaskAdditiveBlitter()145 ~MaskAdditiveBlitter() override { fRealBlitter->blitMask(fMask, fClipRect); }
146
147 // Most of the time, we still consider this mask blitter as the real blitter
148 // so we can accelerate blitRect and others. But sometimes we want to return
149 // the absolute real blitter (e.g., when we fall back to the old code path).
getRealBlitter(bool forceRealBlitter)150 SkBlitter* getRealBlitter(bool forceRealBlitter) override {
151 return forceRealBlitter ? fRealBlitter : this;
152 }
153
154 // Virtual function is slow. So don't use this. Directly add alpha to the mask instead.
155 void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
156
157 // Allowing following methods are used to blit rectangles during aaa_walk_convex_edges
158 // Since there aren't many rectangles, we can still bear the slow speed of virtual functions.
159 void blitAntiH(int x, int y, SkAlpha alpha) override;
160 void blitAntiH(int x, int y, int width, SkAlpha alpha) override;
161 void blitV(int x, int y, int height, SkAlpha alpha) override;
162 void blitRect(int x, int y, int width, int height) override;
163 void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
164 override;
165
166 // The flush is only needed for RLE (RunBasedAdditiveBlitter)
flush_if_y_changed(SkFixed y,SkFixed nextY)167 void flush_if_y_changed(SkFixed y, SkFixed nextY) override {}
168
getWidth()169 int getWidth() override { return fClipRect.width(); }
170
CanHandleRect(const SkIRect & bounds)171 static bool CanHandleRect(const SkIRect& bounds) {
172 int width = bounds.width();
173 if (width > MaskAdditiveBlitter::kMAX_WIDTH) {
174 return false;
175 }
176 int64_t rb = SkAlign4(width);
177 // use 64bits to detect overflow
178 int64_t storage = rb * bounds.height();
179
180 return (width <= MaskAdditiveBlitter::kMAX_WIDTH) &&
181 (storage <= MaskAdditiveBlitter::kMAX_STORAGE);
182 }
183
184 // Return a pointer where pointer[x] corresonds to the alpha of (x, y)
getRow(int y)185 uint8_t* getRow(int y) {
186 if (y != fY) {
187 fY = y;
188 fRow = fMask.image() + (y - fMask.fBounds.fTop) * fMask.fRowBytes - fMask.fBounds.fLeft;
189 }
190 return fRow;
191 }
192
193 private:
194 // so we don't try to do very wide things, where the RLE blitter would be faster
195 static const int kMAX_WIDTH = 32;
196 static const int kMAX_STORAGE = 1024;
197
198 SkBlitter* fRealBlitter;
199 SkMaskBuilder fMask;
200 SkIRect fClipRect;
201 // we add 2 because we can write 1 extra byte at either end due to precision error
202 uint32_t fStorage[(kMAX_STORAGE >> 2) + 2];
203
204 uint8_t* fRow;
205 int fY;
206 };
207
// Build an A8 mask over `ir` (which must satisfy CanHandleRect) that is
// blitted through realBlitter, clipped to clipBounds, when this object dies.
MaskAdditiveBlitter::MaskAdditiveBlitter(SkBlitter* realBlitter,
                                         const SkIRect& ir,
                                         const SkIRect& clipBounds,
                                         bool isInverse)
        : fRealBlitter(realBlitter)
        // +1 byte offset so a 1-byte under-write due to precision error stays in fStorage
        , fMask((uint8_t*)fStorage + 1, ir, ir.width(), SkMask::kA8_Format)
        , fRow(nullptr)
        , fY(ir.fTop - 1)  // deliberately != any valid row so the first getRow() recomputes fRow
{
    SkASSERT(CanHandleRect(ir));
    SkASSERT(!isInverse);  // inverse fills are not supported by the mask blitter

    fClipRect = ir;
    if (!fClipRect.intersect(clipBounds)) {
        SkASSERT(0);
        fClipRect.setEmpty();
    }

    // +2 zeroes the extra byte that may be written at either end (see fStorage).
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 2);
}
228
// Intentionally fatal: callers are expected to accumulate into the mask row
// directly (via getRow) instead of paying for this virtual call.
void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    SK_ABORT("Don't use this; directly add alphas to the mask.");
}
232
// Accumulate a single alpha at (x, y) in the mask.
void MaskAdditiveBlitter::blitAntiH(int x, int y, SkAlpha alpha) {
    // x may be one pixel left of the bounds due to precision error (see fStorage).
    SkASSERT(x >= fMask.fBounds.fLeft - 1);
    add_alpha(&this->getRow(y)[x], alpha);
}
237
blitAntiH(int x,int y,int width,SkAlpha alpha)238 void MaskAdditiveBlitter::blitAntiH(int x, int y, int width, SkAlpha alpha) {
239 SkASSERT(x >= fMask.fBounds.fLeft - 1);
240 uint8_t* row = this->getRow(y);
241 for (int i = 0; i < width; ++i) {
242 add_alpha(&row[x + i], alpha);
243 }
244 }
245
blitV(int x,int y,int height,SkAlpha alpha)246 void MaskAdditiveBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
247 if (alpha == 0) {
248 return;
249 }
250 SkASSERT(x >= fMask.fBounds.fLeft - 1);
251 // This must be called as if this is a real blitter.
252 // So we directly set alpha rather than adding it.
253 uint8_t* row = this->getRow(y);
254 for (int i = 0; i < height; ++i) {
255 row[x] = alpha;
256 row += fMask.fRowBytes;
257 }
258 }
259
blitRect(int x,int y,int width,int height)260 void MaskAdditiveBlitter::blitRect(int x, int y, int width, int height) {
261 SkASSERT(x >= fMask.fBounds.fLeft - 1);
262 // This must be called as if this is a real blitter.
263 // So we directly set alpha rather than adding it.
264 uint8_t* row = this->getRow(y);
265 for (int i = 0; i < height; ++i) {
266 memset(row + x, 0xFF, width);
267 row += fMask.fRowBytes;
268 }
269 }
270
// Blit a rect whose 1-pixel left/right columns are partially covered
// (leftAlpha / rightAlpha) and whose interior [x+1, x+1+width) is fully covered.
void MaskAdditiveBlitter::blitAntiRect(int x,
                                       int y,
                                       int width,
                                       int height,
                                       SkAlpha leftAlpha,
                                       SkAlpha rightAlpha) {
    blitV(x, y, height, leftAlpha);
    blitV(x + 1 + width, y, height, rightAlpha);
    blitRect(x + 1, y, width, height);
}
281
// Additive blitter that accumulates one scanline of coverage into an
// SkAlphaRuns (run-length encoded) buffer and forwards it to the real blitter
// whenever work moves to a different pixel row.
class RunBasedAdditiveBlitter : public AdditiveBlitter {
public:
    RunBasedAdditiveBlitter(SkBlitter* realBlitter,
                            const SkIRect& ir,
                            const SkIRect& clipBounds,
                            bool isInverse);

    // Push out whatever is still buffered for the last row.
    ~RunBasedAdditiveBlitter() override { this->flush(); }

    SkBlitter* getRealBlitter(bool forceRealBlitter) override { return fRealBlitter; }

    void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
    void blitAntiH(int x, int y, SkAlpha alpha) override;
    void blitAntiH(int x, int y, int width, SkAlpha alpha) override;

    int getWidth() override { return fWidth; }

    void flush_if_y_changed(SkFixed y, SkFixed nextY) override {
        if (SkFixedFloorToInt(y) != SkFixedFloorToInt(nextY)) {
            this->flush();
        }
    }

protected:
    SkBlitter* fRealBlitter;

    int fCurrY;  // Current y coordinate.
    int fWidth;  // Widest row of region to be blitted
    int fLeft;   // Leftmost x coordinate in any row
    int fTop;    // Initial y coordinate (top of bounds)

    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int         fRunsToBuffer;
    void*       fRunsBuffer;
    int         fCurrentRun;
    SkAlphaRuns fRuns;

    // Position hint handed back to fRuns.add(); reset to 0 whenever a blit
    // starts left of the previous one.
    int fOffsetX;

    // True iff [x, x + width) lies inside this blitter's row.
    bool check(int x, int width) const { return x >= 0 && x + width <= fWidth; }

    // extra one to store the zero at the end
    int getRunsSz() const { return (fWidth + 1 + (fWidth + 2) / 2) * sizeof(int16_t); }

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(reinterpret_cast<uint8_t*>(fRunsBuffer) +
                                                 fCurrentRun * kRunsSz);
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

    // Blitting 0xFF and 0 is much faster so we snap alphas close to them
    SkAlpha snapAlpha(SkAlpha alpha) { return alpha > 247 ? 0xFF : alpha < 8 ? 0x00 : alpha; }

    // Snap the buffered alphas, hand the runs to the real blitter, and reset
    // for the next row. No-op when nothing has been accumulated (fCurrY < fTop).
    void flush() {
        if (fCurrY >= fTop) {
            SkASSERT(fCurrentRun < fRunsToBuffer);
            for (int x = 0; fRuns.fRuns[x]; x += fRuns.fRuns[x]) {
                // It seems that blitting 255 or 0 is much faster than blitting 254 or 1
                fRuns.fAlpha[x] = snapAlpha(fRuns.fAlpha[x]);
            }
            if (!fRuns.empty()) {
                // SkDEBUGCODE(fRuns.dump();)
                fRealBlitter->blitAntiH(fLeft, fCurrY, fRuns.fAlpha, fRuns.fRuns);
                this->advanceRuns();
                fOffsetX = 0;
            }
            fCurrY = fTop - 1;  // mark "nothing buffered"
        }
    }

    // Flush the previous row before accumulating into a different one.
    void checkY(int y) {
        if (y != fCurrY) {
            this->flush();
            fCurrY = y;
        }
    }
};
368
RunBasedAdditiveBlitter::RunBasedAdditiveBlitter(SkBlitter* realBlitter,
                                                 const SkIRect& ir,
                                                 const SkIRect& clipBounds,
                                                 bool isInverse) {
    fRealBlitter = realBlitter;

    SkIRect sectBounds;
    if (isInverse) {
        // We use the clip bounds instead of the ir, since we may be asked to
        // draw outside of the rect when we're a inverse filltype
        sectBounds = clipBounds;
    } else {
        if (!sectBounds.intersect(ir, clipBounds)) {
            sectBounds.setEmpty();
        }
    }

    const int left  = sectBounds.left();
    const int right = sectBounds.right();

    fLeft  = left;
    fWidth = right - left;
    fTop   = sectBounds.top();
    fCurrY = fTop - 1;  // anything < fTop means "nothing accumulated yet" (see flush)

    // Borrow scratch rows from the real blitter to hold the SkAlphaRuns buffers.
    fRunsToBuffer = realBlitter->requestRowsPreserved();
    fRunsBuffer   = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
    fCurrentRun   = -1;  // advanceRuns() below moves this to run 0

    this->advanceRuns();

    fOffsetX = 0;
}
402
// Accumulate a row of alphas starting at (x, y). The span is clipped to
// [fLeft, fLeft + fWidth); the stored runs covering it are then broken into
// single-pixel runs so each pixel's alpha can be accumulated individually.
void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    checkY(y);
    x -= fLeft;

    if (x < 0) {
        // Clip on the left: drop the pixels before fLeft.
        len += x;
        antialias -= x;
        x = 0;
    }
    len = std::min(len, fWidth - x);  // clip on the right
    SkASSERT(check(x, len));

    if (x < fOffsetX) {
        fOffsetX = 0;  // hint is past our start; restart the run search from 0
    }

    fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX);  // Break the run
    for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
        // Split this run into width-1 runs, replicating its alpha.
        for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
            fRuns.fRuns[x + i + j] = 1;
            fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
        }
        fRuns.fRuns[x + i] = 1;
    }
    // Now every pixel in the span is its own run; accumulate per pixel.
    for (int i = 0; i < len; ++i) {
        add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
    }
}
431
// Accumulate a single alpha at (x, y); silently ignored outside the row bounds.
void RunBasedAdditiveBlitter::blitAntiH(int x, int y, SkAlpha alpha) {
    checkY(y);
    x -= fLeft;

    if (x < fOffsetX) {
        fOffsetX = 0;  // hint is past our start; restart the run search from 0
    }

    if (this->check(x, 1)) {
        fOffsetX = fRuns.add(x, 0, 1, 0, alpha, fOffsetX);
    }
}
444
// Accumulate a constant alpha over [x, x + width) at row y; silently ignored
// if the span does not fit inside [fLeft, fLeft + fWidth).
void RunBasedAdditiveBlitter::blitAntiH(int x, int y, int width, SkAlpha alpha) {
    checkY(y);
    x -= fLeft;

    if (x < fOffsetX) {
        fOffsetX = 0;  // hint is past our start; restart the run search from 0
    }

    if (this->check(x, width)) {
        fOffsetX = fRuns.add(x, 0, width, 0, alpha, fOffsetX);
    }
}
457
458 // This exists specifically for concave path filling.
459 // In those cases, we can easily accumulate alpha greater than 0xFF.
// It overrides the blitAntiH entry points to saturate coverage at 0xFF
// (via safely_add_alpha) where the base class would assert on overflow.
class SafeRLEAdditiveBlitter : public RunBasedAdditiveBlitter {
public:
    SafeRLEAdditiveBlitter(SkBlitter* realBlitter,
                           const SkIRect& ir,
                           const SkIRect& clipBounds,
                           bool isInverse)
            : RunBasedAdditiveBlitter(realBlitter, ir, clipBounds, isInverse) {}

    void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
    void blitAntiH(int x, int y, SkAlpha alpha) override;
    void blitAntiH(int x, int y, int width, SkAlpha alpha) override;
};
472
// Same flow as RunBasedAdditiveBlitter::blitAntiH(x, y, antialias, len), but
// the per-pixel accumulation saturates at 0xFF instead of asserting.
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    checkY(y);
    x -= fLeft;

    if (x < 0) {
        // Clip on the left: drop the pixels before fLeft.
        len += x;
        antialias -= x;
        x = 0;
    }
    len = std::min(len, fWidth - x);  // clip on the right
    SkASSERT(check(x, len));

    if (x < fOffsetX) {
        fOffsetX = 0;  // hint is past our start; restart the run search from 0
    }

    fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX);  // Break the run
    for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
        // Split this run into width-1 runs, replicating its alpha.
        for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
            fRuns.fRuns[x + i + j] = 1;
            fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
        }
        fRuns.fRuns[x + i] = 1;
    }
    // Now every pixel in the span is its own run; accumulate with saturation.
    for (int i = 0; i < len; ++i) {
        safely_add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
    }
}
501
// Saturating single-pixel accumulate at (x, y); breaks the run at x first.
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, SkAlpha alpha) {
    checkY(y);
    x -= fLeft;

    if (x < fOffsetX) {
        fOffsetX = 0;  // hint is past our start; restart the run search from 0
    }

    if (check(x, 1)) {
        // Break the run
        fOffsetX = fRuns.add(x, 0, 1, 0, 0, fOffsetX);
        safely_add_alpha(&fRuns.fAlpha[x], alpha);
    }
}
516
// Saturating accumulate of a constant alpha over [x, x + width) at row y.
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, int width, SkAlpha alpha) {
    checkY(y);
    x -= fLeft;

    if (x < fOffsetX) {
        fOffsetX = 0;  // hint is past our start; restart the run search from 0
    }

    if (check(x, width)) {
        // Break the run
        fOffsetX = fRuns.add(x, 0, width, 0, 0, fOffsetX);
        // Step run-by-run so each run inside [x, x + width) gets alpha added
        // to its head exactly once.
        for (int i = x; i < x + width; i += fRuns.fRuns[i]) {
            safely_add_alpha(&fRuns.fAlpha[i], alpha);
        }
    }
}
533
534 // Return the alpha of a trapezoid whose height is 1
trapezoid_to_alpha(SkFixed l1,SkFixed l2)535 static SkAlpha trapezoid_to_alpha(SkFixed l1, SkFixed l2) {
536 SkASSERT(l1 >= 0 && l2 >= 0);
537 SkFixed area = (l1 + l2) / 2;
538 return SkTo<SkAlpha>(area >> 8);
539 }
540
541 // The alpha of right-triangle (a, a*b)
static SkAlpha partial_triangle_to_alpha(SkFixed a, SkFixed b) {
    SkASSERT(a <= SK_Fixed1);
#if 0
    // TODO(mtklein): skia:8877
    SkASSERT(b <= SK_Fixed1);
#endif

    // Approximating...
    // SkFixed area = SkFixedMul(a, SkFixedMul(a,b)) / 2;
    SkFixed area = (a >> 11) * (a >> 11) * (b >> 11);

#if 0
    // TODO(mtklein): skia:8877
    return SkTo<SkAlpha>(area >> 8);
#else
    // Mask to 8 bits: b can exceed SK_Fixed1 (see disabled assert / skia:8877),
    // which may push the approximate area past 0xFF after the shift.
    return SkTo<SkAlpha>((area >> 8) & 0xFF);
#endif
}
560
// Scale alpha by a fixed-point fraction (partialHeight), rounding to nearest.
static SkAlpha get_partial_alpha(SkAlpha alpha, SkFixed partialHeight) {
    return SkToU8(SkFixedRoundToInt(alpha * partialHeight));
}
564
get_partial_alpha(SkAlpha alpha,SkAlpha fullAlpha)565 static SkAlpha get_partial_alpha(SkAlpha alpha, SkAlpha fullAlpha) {
566 return (alpha * fullAlpha) >> 8;
567 }
568
569 // For SkFixed that's close to SK_Fixed1, we can't convert it to alpha by just shifting right.
570 // For example, when f = SK_Fixed1, right shifting 8 will get 256, but we need 255.
571 // This is rarely the problem so we'll only use this for blitting rectangles.
static SkAlpha fixed_to_alpha(SkFixed f) {
    SkASSERT(f <= SK_Fixed1);
    // get_partial_alpha rounds, so SK_Fixed1 maps to exactly 0xFF (not 256).
    return get_partial_alpha(0xFF, f);
}
576
577 // Suppose that line (l1, y)-(r1, y+1) intersects with (l2, y)-(r2, y+1),
578 // approximate (very coarsely) the x coordinate of the intersection.
approximate_intersection(SkFixed l1,SkFixed r1,SkFixed l2,SkFixed r2)579 static SkFixed approximate_intersection(SkFixed l1, SkFixed r1, SkFixed l2, SkFixed r2) {
580 if (l1 > r1) {
581 std::swap(l1, r1);
582 }
583 if (l2 > r2) {
584 std::swap(l2, r2);
585 }
586 return (std::max(l1, l2) + std::min(r1, r2)) / 2;
587 }
588
589 // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
// l/r: x-extent of the line within this row; dY: the line's vertical advance
// per unit x (assumed from callers — confirm); fullAlpha: value of a fully
// covered pixel. Writes SkFixedCeilToInt(r) alphas starting at alphas[0].
static void compute_alpha_above_line(SkAlpha* alphas,
                                     SkFixed l,
                                     SkFixed r,
                                     SkFixed dY,
                                     SkAlpha fullAlpha) {
    SkASSERT(l <= r);
    SkASSERT(l >> 16 == 0);
    int R = SkFixedCeilToInt(r);
    if (R == 0) {
        return;
    } else if (R == 1) {
        // The whole span falls inside a single pixel.
        alphas[0] = get_partial_alpha(((R << 17) - l - r) >> 9, fullAlpha);
    } else {
        SkFixed first = SK_Fixed1 - l;  // horizontal edge length of the left-most triangle
        SkFixed last = r - ((R - 1) << 16);  // horizontal edge length of the right-most triangle
        SkFixed firstH = SkFixedMul(first, dY);  // vertical edge of the left-most triangle
        alphas[0] = SkFixedMul(first, firstH) >> 9;  // triangle alpha
        SkFixed alpha16 = Sk32_sat_add(firstH, dY >> 1);  // rectangle plus triangle
        // Interior pixels: each column adds one pixel's worth of dY to the area.
        for (int i = 1; i < R - 1; ++i) {
            alphas[i] = alpha16 >> 8;
            alpha16 = Sk32_sat_add(alpha16, dY);
        }
        alphas[R - 1] = fullAlpha - partial_triangle_to_alpha(last, dY);
    }
}
615
616 // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
// Mirror of compute_alpha_above_line: coverage below the line. l/r: x-extent
// within this row; dY: vertical advance per unit x (assumed from callers —
// confirm); fullAlpha: value of a fully covered pixel.
static void compute_alpha_below_line(SkAlpha* alphas,
                                     SkFixed l,
                                     SkFixed r,
                                     SkFixed dY,
                                     SkAlpha fullAlpha) {
    SkASSERT(l <= r);
    SkASSERT(l >> 16 == 0);
    int R = SkFixedCeilToInt(r);
    if (R == 0) {
        return;
    } else if (R == 1) {
        // The whole span falls inside a single pixel: a simple trapezoid.
        alphas[0] = get_partial_alpha(trapezoid_to_alpha(l, r), fullAlpha);
    } else {
        SkFixed first = SK_Fixed1 - l;  // horizontal edge length of the left-most triangle
        SkFixed last = r - ((R - 1) << 16);  // horizontal edge length of the right-most triangle
        SkFixed lastH = SkFixedMul(last, dY);  // vertical edge of the right-most triangle
        alphas[R - 1] = SkFixedMul(last, lastH) >> 9;  // triangle alpha
        SkFixed alpha16 = Sk32_sat_add(lastH, dY >> 1);  // rectangle plus triangle
        // Interior pixels, walked right-to-left; each adds one pixel of dY.
        for (int i = R - 2; i > 0; i--) {
            alphas[i] = (alpha16 >> 8) & 0xFF;
            alpha16 = Sk32_sat_add(alpha16, dY);
        }
        alphas[0] = fullAlpha - partial_triangle_to_alpha(first, dY);
    }
}
642
643 // Note that if fullAlpha != 0xFF, we'll multiply alpha by fullAlpha
blit_single_alpha(AdditiveBlitter * blitter,int y,int x,SkAlpha alpha,SkAlpha fullAlpha,SkAlpha * maskRow,bool noRealBlitter)644 static void blit_single_alpha(AdditiveBlitter* blitter,
645 int y,
646 int x,
647 SkAlpha alpha,
648 SkAlpha fullAlpha,
649 SkAlpha* maskRow,
650 bool noRealBlitter) {
651 if (maskRow) {
652 if (fullAlpha == 0xFF && !noRealBlitter) { // noRealBlitter is needed for concave paths
653 maskRow[x] = alpha;
654 } else {
655 safely_add_alpha(&maskRow[x], get_partial_alpha(alpha, fullAlpha));
656 }
657 } else {
658 if (fullAlpha == 0xFF && !noRealBlitter) {
659 blitter->getRealBlitter()->blitV(x, y, 1, alpha);
660 } else {
661 blitter->blitAntiH(x, y, get_partial_alpha(alpha, fullAlpha));
662 }
663 }
664 }
665
blit_two_alphas(AdditiveBlitter * blitter,int y,int x,SkAlpha a1,SkAlpha a2,SkAlpha fullAlpha,SkAlpha * maskRow,bool noRealBlitter)666 static void blit_two_alphas(AdditiveBlitter* blitter,
667 int y,
668 int x,
669 SkAlpha a1,
670 SkAlpha a2,
671 SkAlpha fullAlpha,
672 SkAlpha* maskRow,
673 bool noRealBlitter) {
674 if (maskRow) {
675 safely_add_alpha(&maskRow[x], a1);
676 safely_add_alpha(&maskRow[x + 1], a2);
677 } else {
678 if (fullAlpha == 0xFF && !noRealBlitter) {
679 blitter->getRealBlitter()->blitAntiH2(x, y, a1, a2);
680 } else {
681 blitter->blitAntiH(x, y, a1);
682 blitter->blitAntiH(x + 1, y, a2);
683 }
684 }
685 }
686
blit_full_alpha(AdditiveBlitter * blitter,int y,int x,int len,SkAlpha fullAlpha,SkAlpha * maskRow,bool noRealBlitter)687 static void blit_full_alpha(AdditiveBlitter* blitter,
688 int y,
689 int x,
690 int len,
691 SkAlpha fullAlpha,
692 SkAlpha* maskRow,
693 bool noRealBlitter) {
694 if (maskRow) {
695 for (int i = 0; i < len; ++i) {
696 safely_add_alpha(&maskRow[x + i], fullAlpha);
697 }
698 } else {
699 if (fullAlpha == 0xFF && !noRealBlitter) {
700 blitter->getRealBlitter()->blitH(x, y, len);
701 } else {
702 blitter->blitAntiH(x, y, len, fullAlpha);
703 }
704 }
705 }
706
// Blit one pixel row of a trapezoid with upper edge (ul, ur) and lower edge
// (ll, lr) (fixed-point x coordinates). lDY/rDY are the left/right lines'
// vertical advances per unit x (assumed from callers — confirm). Coverage is
// computed as fullAlpha minus the areas excluded by the two slanted sides.
static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter,
                                   int y,
                                   SkFixed ul,
                                   SkFixed ur,
                                   SkFixed ll,
                                   SkFixed lr,
                                   SkFixed lDY,
                                   SkFixed rDY,
                                   SkAlpha fullAlpha,
                                   SkAlpha* maskRow,
                                   bool noRealBlitter) {
    int L = SkFixedFloorToInt(ul), R = SkFixedCeilToInt(lr);
    int len = R - L;

    if (len == 1) {
        // Single-pixel trapezoid: area is just the average of the two widths.
        SkAlpha alpha = trapezoid_to_alpha(ur - ul, lr - ll);
        blit_single_alpha(blitter, y, L, alpha, fullAlpha, maskRow, noRealBlitter);
        return;
    }

    // Scratch layout (in bytes): alphas[len+1], tempAlphas[len+1], runs[len+1]
    // int16_t entries. Short rows use the stack, long rows the heap.
    const int kQuickLen = 31;
    alignas(2) char quickMemory[(sizeof(SkAlpha) * 2 + sizeof(int16_t)) * (kQuickLen + 1)];
    SkAlpha* alphas;

    if (len <= kQuickLen) {
        alphas = (SkAlpha*)quickMemory;
    } else {
        // Allocates (len + 1) * 4 SkAlpha-sized bytes, matching quickMemory's layout.
        alphas = new SkAlpha[(len + 1) * (sizeof(SkAlpha) * 2 + sizeof(int16_t))];
    }

    SkAlpha* tempAlphas = alphas + len + 1;
    int16_t* runs = (int16_t*)(alphas + (len + 1) * 2);

    // Start fully covered; the two sides subtract their excluded areas below.
    for (int i = 0; i < len; ++i) {
        runs[i] = 1;
        alphas[i] = fullAlpha;
    }
    runs[len] = 0;

    // Exclude the area left of the line (ul, y)-(ll, y+1).
    int uL = SkFixedFloorToInt(ul);
    int lL = SkFixedCeilToInt(ll);
    if (uL + 2 == lL) {  // We only need to compute two triangles, accelerate this special case
        SkFixed first = SkIntToFixed(uL) + SK_Fixed1 - ul;
        SkFixed second = ll - ul - first;
        SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, lDY);
        SkAlpha a2 = partial_triangle_to_alpha(second, lDY);
        alphas[0] = alphas[0] > a1 ? alphas[0] - a1 : 0;
        alphas[1] = alphas[1] > a2 ? alphas[1] - a2 : 0;
    } else {
        compute_alpha_below_line(
                tempAlphas + uL - L, ul - SkIntToFixed(uL), ll - SkIntToFixed(uL), lDY, fullAlpha);
        // Subtract the excluded coverage, clamping at zero.
        for (int i = uL; i < lL; ++i) {
            if (alphas[i - L] > tempAlphas[i - L]) {
                alphas[i - L] -= tempAlphas[i - L];
            } else {
                alphas[i - L] = 0;
            }
        }
    }

    // Exclude the area right of the line (ur, y)-(lr, y+1).
    int uR = SkFixedFloorToInt(ur);
    int lR = SkFixedCeilToInt(lr);
    if (uR + 2 == lR) {  // We only need to compute two triangles, accelerate this special case
        SkFixed first = SkIntToFixed(uR) + SK_Fixed1 - ur;
        SkFixed second = lr - ur - first;
        SkAlpha a1 = partial_triangle_to_alpha(first, rDY);
        SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, rDY);
        alphas[len - 2] = alphas[len - 2] > a1 ? alphas[len - 2] - a1 : 0;
        alphas[len - 1] = alphas[len - 1] > a2 ? alphas[len - 1] - a2 : 0;
    } else {
        compute_alpha_above_line(
                tempAlphas + uR - L, ur - SkIntToFixed(uR), lr - SkIntToFixed(uR), rDY, fullAlpha);
        // Subtract the excluded coverage, clamping at zero.
        for (int i = uR; i < lR; ++i) {
            if (alphas[i - L] > tempAlphas[i - L]) {
                alphas[i - L] -= tempAlphas[i - L];
            } else {
                alphas[i - L] = 0;
            }
        }
    }

    if (maskRow) {
        // Accumulate directly into the mask row.
        for (int i = 0; i < len; ++i) {
            safely_add_alpha(&maskRow[L + i], alphas[i]);
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            // Real blitter is faster than RunBasedAdditiveBlitter
            blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs);
        } else {
            blitter->blitAntiH(L, y, alphas, len);
        }
    }

    if (len > kQuickLen) {
        delete[] alphas;
    }
}
805
// Blit the whole trapezoid strip for row y: upper edge from ul to ur, lower
// edge from ll to lr. The fully covered middle [joinLeft, joinRite) is blitted
// as a solid run; the partially covered slopes on either side go through the
// single/two-pixel fast paths or the general AAA row helper.
static void blit_trapezoid_row(AdditiveBlitter* blitter,
                               int y,
                               SkFixed ul,
                               SkFixed ur,
                               SkFixed ll,
                               SkFixed lr,
                               SkFixed lDY,
                               SkFixed rDY,
                               SkAlpha fullAlpha,
                               SkAlpha* maskRow,
                               bool noRealBlitter) {
    SkASSERT(lDY >= 0 && rDY >= 0);  // We should only send in the absolute value

    if (ul > ur) {
        return;
    }

    // Edge crosses. Approximate it. This should only happen due to precision limit,
    // so the approximation could be very coarse.
    if (ll > lr) {
        ll = lr = approximate_intersection(ul, ll, ur, lr);
    }

    if (ul == ur && ll == lr) {
        return;  // empty trapezoid
    }

    // We're going to use the left line ul-ll and the right line ur-lr
    // to exclude the area that's not covered by the path.
    // Swapping (ul, ll) or (ur, lr) won't affect that exclusion
    // so we'll do that for simplicity.
    if (ul > ll) {
        std::swap(ul, ll);
    }
    if (ur > lr) {
        std::swap(ur, lr);
    }

    SkFixed joinLeft = SkFixedCeilToFixed(ll);
    SkFixed joinRite = SkFixedFloorToFixed(ur);
    if (joinLeft <= joinRite) {  // There's a rect from joinLeft to joinRite that we can blit
        // Partially covered pixels left of joinLeft.
        if (ul < joinLeft) {
            int len = SkFixedCeilToInt(joinLeft - ul);
            if (len == 1) {
                SkAlpha alpha = trapezoid_to_alpha(joinLeft - ul, joinLeft - ll);
                blit_single_alpha(blitter,
                                  y,
                                  ul >> 16,
                                  alpha,
                                  fullAlpha,
                                  maskRow,
                                  noRealBlitter);
            } else if (len == 2) {
                // Two pixels: split the covered area into two right triangles.
                SkFixed first = joinLeft - SK_Fixed1 - ul;
                SkFixed second = ll - ul - first;
                SkAlpha a1 = partial_triangle_to_alpha(first, lDY);
                SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, lDY);
                blit_two_alphas(blitter,
                                y,
                                ul >> 16,
                                a1,
                                a2,
                                fullAlpha,
                                maskRow,
                                noRealBlitter);
            } else {
                // General case: the right side of this sub-trapezoid is the
                // vertical line x = joinLeft.
                blit_aaa_trapezoid_row(blitter,
                                       y,
                                       ul,
                                       joinLeft,
                                       ll,
                                       joinLeft,
                                       lDY,
                                       SK_MaxS32,
                                       fullAlpha,
                                       maskRow,
                                       noRealBlitter);
            }
        }
        // SkAAClip requires that we blit from left to right.
        // Hence we must blit [ul, joinLeft] before blitting [joinLeft, joinRite]
        if (joinLeft < joinRite) {
            blit_full_alpha(blitter,
                            y,
                            SkFixedFloorToInt(joinLeft),
                            SkFixedFloorToInt(joinRite - joinLeft),
                            fullAlpha,
                            maskRow,
                            noRealBlitter);
        }
        // Partially covered pixels right of joinRite.
        if (lr > joinRite) {
            int len = SkFixedCeilToInt(lr - joinRite);
            if (len == 1) {
                SkAlpha alpha = trapezoid_to_alpha(ur - joinRite, lr - joinRite);
                blit_single_alpha(blitter,
                                  y,
                                  joinRite >> 16,
                                  alpha,
                                  fullAlpha,
                                  maskRow,
                                  noRealBlitter);
            } else if (len == 2) {
                // Two pixels: split the covered area into two right triangles.
                SkFixed first = joinRite + SK_Fixed1 - ur;
                SkFixed second = lr - ur - first;
                SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, rDY);
                SkAlpha a2 = partial_triangle_to_alpha(second, rDY);
                blit_two_alphas(blitter,
                                y,
                                joinRite >> 16,
                                a1,
                                a2,
                                fullAlpha,
                                maskRow,
                                noRealBlitter);
            } else {
                // General case: the left side of this sub-trapezoid is the
                // vertical line x = joinRite.
                blit_aaa_trapezoid_row(blitter,
                                       y,
                                       joinRite,
                                       ur,
                                       joinRite,
                                       lr,
                                       SK_MaxS32,
                                       rDY,
                                       fullAlpha,
                                       maskRow,
                                       noRealBlitter);
            }
        }
    } else {
        // No fully covered middle pixels; compute the whole row analytically.
        blit_aaa_trapezoid_row(blitter,
                               y,
                               ul,
                               ur,
                               ll,
                               lr,
                               lDY,
                               rDY,
                               fullAlpha,
                               maskRow,
                               noRealBlitter);
    }
}
948
compare_edges(const SkAnalyticEdge * a,const SkAnalyticEdge * b)949 static bool compare_edges(const SkAnalyticEdge* a, const SkAnalyticEdge* b) {
950 if (a->fUpperY != b->fUpperY) {
951 return a->fUpperY < b->fUpperY;
952 }
953
954 if (a->fX != b->fX) {
955 return a->fX < b->fX;
956 }
957
958 return a->fDX < b->fDX;
959 }
960
sort_edges(SkAnalyticEdge * list[],int count,SkAnalyticEdge ** last)961 static SkAnalyticEdge* sort_edges(SkAnalyticEdge* list[], int count, SkAnalyticEdge** last) {
962 SkTQSort(list, list + count, compare_edges);
963
964 // now make the edges linked in sorted order
965 for (int i = 1; i < count; ++i) {
966 list[i - 1]->fNext = list[i];
967 list[i]->fPrev = list[i - 1];
968 }
969
970 *last = list[count - 1];
971 return list[0];
972 }
973
// Debug-only sanity check: walk the linked list (terminated by the sentinel
// with fUpperY == SK_MaxS32) and assert the edges are sorted by fUpperY.
static void validate_sort(const SkAnalyticEdge* edge) {
#ifdef SK_DEBUG
    SkFixed prevUpperY = SkIntToFixed(-32768);
    for (; edge->fUpperY != SK_MaxS32; edge = (SkAnalyticEdge*)edge->fNext) {
        edge->validate();
        SkASSERT(prevUpperY <= edge->fUpperY);
        prevUpperY = edge->fUpperY;
    }
#endif
}
987
// For an edge, we consider it smooth if the Dx doesn't change much, and Dy is large enough
// For curves that are updating, the Dx is not changing much if fQDx/fCDx and fQDy/fCDy are
// relatively large compared to fQDDx/QCDDx and fQDDy/fCDDy
static bool is_smooth_enough(SkAnalyticEdge* thisEdge, SkAnalyticEdge* nextEdge, int stop_y) {
    if (thisEdge->fCurveCount < 0) {
        // Cubic edge: the second forward differences (fCDDx/fCDDy, scaled down
        // by the curve shift) must be small relative to the first differences,
        // and the current per-step Dy must be at least one pixel (SK_Fixed1).
        const SkCubicEdge& cEdge = static_cast<SkAnalyticCubicEdge*>(thisEdge)->fCEdge;
        int ddshift = cEdge.fCurveShift;
        return SkAbs32(cEdge.fCDx) >> 1 >= SkAbs32(cEdge.fCDDx) >> ddshift &&
               SkAbs32(cEdge.fCDy) >> 1 >= SkAbs32(cEdge.fCDDy) >> ddshift &&
               // current Dy is (fCDy - (fCDDy >> ddshift)) >> dshift
               (cEdge.fCDy - (cEdge.fCDDy >> ddshift)) >> cEdge.fCubicDShift >= SK_Fixed1;
    } else if (thisEdge->fCurveCount > 0) {
        // Quadratic edge: same criterion using the quad's forward differences.
        const SkQuadraticEdge& qEdge = static_cast<SkAnalyticQuadraticEdge*>(thisEdge)->fQEdge;
        return SkAbs32(qEdge.fQDx) >> 1 >= SkAbs32(qEdge.fQDDx) &&
               SkAbs32(qEdge.fQDy) >> 1 >= SkAbs32(qEdge.fQDDy) &&
               // current Dy is (fQDy - fQDDy) >> shift
               (qEdge.fQDy - qEdge.fQDDy) >> qEdge.fCurveShift >= SK_Fixed1;
    }
    // Line segment: compare this edge's slope against the following edge's
    // (Sk32_sat_sub guards against overflow of the slope difference).
    // DDx should be small and Dy should be large
    return SkAbs32(Sk32_sat_sub(nextEdge->fDX, thisEdge->fDX)) <= SK_Fixed1 &&
           nextEdge->fLowerY - nextEdge->fUpperY >= SK_Fixed1;
}
1010
1011 // Check if the leftE and riteE are changing smoothly in terms of fDX.
1012 // If yes, we can later skip the fractional y and directly jump to integer y.
is_smooth_enough(SkAnalyticEdge * leftE,SkAnalyticEdge * riteE,SkAnalyticEdge * currE,int stop_y)1013 static bool is_smooth_enough(SkAnalyticEdge* leftE,
1014 SkAnalyticEdge* riteE,
1015 SkAnalyticEdge* currE,
1016 int stop_y) {
1017 if (currE->fUpperY >= SkLeftShift(stop_y, 16)) {
1018 return false; // We're at the end so we won't skip anything
1019 }
1020 if (leftE->fLowerY + SK_Fixed1 < riteE->fLowerY) {
1021 return is_smooth_enough(leftE, currE, stop_y); // Only leftE is changing
1022 } else if (leftE->fLowerY > riteE->fLowerY + SK_Fixed1) {
1023 return is_smooth_enough(riteE, currE, stop_y); // Only riteE is changing
1024 }
1025
1026 // Now both edges are changing, find the second next edge
1027 SkAnalyticEdge* nextCurrE = currE->fNext;
1028 if (nextCurrE->fUpperY >= stop_y << 16) { // Check if we're at the end
1029 return false;
1030 }
1031 // Ensure that currE is the next left edge and nextCurrE is the next right edge. Swap if not.
1032 if (nextCurrE->fUpperX < currE->fUpperX) {
1033 std::swap(currE, nextCurrE);
1034 }
1035 return is_smooth_enough(leftE, currE, stop_y) && is_smooth_enough(riteE, nextCurrE, stop_y);
1036 }
1037
// Walk the edges of a convex path from start_y to stop_y. Convexity means at
// most one [left, rite] span exists per scan interval, so only two edges
// (leftE/riteE) are active at a time, with currE pointing at the next edge
// waiting to become active. Each y-interval is blitted either as an
// axis-aligned rectangle (both edges vertical) or as trapezoid rows.
// leftBound/riteBound clamp all x values; isUsingMask selects the
// MaskAdditiveBlitter row-advance path.
static void aaa_walk_convex_edges(SkAnalyticEdge* prevHead,
                                  AdditiveBlitter* blitter,
                                  int start_y,
                                  int stop_y,
                                  SkFixed leftBound,
                                  SkFixed riteBound,
                                  bool isUsingMask) {
    validate_sort((SkAnalyticEdge*)prevHead->fNext);

    SkAnalyticEdge* leftE = (SkAnalyticEdge*)prevHead->fNext;
    SkAnalyticEdge* riteE = (SkAnalyticEdge*)leftE->fNext;
    SkAnalyticEdge* currE = (SkAnalyticEdge*)riteE->fNext;

    // Start the walk where both initial edges exist.
    SkFixed y = std::max(leftE->fUpperY, riteE->fUpperY);

    for (;;) {
        // We have to check fLowerY first because some edges might be alone (e.g., there's only
        // a left edge but no right edge in a given y scan line) due to precision limit.
        while (leftE->fLowerY <= y) { // Due to smooth jump, we may pass multiple short edges
            if (!leftE->update(y)) {
                // leftE is exhausted; promote currE to be the new left edge.
                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
                    goto END_WALK;
                }
                leftE = currE;
                currE = (SkAnalyticEdge*)currE->fNext;
            }
        }
        while (riteE->fLowerY <= y) { // Due to smooth jump, we may pass multiple short edges
            if (!riteE->update(y)) {
                // riteE is exhausted; promote currE to be the new right edge.
                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
                    goto END_WALK;
                }
                riteE = currE;
                currE = (SkAnalyticEdge*)currE->fNext;
            }
        }

        SkASSERT(leftE);
        SkASSERT(riteE);

        // check our bottom clip
        if (SkFixedFloorToInt(y) >= stop_y) {
            break;
        }

        SkASSERT(SkFixedFloorToInt(leftE->fUpperY) <= stop_y);
        SkASSERT(SkFixedFloorToInt(riteE->fUpperY) <= stop_y);

        leftE->goY(y);
        riteE->goY(y);

        // Keep leftE on the left (ties broken by slope).
        if (leftE->fX > riteE->fX || (leftE->fX == riteE->fX && leftE->fDX > riteE->fDX)) {
            std::swap(leftE, riteE);
        }

        SkFixed local_bot_fixed = std::min(leftE->fLowerY, riteE->fLowerY);
        if (is_smooth_enough(leftE, riteE, currE, stop_y)) {
            // Slopes are changing smoothly; snap the interval bottom up to an
            // integer y so we can skip fractional scan lines.
            local_bot_fixed = SkFixedCeilToFixed(local_bot_fixed);
        }
        local_bot_fixed = std::min(local_bot_fixed, SkIntToFixed(stop_y));

        SkFixed left = std::max(leftBound, leftE->fX);
        SkFixed dLeft = leftE->fDX;
        SkFixed rite = std::min(riteBound, riteE->fX);
        SkFixed dRite = riteE->fDX;
        if (0 == (dLeft | dRite)) {
            // Fast path: both edges are vertical, so the covered region is an
            // axis-aligned rectangle. Blit the partial top row, the full-height
            // middle, and the partial bottom row, with fractional columns at
            // the left/right sides.
            int fullLeft = SkFixedCeilToInt(left);
            int fullRite = SkFixedFloorToInt(rite);
            SkFixed partialLeft = SkIntToFixed(fullLeft) - left;
            SkFixed partialRite = rite - SkIntToFixed(fullRite);
            int fullTop = SkFixedCeilToInt(y);
            int fullBot = SkFixedFloorToInt(local_bot_fixed);
            SkFixed partialTop = SkIntToFixed(fullTop) - y;
            SkFixed partialBot = local_bot_fixed - SkIntToFixed(fullBot);
            if (fullTop > fullBot) { // The rectangle is within one pixel height...
                partialTop -= (SK_Fixed1 - partialBot);
                partialBot = 0;
            }

            if (fullRite >= fullLeft) {
                if (partialTop > 0) { // blit first partial row
                    if (partialLeft > 0) {
                        blitter->blitAntiH(fullLeft - 1,
                                           fullTop - 1,
                                           fixed_to_alpha(SkFixedMul(partialTop, partialLeft)));
                    }
                    blitter->blitAntiH(
                            fullLeft, fullTop - 1, fullRite - fullLeft, fixed_to_alpha(partialTop));
                    if (partialRite > 0) {
                        blitter->blitAntiH(fullRite,
                                           fullTop - 1,
                                           fixed_to_alpha(SkFixedMul(partialTop, partialRite)));
                    }
                    blitter->flush_if_y_changed(y, y + partialTop);
                }

                // Blit all full-height rows from fullTop to fullBot
                if (fullBot > fullTop &&
                    // SkAAClip cannot handle the empty rect so check the non-emptiness here
                    // (bug chromium:662800)
                    (fullRite > fullLeft || fixed_to_alpha(partialLeft) > 0 ||
                     fixed_to_alpha(partialRite) > 0)) {
                    blitter->getRealBlitter()->blitAntiRect(fullLeft - 1,
                                                            fullTop,
                                                            fullRite - fullLeft,
                                                            fullBot - fullTop,
                                                            fixed_to_alpha(partialLeft),
                                                            fixed_to_alpha(partialRite));
                }

                if (partialBot > 0) { // blit last partial row
                    if (partialLeft > 0) {
                        blitter->blitAntiH(fullLeft - 1,
                                           fullBot,
                                           fixed_to_alpha(SkFixedMul(partialBot, partialLeft)));
                    }
                    blitter->blitAntiH(
                            fullLeft, fullBot, fullRite - fullLeft, fixed_to_alpha(partialBot));
                    if (partialRite > 0) {
                        blitter->blitAntiH(fullRite,
                                           fullBot,
                                           fixed_to_alpha(SkFixedMul(partialBot, partialRite)));
                    }
                }
            } else {
                // Normal conditions, this means left and rite are within the same pixel, but if
                // both left and rite were < leftBounds or > rightBounds, both edges are clipped and
                // we should not do any blitting (particularly since the negative width saturates to
                // full alpha).
                SkFixed width = rite - left;
                if (width > 0) {
                    if (partialTop > 0) {
                        blitter->blitAntiH(fullLeft - 1,
                                           fullTop - 1,
                                           1,
                                           fixed_to_alpha(SkFixedMul(partialTop, width)));
                        blitter->flush_if_y_changed(y, y + partialTop);
                    }
                    if (fullBot > fullTop) {
                        blitter->getRealBlitter()->blitV(
                                fullLeft - 1, fullTop, fullBot - fullTop, fixed_to_alpha(width));
                    }
                    if (partialBot > 0) {
                        blitter->blitAntiH(fullLeft - 1,
                                           fullBot,
                                           1,
                                           fixed_to_alpha(SkFixedMul(partialBot, width)));
                    }
                }
            }

            y = local_bot_fixed;
        } else {
            // The following constant are used to snap X
            // We snap X mainly for speedup (no tiny triangle) and
            // avoiding edge cases caused by precision errors
            const SkFixed kSnapDigit = SK_Fixed1 >> 4;
            const SkFixed kSnapHalf = kSnapDigit >> 1;
            const SkFixed kSnapMask = (-1 ^ (kSnapDigit - 1));
            left += kSnapHalf;
            rite += kSnapHalf; // For fast rounding

            // Number of blit_trapezoid_row calls we'll have
            int count = SkFixedCeilToInt(local_bot_fixed) - SkFixedFloorToInt(y);

            // If we're using mask blitter, we advance the mask row in this function
            // to save some "if" condition checks.
            SkAlpha* maskRow = isUsingMask
                                       ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16)
                                       : nullptr;

            // Instead of writing one loop that handles both partial-row blit_trapezoid_row
            // and full-row trapezoid_row together, we use the following 3-stage flow to
            // handle partial-row blit and full-row blit separately. It will save us much time
            // on changing y, left, and rite.
            if (count > 1) {
                if ((int)(y & 0xFFFF0000) != y) { // There's a partial-row on the top
                    count--;
                    SkFixed nextY = SkFixedCeilToFixed(y + 1);
                    SkFixed dY = nextY - y;
                    SkFixed nextLeft = left + SkFixedMul(dLeft, dY);
                    SkFixed nextRite = rite + SkFixedMul(dRite, dY);
                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                             (nextLeft & kSnapMask) >= leftBound &&
                             (nextRite & kSnapMask) <= riteBound);
                    blit_trapezoid_row(blitter,
                                       y >> 16,
                                       left & kSnapMask,
                                       rite & kSnapMask,
                                       nextLeft & kSnapMask,
                                       nextRite & kSnapMask,
                                       leftE->fDY,
                                       riteE->fDY,
                                       get_partial_alpha(0xFF, dY),
                                       maskRow,
                                       /*noRealBlitter=*/false);
                    blitter->flush_if_y_changed(y, nextY);
                    left = nextLeft;
                    rite = nextRite;
                    y = nextY;
                }

                while (count > 1) { // Full rows in the middle
                    count--;
                    if (isUsingMask) {
                        maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
                    }
                    SkFixed nextY = y + SK_Fixed1, nextLeft = left + dLeft, nextRite = rite + dRite;
                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                             (nextLeft & kSnapMask) >= leftBound &&
                             (nextRite & kSnapMask) <= riteBound);
                    blit_trapezoid_row(blitter,
                                       y >> 16,
                                       left & kSnapMask,
                                       rite & kSnapMask,
                                       nextLeft & kSnapMask,
                                       nextRite & kSnapMask,
                                       leftE->fDY,
                                       riteE->fDY,
                                       0xFF,
                                       maskRow,
                                       /*noRealBlitter=*/false);
                    blitter->flush_if_y_changed(y, nextY);
                    left = nextLeft;
                    rite = nextRite;
                    y = nextY;
                }
            }

            if (isUsingMask) {
                maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
            }

            SkFixed dY = local_bot_fixed - y; // partial-row on the bottom
            SkASSERT(dY <= SK_Fixed1);
            // Smooth jumping to integer y may make the last nextLeft/nextRite out of bound.
            // Take them back into the bound here.
            // Note that we substract kSnapHalf later so we have to add them to leftBound/riteBound
            SkFixed nextLeft = std::max(left + SkFixedMul(dLeft, dY), leftBound + kSnapHalf);
            SkFixed nextRite = std::min(rite + SkFixedMul(dRite, dY), riteBound + kSnapHalf);
            SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                     (nextLeft & kSnapMask) >= leftBound && (nextRite & kSnapMask) <= riteBound);
            blit_trapezoid_row(blitter,
                               y >> 16,
                               left & kSnapMask,
                               rite & kSnapMask,
                               nextLeft & kSnapMask,
                               nextRite & kSnapMask,
                               leftE->fDY,
                               riteE->fDY,
                               get_partial_alpha(0xFF, dY),
                               maskRow,
                               /*noRealBlitter=*/false);
            blitter->flush_if_y_changed(y, local_bot_fixed);
            left = nextLeft;
            rite = nextRite;
            y = local_bot_fixed;
            // Undo the rounding bias added before the snapping stage.
            left -= kSnapHalf;
            rite -= kSnapHalf;
        }

        // Record where we stopped so the next iteration (or the caller's
        // bookkeeping) sees consistent edge state.
        leftE->fX = left;
        riteE->fX = rite;
        leftE->fY = riteE->fY = y;
    }

END_WALK:;
}
1306
update_next_next_y(SkFixed y,SkFixed nextY,SkFixed * nextNextY)1307 static void update_next_next_y(SkFixed y, SkFixed nextY, SkFixed* nextNextY) {
1308 *nextNextY = y > nextY && y < *nextNextY ? y : *nextNextY;
1309 }
1310
check_intersection(const SkAnalyticEdge * edge,SkFixed nextY,SkFixed * nextNextY)1311 static void check_intersection(const SkAnalyticEdge* edge, SkFixed nextY, SkFixed* nextNextY) {
1312 if (edge->fPrev->fPrev && edge->fPrev->fX + edge->fPrev->fDX > edge->fX + edge->fDX) {
1313 *nextNextY = nextY + (SK_Fixed1 >> SkAnalyticEdge::kDefaultAccuracy);
1314 }
1315 }
1316
check_intersection_fwd(const SkAnalyticEdge * edge,SkFixed nextY,SkFixed * nextNextY)1317 static void check_intersection_fwd(const SkAnalyticEdge* edge, SkFixed nextY, SkFixed* nextNextY) {
1318 if (edge->fNext->fNext && edge->fX + edge->fDX > edge->fNext->fX + edge->fNext->fDX) {
1319 *nextNextY = nextY + (SK_Fixed1 >> SkAnalyticEdge::kDefaultAccuracy);
1320 }
1321 }
1322
// Insert the run of edges that become active at scan position y into the
// x-sorted active list (newEdge is the first not-yet-active edge), folding
// each edge's interesting y values (lowerY, possible intersections) into
// *nextNextY along the way.
static void insert_new_edges(SkAnalyticEdge* newEdge, SkFixed y, SkFixed* nextNextY) {
    if (newEdge->fUpperY > y) {
        // Nothing activates yet; just remember when the first new edge starts.
        update_next_next_y(newEdge->fUpperY, y, nextNextY);
        return;
    }
    SkAnalyticEdge* prev = newEdge->fPrev;
    if (prev->fX <= newEdge->fX) {
        // Fast path: the new edges are already in x order where they sit.
        while (newEdge->fUpperY <= y) {
            check_intersection(newEdge, y, nextNextY);
            update_next_next_y(newEdge->fLowerY, y, nextNextY);
            newEdge = newEdge->fNext;
        }
        update_next_next_y(newEdge->fUpperY, y, nextNextY);
        return;
    }
    // find first x pos to insert
    SkAnalyticEdge* start = backward_insert_start(prev, newEdge->fX);
    // insert the lot, fixing up the links as we go
    do {
        SkAnalyticEdge* next = newEdge->fNext;
        do {
            // Walk forward from `start` to find where newEdge belongs by x.
            if (start->fNext == newEdge) {
                goto nextEdge;  // already in position; skip the relink
            }
            SkAnalyticEdge* after = start->fNext;
            if (after->fX >= newEdge->fX) {
                break;
            }
            SkASSERT(start != after);
            start = after;
        } while (true);
        remove_edge(newEdge);
        insert_edge_after(newEdge, start);
    nextEdge:
        check_intersection(newEdge, y, nextNextY);
        check_intersection_fwd(newEdge, y, nextNextY);
        update_next_next_y(newEdge->fLowerY, y, nextNextY);
        start = newEdge;
        newEdge = next;
    } while (newEdge->fUpperY <= y);
    update_next_next_y(newEdge->fUpperY, y, nextNextY);
}
1365
// Debug-only check: every edge active at scan position y must be correctly
// doubly linked, have a non-inverted y span, and be x-sorted after its
// predecessor (the head sentinel is exempt from the x check).
static void validate_edges_for_y(const SkAnalyticEdge* edge, SkFixed y) {
#ifdef SK_DEBUG
    for (; edge->fUpperY <= y; edge = edge->fNext) {
        SkASSERT(edge->fPrev && edge->fNext);
        SkASSERT(edge->fPrev->fNext == edge);
        SkASSERT(edge->fNext->fPrev == edge);
        SkASSERT(edge->fUpperY <= edge->fLowerY);
        SkASSERT(edge->fPrev->fPrev == nullptr || edge->fPrev->fX <= edge->fX);
    }
#endif
}
1378
1379 // Return true if prev->fX, next->fX are too close in the current pixel row.
edges_too_close(SkAnalyticEdge * prev,SkAnalyticEdge * next,SkFixed lowerY)1380 static bool edges_too_close(SkAnalyticEdge* prev, SkAnalyticEdge* next, SkFixed lowerY) {
1381 // When next->fDX == 0, prev->fX >= next->fX - SkAbs32(next->fDX) would be false
1382 // even if prev->fX and next->fX are close and within one pixel (e.g., prev->fX == 0.1,
1383 // next->fX == 0.9). Adding SLACK = 1 to the formula would guarantee it to be true if two
1384 // edges prev and next are within one pixel.
1385 constexpr SkFixed SLACK = SK_Fixed1;
1386
1387 // Note that even if the following test failed, the edges might still be very close to each
1388 // other at some point within the current pixel row because of prev->fDX and next->fDX.
1389 // However, to handle that case, we have to sacrafice more performance.
1390 // I think the current quality is good enough (mainly by looking at Nebraska-StateSeal.svg)
1391 // so I'll ignore fDX for performance tradeoff.
1392 return next && prev && next->fUpperY < lowerY &&
1393 prev->fX + SLACK >= next->fX - SkAbs32(next->fDX);
1394 // The following is more accurate but also slower.
1395 // return (prev && prev->fPrev && next && next->fNext != nullptr && next->fUpperY < lowerY &&
1396 // prev->fX + SkAbs32(prev->fDX) + SLACK >= next->fX - SkAbs32(next->fDX));
1397 }
1398
1399 // This function exists for the case where the previous rite edge is removed because
1400 // its fLowerY <= nextY
edges_too_close(int prevRite,SkFixed ul,SkFixed ll)1401 static bool edges_too_close(int prevRite, SkFixed ul, SkFixed ll) {
1402 return prevRite > SkFixedFloorToInt(ul) || prevRite > SkFixedFloorToInt(ll);
1403 }
1404
// General (possibly concave / inverse-filled) edge walker. Maintains the full
// active edge list between the prevHead/nextTail sentinels, accumulates the
// winding count per the fill type, and blits one trapezoid row per
// [y, nextY) interval. nextY advances by at most one pixel and may be a
// fraction (1/2 or 1/4 pixel, tracked by yShift) when edge endpoints or
// predicted intersections fall inside the row. skipIntersect disables the
// intersection prediction when the path already has many points.
static void aaa_walk_edges(SkAnalyticEdge* prevHead,
                           SkAnalyticEdge* nextTail,
                           SkPathFillType fillType,
                           AdditiveBlitter* blitter,
                           int start_y,
                           int stop_y,
                           SkFixed leftClip,
                           SkFixed rightClip,
                           bool isUsingMask,
                           bool forceRLE,
                           bool skipIntersect) {
    prevHead->fX = prevHead->fUpperX = leftClip;
    nextTail->fX = nextTail->fUpperX = rightClip;
    SkFixed y = std::max(prevHead->fNext->fUpperY, SkIntToFixed(start_y));
    SkFixed nextNextY = SK_MaxS32;

    {
        // Advance every edge that is already active at the starting y, and
        // seed nextNextY with the first interesting y below it.
        SkAnalyticEdge* edge;
        for (edge = prevHead->fNext; edge->fUpperY <= y; edge = edge->fNext) {
            edge->goY(y);
            update_next_next_y(edge->fLowerY, y, &nextNextY);
        }
        update_next_next_y(edge->fUpperY, y, &nextNextY);
    }

    int windingMask = SkPathFillType_IsEvenOdd(fillType) ? 1 : -1;
    bool isInverse = SkPathFillType_IsInverse(fillType);

    if (isInverse && SkIntToFixed(start_y) != y) {
        // Inverse fill: everything above the first edge is inside; blit the
        // full-width band from start_y down to y (with a partial row at y).
        int width = SkFixedFloorToInt(rightClip - leftClip);
        if (SkFixedFloorToInt(y) != start_y) {
            blitter->getRealBlitter()->blitRect(
                    SkFixedFloorToInt(leftClip), start_y, width, SkFixedFloorToInt(y) - start_y);
            start_y = SkFixedFloorToInt(y);
        }
        SkAlpha* maskRow =
                isUsingMask ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(start_y) : nullptr;
        blit_full_alpha(blitter,
                        start_y,
                        SkFixedFloorToInt(leftClip),
                        width,
                        fixed_to_alpha(y - SkIntToFixed(start_y)),
                        maskRow,
                        false);
    }

    while (true) {
        int w = 0;
        bool in_interval = isInverse;
        SkFixed prevX = prevHead->fX;
        SkFixed nextY = std::min(nextNextY, SkFixedCeilToFixed(y + 1));
        SkAnalyticEdge* currE = prevHead->fNext;
        SkAnalyticEdge* leftE = prevHead;
        SkFixed left = leftClip;
        SkFixed leftDY = 0;
        int prevRite = SkFixedFloorToInt(leftClip);

        nextNextY = SK_MaxS32;

        // nextY is quantized to quarter pixels; yShift records whether this
        // row is a 1/4 (2), 1/2 (1), or full (0) pixel step.
        SkASSERT((nextY & ((SK_Fixed1 >> 2) - 1)) == 0);
        int yShift = 0;
        if ((nextY - y) & (SK_Fixed1 >> 2)) {
            yShift = 2;
            nextY = y + (SK_Fixed1 >> 2);
        } else if ((nextY - y) & (SK_Fixed1 >> 1)) {
            yShift = 1;
            SkASSERT(nextY == y + (SK_Fixed1 >> 1));
        }

        SkAlpha fullAlpha = fixed_to_alpha(nextY - y);

        // If we're using mask blitter, we advance the mask row in this function
        // to save some "if" condition checks.
        SkAlpha* maskRow =
                isUsingMask
                        ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(SkFixedFloorToInt(y))
                        : nullptr;

        SkASSERT(currE->fPrev == prevHead);
        validate_edges_for_y(currE, y);

        // Even if next - y == SK_Fixed1, we can still break the left-to-right order requirement
        // of the SKAAClip: |\| (two trapezoids with overlapping middle wedges)
        bool noRealBlitter = forceRLE; // forceRLE && (nextY - y != SK_Fixed1);

        while (currE->fUpperY <= y) {
            SkASSERT(currE->fLowerY >= nextY);
            SkASSERT(currE->fY == y);

            // Accumulate winding; an interval is "inside" per the fill rule
            // (XOR-ed with isInverse for inverse fills).
            w += static_cast<int>(currE->fWinding);
            bool prev_in_interval = in_interval;
            in_interval = !(w & windingMask) == isInverse;

            bool isLeft = in_interval && !prev_in_interval;
            bool isRite = !in_interval && prev_in_interval;

            if (isRite) {
                // This edge closes the current [left, rite] span: blit it.
                SkFixed rite = currE->fX;
                currE->goY(nextY, yShift);
                SkFixed nextLeft = std::max(leftClip, leftE->fX);
                rite = std::min(rightClip, rite);
                SkFixed nextRite = std::min(rightClip, currE->fX);
                blit_trapezoid_row(
                        blitter,
                        y >> 16,
                        left,
                        rite,
                        nextLeft,
                        nextRite,
                        leftDY,
                        currE->fDY,
                        fullAlpha,
                        maskRow,
                        noRealBlitter || (fullAlpha == 0xFF &&
                                          (edges_too_close(prevRite, left, leftE->fX) ||
                                           edges_too_close(currE, currE->fNext, nextY))));
                prevRite = SkFixedCeilToInt(std::max(rite, currE->fX));
            } else {
                if (isLeft) {
                    // This edge opens a span: remember its left boundary.
                    left = std::max(currE->fX, leftClip);
                    leftDY = currE->fDY;
                    leftE = currE;
                }
                currE->goY(nextY, yShift);
            }

            SkAnalyticEdge* next = currE->fNext;
            SkFixed newX;

            // If the edge ends inside this row, try to refresh it with the
            // next segment of its quad/cubic; plain lines just end.
            while (currE->fLowerY <= nextY) {
                if (currE->fCurveCount < 0) {
                    SkAnalyticCubicEdge* cubicEdge = (SkAnalyticCubicEdge*)currE;
                    cubicEdge->keepContinuous();
                    if (!cubicEdge->updateCubic()) {
                        break;
                    }
                } else if (currE->fCurveCount > 0) {
                    SkAnalyticQuadraticEdge* quadEdge = (SkAnalyticQuadraticEdge*)currE;
                    quadEdge->keepContinuous();
                    if (!quadEdge->updateQuadratic()) {
                        break;
                    }
                } else {
                    break;
                }
            }
            SkASSERT(currE->fY == nextY);

            if (currE->fLowerY <= nextY) {
                remove_edge(currE);
            } else {
                update_next_next_y(currE->fLowerY, nextY, &nextNextY);
                newX = currE->fX;
                SkASSERT(currE->fLowerY > nextY);
                if (newX < prevX) {
                    // ripple currE backwards until it is x-sorted
                    backward_insert_edge_based_on_x(currE);
                } else {
                    prevX = newX;
                }
                if (!skipIntersect) {
                    check_intersection(currE, nextY, &nextNextY);
                }
            }

            currE = next;
            SkASSERT(currE);
        }

        // was our right-edge culled away?
        if (in_interval) {
            blit_trapezoid_row(blitter,
                               y >> 16,
                               left,
                               rightClip,
                               std::max(leftClip, leftE->fX),
                               rightClip,
                               leftDY,
                               0,
                               fullAlpha,
                               maskRow,
                               noRealBlitter || (fullAlpha == 0xFF &&
                                                 edges_too_close(leftE->fPrev, leftE, nextY)));
        }

        if (forceRLE) {
            ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY);
        }

        y = nextY;
        if (y >= SkIntToFixed(stop_y)) {
            break;
        }

        // now currE points to the first edge with a fUpperY larger than the previous y
        insert_new_edges(currE, y, &nextNextY);
    }
}
1603
// Build the analytic edge list for `path`, wrap it in head/tail sentinel
// edges, and dispatch to the convex or general edge walker. start_y/stop_y
// bound the vertical extent; pathContainedInClip skips per-edge clipping;
// isUsingMask indicates a MaskAdditiveBlitter (which needs x clamped to the
// path bounds); forceRLE implies that SkAAClip is calling us.
static void aaa_fill_path(const SkPath& path,
                          const SkIRect& clipRect,
                          AdditiveBlitter* blitter,
                          int start_y,
                          int stop_y,
                          bool pathContainedInClip,
                          bool isUsingMask,
                          bool forceRLE) { // forceRLE implies that SkAAClip is calling us
    SkASSERT(blitter);

    SkAnalyticEdgeBuilder builder;
    int count = builder.buildEdges(path, pathContainedInClip ? nullptr : &clipRect);
    SkAnalyticEdge** list = builder.analyticEdgeList();

    SkIRect rect = clipRect;
    if (0 == count) {
        if (path.isInverseFillType()) {
            /*
             * Since we are in inverse-fill, our caller has already drawn above
             * our top (start_y) and will draw below our bottom (stop_y). Thus
             * we need to restrict our drawing to the intersection of the clip
             * and those two limits.
             */
            if (rect.fTop < start_y) {
                rect.fTop = start_y;
            }
            if (rect.fBottom > stop_y) {
                rect.fBottom = stop_y;
            }
            if (!rect.isEmpty()) {
                blitter->getRealBlitter()->blitRect(
                        rect.fLeft, rect.fTop, rect.width(), rect.height());
            }
        }
        return;
    }

    SkAnalyticEdge headEdge, tailEdge, *last;
    // this returns the first and last edge after they're sorted into a dlink list
    SkAnalyticEdge* edge = sort_edges(list, count, &last);

    // Head sentinel: sits before every real edge (min y, min x, never expires).
    headEdge.fPrev = nullptr;
    headEdge.fNext = edge;
    headEdge.fUpperY = headEdge.fLowerY = SK_MinS32;
    headEdge.fX = SK_MinS32;
    headEdge.fDX = 0;
    headEdge.fDY = SK_MaxS32;
    headEdge.fUpperX = SK_MinS32;
    edge->fPrev = &headEdge;

    // Tail sentinel: sits after every real edge (max y, max x).
    tailEdge.fPrev = last;
    tailEdge.fNext = nullptr;
    tailEdge.fUpperY = tailEdge.fLowerY = SK_MaxS32;
    tailEdge.fX = SK_MaxS32;
    tailEdge.fDX = 0;
    tailEdge.fDY = SK_MaxS32;
    tailEdge.fUpperX = SK_MaxS32;
    last->fNext = &tailEdge;

    // now edge is the head of the sorted linklist

    if (!pathContainedInClip && start_y < clipRect.fTop) {
        start_y = clipRect.fTop;
    }
    if (!pathContainedInClip && stop_y > clipRect.fBottom) {
        stop_y = clipRect.fBottom;
    }

    SkFixed leftBound = SkIntToFixed(rect.fLeft);
    SkFixed rightBound = SkIntToFixed(rect.fRight);
    if (isUsingMask) {
        // If we're using mask, then we have to limit the bound within the path bounds.
        // Otherwise, the edge drift may access an invalid address inside the mask.
        SkIRect ir;
        path.getBounds().roundOut(&ir);
        leftBound = std::max(leftBound, SkIntToFixed(ir.fLeft));
        rightBound = std::min(rightBound, SkIntToFixed(ir.fRight));
    }

    if (!path.isInverseFillType() && path.isConvex() && count >= 2) {
        aaa_walk_convex_edges(
                &headEdge, blitter, start_y, stop_y, leftBound, rightBound, isUsingMask);
    } else {
        // We skip intersection computation if there are many points which probably already
        // give us enough fractional scan lines.
        bool skipIntersect = path.countPoints() > (stop_y - start_y) * 2;

        aaa_walk_edges(&headEdge,
                       &tailEdge,
                       path.getFillType(),
                       blitter,
                       start_y,
                       stop_y,
                       leftBound,
                       rightBound,
                       isUsingMask,
                       forceRLE,
                       skipIntersect);
    }
}
1704
1705 // Check if the path is a rect and fat enough after clipping; if so, blit it.
try_blit_fat_anti_rect(SkBlitter * blitter,const SkPath & path,const SkIRect & clip)1706 static inline bool try_blit_fat_anti_rect(SkBlitter* blitter,
1707 const SkPath& path,
1708 const SkIRect& clip) {
1709 SkRect rect;
1710 if (!path.isRect(&rect)) {
1711 return false; // not rect
1712 }
1713 if (!rect.intersect(SkRect::Make(clip))) {
1714 return true; // The intersection is empty. Hence consider it done.
1715 }
1716 SkIRect bounds = rect.roundOut();
1717 if (bounds.width() < 3) {
1718 return false; // not fat
1719 }
1720 blitter->blitFatAntiRect(rect);
1721 return true;
1722 }
1723
AAAFillPath(const SkPath & path,SkBlitter * blitter,const SkIRect & ir,const SkIRect & clipBounds,bool forceRLE)1724 void SkScan::AAAFillPath(const SkPath& path,
1725 SkBlitter* blitter,
1726 const SkIRect& ir,
1727 const SkIRect& clipBounds,
1728 bool forceRLE) {
1729 bool containedInClip = clipBounds.contains(ir);
1730 bool isInverse = path.isInverseFillType();
1731
1732 // The mask blitter (where we store intermediate alpha values directly in a mask, and then call
1733 // the real blitter once in the end to blit the whole mask) is faster than the RLE blitter when
1734 // the blit region is small enough (i.e., CanHandleRect(ir)). When isInverse is true, the blit
1735 // region is no longer the rectangle ir so we won't use the mask blitter. The caller may also
1736 // use the forceRLE flag to force not using the mask blitter. Also, when the path is a simple
1737 // rect, preparing a mask and blitting it might have too much overhead. Hence we'll use
1738 // blitFatAntiRect to avoid the mask and its overhead.
1739 if (MaskAdditiveBlitter::CanHandleRect(ir) && !isInverse && !forceRLE) {
1740 // blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
1741 // Hence only tryBlitFatAntiRect when MaskAdditiveBlitter would have been used.
1742 if (!try_blit_fat_anti_rect(blitter, path, clipBounds)) {
1743 MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
1744 aaa_fill_path(path,
1745 clipBounds,
1746 &additiveBlitter,
1747 ir.fTop,
1748 ir.fBottom,
1749 containedInClip,
1750 true,
1751 forceRLE);
1752 }
1753 } else if (!isInverse && path.isConvex()) {
1754 // If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler
1755 // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
1756 // SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
1757 // RunBasedAdditiveBlitter would suffice.
1758 RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
1759 aaa_fill_path(path,
1760 clipBounds,
1761 &additiveBlitter,
1762 ir.fTop,
1763 ir.fBottom,
1764 containedInClip,
1765 false,
1766 forceRLE);
1767 } else {
1768 // If the filling area might not be convex, the more involved aaa_walk_edges would
1769 // be called and we have to clamp the alpha downto 255. The SafeRLEAdditiveBlitter
1770 // does that at a cost of performance.
1771 SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
1772 aaa_fill_path(path,
1773 clipBounds,
1774 &additiveBlitter,
1775 ir.fTop,
1776 ir.fBottom,
1777 containedInClip,
1778 false,
1779 forceRLE);
1780 }
1781 }
1782