1 /*
2 * Copyright 2016 The Android Open Source Project
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "include/core/SkPath.h"
9 #include "include/core/SkRegion.h"
10 #include "include/private/SkTemplates.h"
11 #include "include/private/SkTo.h"
12 #include "src/core/SkAnalyticEdge.h"
13 #include "src/core/SkAntiRun.h"
14 #include "src/core/SkAutoMalloc.h"
15 #include "src/core/SkBlitter.h"
16 #include "src/core/SkEdge.h"
17 #include "src/core/SkEdgeBuilder.h"
18 #include "src/core/SkGeometry.h"
19 #include "src/core/SkQuadClipper.h"
20 #include "src/core/SkRasterClip.h"
21 #include "src/core/SkScan.h"
22 #include "src/core/SkScanPriv.h"
23 #include "src/core/SkTSort.h"
24
25 #include <utility>
26
27 #if defined(SK_DISABLE_AAA)
// Stub used when analytic AA is compiled out (SK_DISABLE_AAA); asserts in
// debug builds because nothing should route here when AAA is disabled.
void SkScan::AAAFillPath(const SkPath&, SkBlitter*, const SkIRect&, const SkIRect&, bool) {
    SkDEBUGFAIL("AAA Disabled");
    return;
}
32 #else
33
34 /*
35
36 The following is a high-level overview of our analytic anti-aliasing
37 algorithm. We consider a path as a collection of line segments, as
38 quadratic/cubic curves are converted to small line segments. Without loss of
39 generality, let's assume that the draw region is [0, W] x [0, H].
40
41 Our algorithm is based on horizontal scan lines (y = c_i) as the previous
42 sampling-based algorithm did. However, our algorithm uses non-equal-spaced
43 scan lines, while the previous method always uses equal-spaced scan lines,
44 such as (y = 1/2 + 0, 1/2 + 1, 1/2 + 2, ...) in the previous non-AA algorithm,
45 and (y = 1/8 + 1/4, 1/8 + 2/4, 1/8 + 3/4, ...) in the previous
46 16-supersampling AA algorithm.
47
48 Our algorithm contains scan lines y = c_i for c_i that is either:
49
50 1. an integer between [0, H]
51
52 2. the y value of a line segment endpoint
53
54 3. the y value of an intersection of two line segments
55
For two consecutive scan lines y = c_i, y = c_{i+1}, we analytically compute
57 the coverage of this horizontal strip of our path on each pixel. This can be
58 done very efficiently because the strip of our path now only consists of
59 trapezoids whose top and bottom edges are y = c_i, y = c_{i+1} (this includes
60 rectangles and triangles as special cases).
61
We now describe how the coverage of a single pixel is computed against such a
63 trapezoid. That coverage is essentially the intersection area of a rectangle
64 (e.g., [0, 1] x [c_i, c_{i+1}]) and our trapezoid. However, that intersection
65 could be complicated, as shown in the example region A below:
66
67 +-----------\----+
68 | \ C|
69 | \ |
70 \ \ |
71 |\ A \|
72 | \ \
73 | \ |
74 | B \ |
75 +----\-----------+
76
77 However, we don't have to compute the area of A directly. Instead, we can
78 compute the excluded area, which are B and C, quite easily, because they're
79 just triangles. In fact, we can prove that an excluded region (take B as an
80 example) is either itself a simple trapezoid (including rectangles, triangles,
81 and empty regions), or its opposite (the opposite of B is A + C) is a simple
82 trapezoid. In any case, we can compute its area efficiently.
83
84 In summary, our algorithm has a higher quality because it generates ground-
85 truth coverages analytically. It is also faster because it has much fewer
unnecessary horizontal scan lines. For example, given a triangle path, the
87 number of scan lines in our algorithm is only about 3 + H while the
88 16-supersampling algorithm has about 4H scan lines.
89
90 */
91
add_alpha(SkAlpha * alpha,SkAlpha delta)92 static void add_alpha(SkAlpha* alpha, SkAlpha delta) {
93 SkASSERT(*alpha + delta <= 256);
94 *alpha = SkAlphaRuns::CatchOverflow(*alpha + delta);
95 }
96
safely_add_alpha(SkAlpha * alpha,SkAlpha delta)97 static void safely_add_alpha(SkAlpha* alpha, SkAlpha delta) {
98 *alpha = std::min(0xFF, *alpha + delta);
99 }
100
// An AdditiveBlitter accumulates alpha through its blitAntiH overloads (into a
// mask or an RLE row) rather than writing it directly, so a pixel touched by
// several trapezoids within the same strip sums its coverage. The inherited
// non-additive SkBlitter entry points are disabled below and must go through
// the real blitter instead.
class AdditiveBlitter : public SkBlitter {
public:
    ~AdditiveBlitter() override {}

    // Return the underlying (non-additive) blitter. Implementations may return
    // themselves when forceRealBlitter is false if they can act as one.
    virtual SkBlitter* getRealBlitter(bool forceRealBlitter = false) = 0;

    // Additive blits: per-pixel array, single pixel, and constant-alpha run.
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[], int len) = 0;
    virtual void blitAntiH(int x, int y, const SkAlpha alpha) = 0;
    virtual void blitAntiH(int x, int y, int width, const SkAlpha alpha) = 0;

    // The runs-based SkBlitter::blitAntiH is not additive; use the real blitter.
    void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
        SkDEBUGFAIL("Please call real blitter's blitAntiH instead.");
    }

    void blitV(int x, int y, int height, SkAlpha alpha) override {
        SkDEBUGFAIL("Please call real blitter's blitV instead.");
    }

    void blitH(int x, int y, int width) override {
        SkDEBUGFAIL("Please call real blitter's blitH instead.");
    }

    void blitRect(int x, int y, int width, int height) override {
        SkDEBUGFAIL("Please call real blitter's blitRect instead.");
    }

    void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
            override {
        SkDEBUGFAIL("Please call real blitter's blitAntiRect instead.");
    }

    // Width (in pixels) of the region being blitted.
    virtual int getWidth() = 0;

    // Flush the additive alpha cache if floor(y) and floor(nextY) is different
    // (i.e., we'll start working on a new pixel row).
    virtual void flush_if_y_changed(SkFixed y, SkFixed nextY) = 0;
};
138
139 // We need this mask blitter because it significantly accelerates small path filling.
140 class MaskAdditiveBlitter : public AdditiveBlitter {
141 public:
142 MaskAdditiveBlitter(SkBlitter* realBlitter,
143 const SkIRect& ir,
144 const SkIRect& clipBounds,
145 bool isInverse);
~MaskAdditiveBlitter()146 ~MaskAdditiveBlitter() override { fRealBlitter->blitMask(fMask, fClipRect); }
147
148 // Most of the time, we still consider this mask blitter as the real blitter
149 // so we can accelerate blitRect and others. But sometimes we want to return
150 // the absolute real blitter (e.g., when we fall back to the old code path).
getRealBlitter(bool forceRealBlitter)151 SkBlitter* getRealBlitter(bool forceRealBlitter) override {
152 return forceRealBlitter ? fRealBlitter : this;
153 }
154
155 // Virtual function is slow. So don't use this. Directly add alpha to the mask instead.
156 void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
157
158 // Allowing following methods are used to blit rectangles during aaa_walk_convex_edges
159 // Since there aren't many rectangles, we can still bear the slow speed of virtual functions.
160 void blitAntiH(int x, int y, const SkAlpha alpha) override;
161 void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
162 void blitV(int x, int y, int height, SkAlpha alpha) override;
163 void blitRect(int x, int y, int width, int height) override;
164 void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
165 override;
166
167 // The flush is only needed for RLE (RunBasedAdditiveBlitter)
flush_if_y_changed(SkFixed y,SkFixed nextY)168 void flush_if_y_changed(SkFixed y, SkFixed nextY) override {}
169
getWidth()170 int getWidth() override { return fClipRect.width(); }
171
CanHandleRect(const SkIRect & bounds)172 static bool CanHandleRect(const SkIRect& bounds) {
173 int width = bounds.width();
174 if (width > MaskAdditiveBlitter::kMAX_WIDTH) {
175 return false;
176 }
177 int64_t rb = SkAlign4(width);
178 // use 64bits to detect overflow
179 int64_t storage = rb * bounds.height();
180
181 return (width <= MaskAdditiveBlitter::kMAX_WIDTH) &&
182 (storage <= MaskAdditiveBlitter::kMAX_STORAGE);
183 }
184
185 // Return a pointer where pointer[x] corresonds to the alpha of (x, y)
getRow(int y)186 uint8_t* getRow(int y) {
187 if (y != fY) {
188 fY = y;
189 fRow = fMask.fImage + (y - fMask.fBounds.fTop) * fMask.fRowBytes - fMask.fBounds.fLeft;
190 }
191 return fRow;
192 }
193
194 private:
195 // so we don't try to do very wide things, where the RLE blitter would be faster
196 static const int kMAX_WIDTH = 32;
197 static const int kMAX_STORAGE = 1024;
198
199 SkBlitter* fRealBlitter;
200 SkMask fMask;
201 SkIRect fClipRect;
202 // we add 2 because we can write 1 extra byte at either end due to precision error
203 uint32_t fStorage[(kMAX_STORAGE >> 2) + 2];
204
205 uint8_t* fRow;
206 int fY;
207 };
208
// Build an A8 mask covering ir, clipped to clipBounds. The pixels live in the
// stack-resident fStorage, with one spare byte before and after the image
// because trapezoid blits can write one pixel past either end due to
// fixed-point precision error.
MaskAdditiveBlitter::MaskAdditiveBlitter(SkBlitter* realBlitter,
                                         const SkIRect& ir,
                                         const SkIRect& clipBounds,
                                         bool isInverse) {
    SkASSERT(CanHandleRect(ir));
    SkASSERT(!isInverse);  // inverse fills can't be bounded by a small mask

    fRealBlitter = realBlitter;

    fMask.fImage = (uint8_t*)fStorage + 1;  // There's 1 extra byte at either end of fStorage
    fMask.fBounds = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat = SkMask::kA8_Format;

    // Start fY out of range so the first getRow() call recomputes fRow.
    fY = ir.fTop - 1;
    fRow = nullptr;

    fClipRect = ir;
    if (!fClipRect.intersect(clipBounds)) {
        SkASSERT(0);  // the caller should never hand us a fully-clipped rect
        fClipRect.setEmpty();
    }

    // Zero the whole mask; "+ 2" also clears the two spare precision bytes.
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 2);
}
234
// Intentionally unimplemented: per-pixel additive writes go straight into the
// mask (via getRow) to avoid virtual-call overhead.
void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    SK_ABORT("Don't use this; directly add alphas to the mask.");
}
238
// Add alpha to the single mask pixel (x, y). x may be one pixel left of the
// bounds due to precision error; the spare storage byte absorbs that.
void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
    SkASSERT(x >= fMask.fBounds.fLeft - 1);
    add_alpha(&this->getRow(y)[x], alpha);
}
243
blitAntiH(int x,int y,int width,const SkAlpha alpha)244 void MaskAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
245 SkASSERT(x >= fMask.fBounds.fLeft - 1);
246 uint8_t* row = this->getRow(y);
247 for (int i = 0; i < width; ++i) {
248 add_alpha(&row[x + i], alpha);
249 }
250 }
251
blitV(int x,int y,int height,SkAlpha alpha)252 void MaskAdditiveBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
253 if (alpha == 0) {
254 return;
255 }
256 SkASSERT(x >= fMask.fBounds.fLeft - 1);
257 // This must be called as if this is a real blitter.
258 // So we directly set alpha rather than adding it.
259 uint8_t* row = this->getRow(y);
260 for (int i = 0; i < height; ++i) {
261 row[x] = alpha;
262 row += fMask.fRowBytes;
263 }
264 }
265
blitRect(int x,int y,int width,int height)266 void MaskAdditiveBlitter::blitRect(int x, int y, int width, int height) {
267 SkASSERT(x >= fMask.fBounds.fLeft - 1);
268 // This must be called as if this is a real blitter.
269 // So we directly set alpha rather than adding it.
270 uint8_t* row = this->getRow(y);
271 for (int i = 0; i < height; ++i) {
272 memset(row + x, 0xFF, width);
273 row += fMask.fRowBytes;
274 }
275 }
276
// Blit a rect with partially-covered side columns: column x gets leftAlpha,
// columns [x+1, x+width] are fully covered, and column x+1+width gets
// rightAlpha.
void MaskAdditiveBlitter::blitAntiRect(int x,
                                       int y,
                                       int width,
                                       int height,
                                       SkAlpha leftAlpha,
                                       SkAlpha rightAlpha) {
    blitV(x, y, height, leftAlpha);
    blitV(x + 1 + width, y, height, rightAlpha);
    blitRect(x + 1, y, width, height);
}
287
// An AdditiveBlitter that accumulates coverage into SkAlphaRuns (RLE) for the
// current pixel row and flushes the row to the real blitter whenever y moves
// to a new integer row. Used when the path is too large for the fixed-size
// MaskAdditiveBlitter.
class RunBasedAdditiveBlitter : public AdditiveBlitter {
public:
    RunBasedAdditiveBlitter(SkBlitter* realBlitter,
                            const SkIRect& ir,
                            const SkIRect& clipBounds,
                            bool isInverse);

    // Flush any pending (not-yet-blitted) row before going away.
    ~RunBasedAdditiveBlitter() override { this->flush(); }

    SkBlitter* getRealBlitter(bool forceRealBlitter) override { return fRealBlitter; }

    void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
    void blitAntiH(int x, int y, const SkAlpha alpha) override;
    void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;

    int getWidth() override { return fWidth; }

    void flush_if_y_changed(SkFixed y, SkFixed nextY) override {
        if (SkFixedFloorToInt(y) != SkFixedFloorToInt(nextY)) {
            this->flush();
        }
    }

protected:
    SkBlitter* fRealBlitter;

    int fCurrY;  // Current y coordinate.
    int fWidth;  // Widest row of region to be blitted
    int fLeft;   // Leftmost x coordinate in any row
    int fTop;    // Initial y coordinate (top of bounds)

    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int fRunsToBuffer;
    void* fRunsBuffer;
    int fCurrentRun;
    SkAlphaRuns fRuns;

    // Search hint passed to fRuns.add(); reset to 0 whenever a blit starts
    // left of the previous position or the row is flushed.
    int fOffsetX;

    // True iff [x, x + width) lies within the blit region.
    bool check(int x, int width) const { return x >= 0 && x + width <= fWidth; }

    // Bytes needed for one SkAlphaRuns scanline: fWidth+1 int16_t runs
    // (extra one to store the zero at the end) plus at least fWidth+1 alphas.
    int getRunsSz() const { return (fWidth + 1 + (fWidth + 2) / 2) * sizeof(int16_t); }

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(reinterpret_cast<uint8_t*>(fRunsBuffer) +
                                                 fCurrentRun * kRunsSz);
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

    // Blitting 0xFF and 0 is much faster so we snap alphas close to them
    SkAlpha snapAlpha(SkAlpha alpha) { return alpha > 247 ? 0xFF : alpha < 8 ? 0x00 : alpha; }

    // Send the accumulated row (if any) to the real blitter, then reset state
    // so the next blit starts a fresh row.
    void flush() {
        if (fCurrY >= fTop) {  // fCurrY < fTop means "no pending row"
            SkASSERT(fCurrentRun < fRunsToBuffer);
            for (int x = 0; fRuns.fRuns[x]; x += fRuns.fRuns[x]) {
                // It seems that blitting 255 or 0 is much faster than blitting 254 or 1
                fRuns.fAlpha[x] = snapAlpha(fRuns.fAlpha[x]);
            }
            if (!fRuns.empty()) {
                // SkDEBUGCODE(fRuns.dump();)
                fRealBlitter->blitAntiH(fLeft, fCurrY, fRuns.fAlpha, fRuns.fRuns);
                this->advanceRuns();
                fOffsetX = 0;
            }
            fCurrY = fTop - 1;
        }
    }

    // Flush the pending row if we're moving to a different y.
    void checkY(int y) {
        if (y != fCurrY) {
            this->flush();
            fCurrY = y;
        }
    }
};
374
// Set up the RLE row buffers over the intersection of ir and clipBounds
// (or the full clip bounds for inverse fills).
RunBasedAdditiveBlitter::RunBasedAdditiveBlitter(SkBlitter* realBlitter,
                                                 const SkIRect& ir,
                                                 const SkIRect& clipBounds,
                                                 bool isInverse) {
    fRealBlitter = realBlitter;

    SkIRect sectBounds;
    if (isInverse) {
        // We use the clip bounds instead of the ir, since we may be asked to
        // draw outside of the rect when we're an inverse filltype
        sectBounds = clipBounds;
    } else {
        if (!sectBounds.intersect(ir, clipBounds)) {
            sectBounds.setEmpty();
        }
    }

    const int left = sectBounds.left();
    const int right = sectBounds.right();

    fLeft = left;
    fWidth = right - left;
    fTop = sectBounds.top();
    fCurrY = fTop - 1;  // below fTop means no pending row yet

    // Let the real blitter provide (and own) the runs memory so it can
    // preserve rows across blits if it wants to.
    fRunsToBuffer = realBlitter->requestRowsPreserved();
    fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
    fCurrentRun = -1;  // so the first advanceRuns() lands on buffer slot 0

    this->advanceRuns();

    fOffsetX = 0;
}
408
// Add a run of per-pixel alphas to the current row, clipping to [0, fWidth).
void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    checkY(y);
    x -= fLeft;

    if (x < 0) {  // clip on the left: drop the pixels before column 0
        len += x;
        antialias -= x;
        x = 0;
    }
    len = std::min(len, fWidth - x);  // clip on the right
    SkASSERT(check(x, len));

    if (x < fOffsetX) {  // the add() hint is only valid when moving rightward
        fOffsetX = 0;
    }

    fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX);  // Break the run
    // Split the affected region into unit-length runs so each pixel's alpha
    // can be updated independently below.
    for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
        for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
            fRuns.fRuns[x + i + j] = 1;
            fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
        }
        fRuns.fRuns[x + i] = 1;
    }
    for (int i = 0; i < len; ++i) {
        add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
    }
}
437
// Add alpha to the single pixel (x, y), if it's within the blit region.
void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
    checkY(y);
    x -= fLeft;

    if (x < fOffsetX) {  // the add() hint is only valid when moving rightward
        fOffsetX = 0;
    }

    if (this->check(x, 1)) {
        fOffsetX = fRuns.add(x, 0, 1, 0, alpha, fOffsetX);
    }
}
450
// Add a constant alpha to `width` pixels starting at (x, y), if in bounds.
void RunBasedAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
    checkY(y);
    x -= fLeft;

    if (x < fOffsetX) {  // the add() hint is only valid when moving rightward
        fOffsetX = 0;
    }

    if (this->check(x, width)) {
        fOffsetX = fRuns.add(x, 0, width, 0, alpha, fOffsetX);
    }
}
463
// This exists specifically for concave path filling.
// In those cases, we can easily accumulate alpha greater than 0xFF.
// Behaves like RunBasedAdditiveBlitter except its blitAntiH overloads
// saturate the accumulated alpha (safely_add_alpha) instead of asserting.
class SafeRLEAdditiveBlitter : public RunBasedAdditiveBlitter {
public:
    SafeRLEAdditiveBlitter(SkBlitter* realBlitter,
                           const SkIRect& ir,
                           const SkIRect& clipBounds,
                           bool isInverse)
            : RunBasedAdditiveBlitter(realBlitter, ir, clipBounds, isInverse) {}

    void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
    void blitAntiH(int x, int y, const SkAlpha alpha) override;
    void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
};
478
// Same as RunBasedAdditiveBlitter::blitAntiH(antialias[]), but saturating:
// concave paths may push per-pixel alpha above 0xFF.
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    checkY(y);
    x -= fLeft;

    if (x < 0) {  // clip on the left: drop the pixels before column 0
        len += x;
        antialias -= x;
        x = 0;
    }
    len = std::min(len, fWidth - x);  // clip on the right
    SkASSERT(check(x, len));

    if (x < fOffsetX) {  // the add() hint is only valid when moving rightward
        fOffsetX = 0;
    }

    fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX);  // Break the run
    // Split the affected region into unit-length runs so each pixel's alpha
    // can be updated independently below.
    for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
        for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
            fRuns.fRuns[x + i + j] = 1;
            fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
        }
        fRuns.fRuns[x + i] = 1;
    }
    for (int i = 0; i < len; ++i) {
        safely_add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
    }
}
507
// Saturating add of alpha to the single pixel (x, y), if in bounds.
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
    checkY(y);
    x -= fLeft;

    if (x < fOffsetX) {  // the add() hint is only valid when moving rightward
        fOffsetX = 0;
    }

    if (check(x, 1)) {
        // Break the run
        fOffsetX = fRuns.add(x, 0, 1, 0, 0, fOffsetX);
        safely_add_alpha(&fRuns.fAlpha[x], alpha);
    }
}
522
// Saturating add of a constant alpha to `width` pixels starting at (x, y).
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
    checkY(y);
    x -= fLeft;

    if (x < fOffsetX) {  // the add() hint is only valid when moving rightward
        fOffsetX = 0;
    }

    if (check(x, width)) {
        // Break the run
        fOffsetX = fRuns.add(x, 0, width, 0, 0, fOffsetX);
        // The region may still contain multiple runs; add alpha to the head of
        // each run (the head's alpha represents the whole run).
        for (int i = x; i < x + width; i += fRuns.fRuns[i]) {
            safely_add_alpha(&fRuns.fAlpha[i], alpha);
        }
    }
}
539
540 // Return the alpha of a trapezoid whose height is 1
trapezoid_to_alpha(SkFixed l1,SkFixed l2)541 static SkAlpha trapezoid_to_alpha(SkFixed l1, SkFixed l2) {
542 SkASSERT(l1 >= 0 && l2 >= 0);
543 SkFixed area = (l1 + l2) / 2;
544 return SkTo<SkAlpha>(area >> 8);
545 }
546
// The alpha of right-triangle (a, a*b): a is the horizontal edge length and
// a*b the vertical one, so the area is a*a*b/2.
static SkAlpha partial_triangle_to_alpha(SkFixed a, SkFixed b) {
    SkASSERT(a <= SK_Fixed1);
#if 0
    // TODO(mtklein): skia:8877
    SkASSERT(b <= SK_Fixed1);
#endif

    // Approximating...
    // SkFixed area = SkFixedMul(a, SkFixedMul(a,b)) / 2;
    SkFixed area = (a >> 11) * (a >> 11) * (b >> 11);

#if 0
    // TODO(mtklein): skia:8877
    return SkTo<SkAlpha>(area >> 8);
#else
    // b can exceed SK_Fixed1 (see skia:8877 above), which can push area past
    // the alpha range; mask to 8 bits rather than assert.
    return SkTo<SkAlpha>((area >> 8) & 0xFF);
#endif
}
566
// Scale alpha by partialHeight, a fixed-point fraction (rounding the result).
static SkAlpha get_partial_alpha(SkAlpha alpha, SkFixed partialHeight) {
    return SkToU8(SkFixedRoundToInt(alpha * partialHeight));
}
570
// Multiply two alphas, approximating alpha * fullAlpha / 255 with >> 8.
// Note: overload resolution picks this version only when the second argument
// is an SkAlpha (not an SkFixed).
static SkAlpha get_partial_alpha(SkAlpha alpha, SkAlpha fullAlpha) {
    return (alpha * fullAlpha) >> 8;
}
574
// For SkFixed that's close to SK_Fixed1, we can't convert it to alpha by just shifting right.
// For example, when f = SK_Fixed1, right shifting 8 will get 256, but we need 255.
// This is rarely the problem so we'll only use this for blitting rectangles.
static SkAlpha fixed_to_alpha(SkFixed f) {
    SkASSERT(f <= SK_Fixed1);
    return get_partial_alpha(0xFF, f);  // resolves to the (SkAlpha, SkFixed) overload
}
582
583 // Suppose that line (l1, y)-(r1, y+1) intersects with (l2, y)-(r2, y+1),
584 // approximate (very coarsely) the x coordinate of the intersection.
approximate_intersection(SkFixed l1,SkFixed r1,SkFixed l2,SkFixed r2)585 static SkFixed approximate_intersection(SkFixed l1, SkFixed r1, SkFixed l2, SkFixed r2) {
586 if (l1 > r1) {
587 std::swap(l1, r1);
588 }
589 if (l2 > r2) {
590 std::swap(l2, r2);
591 }
592 return (std::max(l1, l2) + std::min(r1, r2)) / 2;
593 }
594
// Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
// Compute per-pixel coverage of the area ABOVE the line with top x = l and
// bottom x = r within one row, writing alphas[0 .. ceil(r)-1]. dY is the
// vertical rise per unit of horizontal run (horizontal_len * dY = vertical
// rise). Used for the right edge of a trapezoid in blit_aaa_trapezoid_row.
static void compute_alpha_above_line(SkAlpha* alphas,
                                     SkFixed l,
                                     SkFixed r,
                                     SkFixed dY,
                                     SkAlpha fullAlpha) {
    SkASSERT(l <= r);
    SkASSERT(l >> 16 == 0);
    int R = SkFixedCeilToInt(r);
    if (R == 0) {
        return;
    } else if (R == 1) {
        // Single pixel: the complementary trapezoid (1-l, 1-r) averaged, i.e.
        // ((2 - l - r) / 2), scaled to alpha.
        alphas[0] = get_partial_alpha(((R << 17) - l - r) >> 9, fullAlpha);
    } else {
        SkFixed first = SK_Fixed1 - l;        // horizontal edge length of the left-most triangle
        SkFixed last = r - ((R - 1) << 16);   // horizontal edge length of the right-most triangle
        SkFixed firstH = SkFixedMul(first, dY);      // vertical edge of the left-most triangle
        alphas[0] = SkFixedMul(first, firstH) >> 9;  // triangle alpha
        SkFixed alpha16 = firstH + (dY >> 1);        // rectangle plus triangle
        for (int i = 1; i < R - 1; ++i) {
            // Interior pixels grow linearly by dY per column.
            alphas[i] = alpha16 >> 8;
            alpha16 += dY;
        }
        alphas[R - 1] = fullAlpha - partial_triangle_to_alpha(last, dY);
    }
}
621
// Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
// Compute per-pixel coverage of the area BELOW the line with top x = l and
// bottom x = r — the mirror image of compute_alpha_above_line, filled from
// the right-most pixel toward the left. Used for the left edge of a trapezoid
// in blit_aaa_trapezoid_row.
static void compute_alpha_below_line(SkAlpha* alphas,
                                     SkFixed l,
                                     SkFixed r,
                                     SkFixed dY,
                                     SkAlpha fullAlpha) {
    SkASSERT(l <= r);
    SkASSERT(l >> 16 == 0);
    int R = SkFixedCeilToInt(r);
    if (R == 0) {
        return;
    } else if (R == 1) {
        // Single pixel: trapezoid with parallel sides l and r.
        alphas[0] = get_partial_alpha(trapezoid_to_alpha(l, r), fullAlpha);
    } else {
        SkFixed first = SK_Fixed1 - l;       // horizontal edge length of the left-most triangle
        SkFixed last = r - ((R - 1) << 16);  // horizontal edge length of the right-most triangle
        SkFixed lastH = SkFixedMul(last, dY);         // vertical edge of the right-most triangle
        alphas[R - 1] = SkFixedMul(last, lastH) >> 9; // triangle alpha
        SkFixed alpha16 = lastH + (dY >> 1);          // rectangle plus triangle
        for (int i = R - 2; i > 0; i--) {
            // Interior pixels grow linearly by dY per column (right to left).
            alphas[i] = (alpha16 >> 8) & 0xFF;
            alpha16 += dY;
        }
        alphas[0] = fullAlpha - partial_triangle_to_alpha(first, dY);
    }
}
648
// Note that if fullAlpha != 0xFF, we'll multiply alpha by fullAlpha
// Write one pixel of coverage, either straight into the mask row or through
// the additive/real blitter. noRealBlitter forces the additive path (concave
// fills must keep accumulating); needSafeCheck saturates mask accumulation
// instead of asserting on overflow.
static SK_ALWAYS_INLINE void blit_single_alpha(AdditiveBlitter* blitter,
                                               int y,
                                               int x,
                                               SkAlpha alpha,
                                               SkAlpha fullAlpha,
                                               SkAlpha* maskRow,
                                               bool isUsingMask,
                                               bool noRealBlitter,
                                               bool needSafeCheck) {
    if (isUsingMask) {
        if (fullAlpha == 0xFF && !noRealBlitter) {  // noRealBlitter is needed for concave paths
            maskRow[x] = alpha;
        } else if (needSafeCheck) {
            safely_add_alpha(&maskRow[x], get_partial_alpha(alpha, fullAlpha));
        } else {
            add_alpha(&maskRow[x], get_partial_alpha(alpha, fullAlpha));
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            // Full-height strip: the real blitter is faster than the RLE path.
            blitter->getRealBlitter()->blitV(x, y, 1, alpha);
        } else {
            blitter->blitAntiH(x, y, get_partial_alpha(alpha, fullAlpha));
        }
    }
}
675
// Write coverage for two adjacent pixels (x, y) and (x+1, y). Same routing
// rules as blit_single_alpha: mask vs. blitter, safe vs. asserting add.
static SK_ALWAYS_INLINE void blit_two_alphas(AdditiveBlitter* blitter,
                                             int y,
                                             int x,
                                             SkAlpha a1,
                                             SkAlpha a2,
                                             SkAlpha fullAlpha,
                                             SkAlpha* maskRow,
                                             bool isUsingMask,
                                             bool noRealBlitter,
                                             bool needSafeCheck) {
    if (isUsingMask) {
        if (needSafeCheck) {
            safely_add_alpha(&maskRow[x], a1);
            safely_add_alpha(&maskRow[x + 1], a2);
        } else {
            add_alpha(&maskRow[x], a1);
            add_alpha(&maskRow[x + 1], a2);
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            // Full-height strip: the real blitter's pair blit is faster.
            blitter->getRealBlitter()->blitAntiH2(x, y, a1, a2);
        } else {
            blitter->blitAntiH(x, y, a1);
            blitter->blitAntiH(x + 1, y, a2);
        }
    }
}
703
// Write `len` pixels of constant coverage fullAlpha starting at (x, y).
// Routing rules match blit_single_alpha.
static SK_ALWAYS_INLINE void blit_full_alpha(AdditiveBlitter* blitter,
                                             int y,
                                             int x,
                                             int len,
                                             SkAlpha fullAlpha,
                                             SkAlpha* maskRow,
                                             bool isUsingMask,
                                             bool noRealBlitter,
                                             bool needSafeCheck) {
    if (isUsingMask) {
        for (int i = 0; i < len; ++i) {
            if (needSafeCheck) {
                safely_add_alpha(&maskRow[x + i], fullAlpha);
            } else {
                add_alpha(&maskRow[x + i], fullAlpha);
            }
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            // Opaque run: the real blitter's blitH is the fastest path.
            blitter->getRealBlitter()->blitH(x, y, len);
        } else {
            blitter->blitAntiH(x, y, len, fullAlpha);
        }
    }
}
729
// Blit one trapezoid strip: upper edge endpoints (ul, ur) and lower edge
// endpoints (ll, lr), all x coordinates in SkFixed. lDY/rDY are the vertical
// rises per unit horizontal run of the left/right edges (SK_MaxS32 marks a
// near-vertical edge). Coverage starts at fullAlpha everywhere and the areas
// cut off by the two slanted edges are subtracted.
static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter,
                                   int y,
                                   SkFixed ul,
                                   SkFixed ur,
                                   SkFixed ll,
                                   SkFixed lr,
                                   SkFixed lDY,
                                   SkFixed rDY,
                                   SkAlpha fullAlpha,
                                   SkAlpha* maskRow,
                                   bool isUsingMask,
                                   bool noRealBlitter,
                                   bool needSafeCheck) {
    int L = SkFixedFloorToInt(ul), R = SkFixedCeilToInt(lr);
    int len = R - L;

    if (len == 1) {
        // Whole trapezoid fits in one pixel: its area is the coverage.
        SkAlpha alpha = trapezoid_to_alpha(ur - ul, lr - ll);
        blit_single_alpha(blitter,
                          y,
                          L,
                          alpha,
                          fullAlpha,
                          maskRow,
                          isUsingMask,
                          noRealBlitter,
                          needSafeCheck);
        return;
    }

    // Scratch space for alphas, tempAlphas, and runs; short rows use the stack.
    const int kQuickLen = 31;
    char quickMemory[(sizeof(SkAlpha) * 2 + sizeof(int16_t)) * (kQuickLen + 1)];
    SkAlpha* alphas;

    if (len <= kQuickLen) {
        alphas = (SkAlpha*)quickMemory;
    } else {
        alphas = new SkAlpha[(len + 1) * (sizeof(SkAlpha) * 2 + sizeof(int16_t))];
    }

    SkAlpha* tempAlphas = alphas + len + 1;
    int16_t* runs = (int16_t*)(alphas + (len + 1) * 2);

    // Start with full coverage and unit-length runs everywhere.
    for (int i = 0; i < len; ++i) {
        runs[i] = 1;
        alphas[i] = fullAlpha;
    }
    runs[len] = 0;

    // Subtract the area left of the left edge (ul -> ll).
    int uL = SkFixedFloorToInt(ul);
    int lL = SkFixedCeilToInt(ll);
    if (uL + 2 == lL) {  // We only need to compute two triangles, accelerate this special case
        SkFixed first = SkIntToFixed(uL) + SK_Fixed1 - ul;
        SkFixed second = ll - ul - first;
        SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, lDY);
        SkAlpha a2 = partial_triangle_to_alpha(second, lDY);
        alphas[0] = alphas[0] > a1 ? alphas[0] - a1 : 0;
        alphas[1] = alphas[1] > a2 ? alphas[1] - a2 : 0;
    } else {
        compute_alpha_below_line(
                tempAlphas + uL - L, ul - SkIntToFixed(uL), ll - SkIntToFixed(uL), lDY, fullAlpha);
        for (int i = uL; i < lL; ++i) {
            // Clamped subtraction: don't underflow below zero coverage.
            if (alphas[i - L] > tempAlphas[i - L]) {
                alphas[i - L] -= tempAlphas[i - L];
            } else {
                alphas[i - L] = 0;
            }
        }
    }

    // Subtract the area right of the right edge (ur -> lr).
    int uR = SkFixedFloorToInt(ur);
    int lR = SkFixedCeilToInt(lr);
    if (uR + 2 == lR) {  // We only need to compute two triangles, accelerate this special case
        SkFixed first = SkIntToFixed(uR) + SK_Fixed1 - ur;
        SkFixed second = lr - ur - first;
        SkAlpha a1 = partial_triangle_to_alpha(first, rDY);
        SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, rDY);
        alphas[len - 2] = alphas[len - 2] > a1 ? alphas[len - 2] - a1 : 0;
        alphas[len - 1] = alphas[len - 1] > a2 ? alphas[len - 1] - a2 : 0;
    } else {
        compute_alpha_above_line(
                tempAlphas + uR - L, ur - SkIntToFixed(uR), lr - SkIntToFixed(uR), rDY, fullAlpha);
        for (int i = uR; i < lR; ++i) {
            // Clamped subtraction: don't underflow below zero coverage.
            if (alphas[i - L] > tempAlphas[i - L]) {
                alphas[i - L] -= tempAlphas[i - L];
            } else {
                alphas[i - L] = 0;
            }
        }
    }

    if (isUsingMask) {
        for (int i = 0; i < len; ++i) {
            if (needSafeCheck) {
                safely_add_alpha(&maskRow[L + i], alphas[i]);
            } else {
                add_alpha(&maskRow[L + i], alphas[i]);
            }
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            // Real blitter is faster than RunBasedAdditiveBlitter
            blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs);
        } else {
            blitter->blitAntiH(L, y, alphas, len);
        }
    }

    if (len > kQuickLen) {
        delete[] alphas;
    }
}
842
// Blit the trapezoid strip with upper edge (ul, ur) and lower edge (ll, lr),
// splitting it (when possible) into a left slanted part, a fully-covered
// middle rect, and a right slanted part, which are blitted left-to-right.
// lDY/rDY must be the absolute values of the edges' vertical rise per unit
// horizontal run.
static SK_ALWAYS_INLINE void blit_trapezoid_row(AdditiveBlitter* blitter,
                                                int y,
                                                SkFixed ul,
                                                SkFixed ur,
                                                SkFixed ll,
                                                SkFixed lr,
                                                SkFixed lDY,
                                                SkFixed rDY,
                                                SkAlpha fullAlpha,
                                                SkAlpha* maskRow,
                                                bool isUsingMask,
                                                bool noRealBlitter = false,
                                                bool needSafeCheck = false) {
    SkASSERT(lDY >= 0 && rDY >= 0);  // We should only send in the absolute value

    if (ul > ur) {
        return;
    }

    // Edge crosses. Approximate it. This should only happen due to precision limit,
    // so the approximation could be very coarse.
    if (ll > lr) {
        ll = lr = approximate_intersection(ul, ll, ur, lr);
    }

    if (ul == ur && ll == lr) {
        return;  // empty trapezoid
    }

    // We're going to use the left line ul-ll and the rite line ur-lr
    // to exclude the area that's not covered by the path.
    // Swapping (ul, ll) or (ur, lr) won't affect that exclusion
    // so we'll do that for simplicity.
    if (ul > ll) {
        std::swap(ul, ll);
    }
    if (ur > lr) {
        std::swap(ur, lr);
    }

    SkFixed joinLeft = SkFixedCeilToFixed(ll);
    SkFixed joinRite = SkFixedFloorToFixed(ur);
    if (joinLeft <= joinRite) {  // There's a rect from joinLeft to joinRite that we can blit
        if (ul < joinLeft) {
            // Left slanted part, covering [ul, joinLeft).
            int len = SkFixedCeilToInt(joinLeft - ul);
            if (len == 1) {
                SkAlpha alpha = trapezoid_to_alpha(joinLeft - ul, joinLeft - ll);
                blit_single_alpha(blitter,
                                  y,
                                  ul >> 16,
                                  alpha,
                                  fullAlpha,
                                  maskRow,
                                  isUsingMask,
                                  noRealBlitter,
                                  needSafeCheck);
            } else if (len == 2) {
                // Two pixels: a triangle on each side of the pixel boundary.
                SkFixed first = joinLeft - SK_Fixed1 - ul;
                SkFixed second = ll - ul - first;
                SkAlpha a1 = partial_triangle_to_alpha(first, lDY);
                SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, lDY);
                blit_two_alphas(blitter,
                                y,
                                ul >> 16,
                                a1,
                                a2,
                                fullAlpha,
                                maskRow,
                                isUsingMask,
                                noRealBlitter,
                                needSafeCheck);
            } else {
                // General case: only the left edge slants, so the right edge
                // of this sub-trapezoid is the vertical line at joinLeft.
                blit_aaa_trapezoid_row(blitter,
                                       y,
                                       ul,
                                       joinLeft,
                                       ll,
                                       joinLeft,
                                       lDY,
                                       SK_MaxS32,
                                       fullAlpha,
                                       maskRow,
                                       isUsingMask,
                                       noRealBlitter,
                                       needSafeCheck);
            }
        }
        // SkAAClip requires that we blit from left to right.
        // Hence we must blit [ul, joinLeft] before blitting [joinLeft, joinRite]
        if (joinLeft < joinRite) {
            blit_full_alpha(blitter,
                            y,
                            SkFixedFloorToInt(joinLeft),
                            SkFixedFloorToInt(joinRite - joinLeft),
                            fullAlpha,
                            maskRow,
                            isUsingMask,
                            noRealBlitter,
                            needSafeCheck);
        }
        if (lr > joinRite) {
            // Right slanted part, covering [joinRite, lr).
            int len = SkFixedCeilToInt(lr - joinRite);
            if (len == 1) {
                SkAlpha alpha = trapezoid_to_alpha(ur - joinRite, lr - joinRite);
                blit_single_alpha(blitter,
                                  y,
                                  joinRite >> 16,
                                  alpha,
                                  fullAlpha,
                                  maskRow,
                                  isUsingMask,
                                  noRealBlitter,
                                  needSafeCheck);
            } else if (len == 2) {
                // Two pixels: a triangle on each side of the pixel boundary.
                SkFixed first = joinRite + SK_Fixed1 - ur;
                SkFixed second = lr - ur - first;
                SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, rDY);
                SkAlpha a2 = partial_triangle_to_alpha(second, rDY);
                blit_two_alphas(blitter,
                                y,
                                joinRite >> 16,
                                a1,
                                a2,
                                fullAlpha,
                                maskRow,
                                isUsingMask,
                                noRealBlitter,
                                needSafeCheck);
            } else {
                // General case: only the right edge slants, so the left edge
                // of this sub-trapezoid is the vertical line at joinRite.
                blit_aaa_trapezoid_row(blitter,
                                       y,
                                       joinRite,
                                       ur,
                                       joinRite,
                                       lr,
                                       SK_MaxS32,
                                       rDY,
                                       fullAlpha,
                                       maskRow,
                                       isUsingMask,
                                       noRealBlitter,
                                       needSafeCheck);
            }
        }
    } else {
        // No fully-covered middle rect; handle the whole trapezoid at once.
        blit_aaa_trapezoid_row(blitter,
                               y,
                               ul,
                               ur,
                               ll,
                               lr,
                               lDY,
                               rDY,
                               fullAlpha,
                               maskRow,
                               isUsingMask,
                               noRealBlitter,
                               needSafeCheck);
    }
}
1003
operator <(const SkAnalyticEdge & a,const SkAnalyticEdge & b)1004 static bool operator<(const SkAnalyticEdge& a, const SkAnalyticEdge& b) {
1005 int valuea = a.fUpperY;
1006 int valueb = b.fUpperY;
1007
1008 if (valuea == valueb) {
1009 valuea = a.fX;
1010 valueb = b.fX;
1011 }
1012
1013 if (valuea == valueb) {
1014 valuea = a.fDX;
1015 valueb = b.fDX;
1016 }
1017
1018 return valuea < valueb;
1019 }
1020
sort_edges(SkAnalyticEdge * list[],int count,SkAnalyticEdge ** last)1021 static SkAnalyticEdge* sort_edges(SkAnalyticEdge* list[], int count, SkAnalyticEdge** last) {
1022 SkTQSort(list, list + count);
1023
1024 // now make the edges linked in sorted order
1025 for (int i = 1; i < count; ++i) {
1026 list[i - 1]->fNext = list[i];
1027 list[i]->fPrev = list[i - 1];
1028 }
1029
1030 *last = list[count - 1];
1031 return list[0];
1032 }
1033
// Debug-only: walk the linked edge list and assert it is sorted by fUpperY
// and that each edge validates. Terminates at the sentinel tail edge
// (fUpperY == SK_MaxS32). No-op in release builds.
static void validate_sort(const SkAnalyticEdge* edge) {
#ifdef SK_DEBUG
    SkFixed prevUpperY = SkIntToFixed(-32768);

    for (; edge->fUpperY != SK_MaxS32; edge = (SkAnalyticEdge*)edge->fNext) {
        edge->validate();
        SkASSERT(prevUpperY <= edge->fUpperY);
        prevUpperY = edge->fUpperY;
    }
#endif
}
1047
// For an edge, we consider it smooth if the Dx doesn't change much, and Dy is large enough
// For curves that are updating, the Dx is not changing much if fQDx/fCDx and fQDy/fCDy are
// relatively large compared to fQDDx/QCDDx and fQDDy/fCDDy
static bool is_smooth_enough(SkAnalyticEdge* thisEdge, SkAnalyticEdge* nextEdge, int stop_y) {
    if (thisEdge->fCurveCount < 0) {
        // Cubic edge: compare the first differences (fCDx/fCDy) against the
        // second differences (fCDDx/fCDDy), scaled by the curve shift.
        const SkCubicEdge& cEdge = static_cast<SkAnalyticCubicEdge*>(thisEdge)->fCEdge;
        int ddshift = cEdge.fCurveShift;
        return SkAbs32(cEdge.fCDx) >> 1 >= SkAbs32(cEdge.fCDDx) >> ddshift &&
               SkAbs32(cEdge.fCDy) >> 1 >= SkAbs32(cEdge.fCDDy) >> ddshift &&
               // current Dy is (fCDy - (fCDDy >> ddshift)) >> dshift
               (cEdge.fCDy - (cEdge.fCDDy >> ddshift)) >> cEdge.fCubicDShift >= SK_Fixed1;
    } else if (thisEdge->fCurveCount > 0) {
        // Quadratic edge: same idea using the quad's difference terms.
        const SkQuadraticEdge& qEdge = static_cast<SkAnalyticQuadraticEdge*>(thisEdge)->fQEdge;
        return SkAbs32(qEdge.fQDx) >> 1 >= SkAbs32(qEdge.fQDDx) &&
               SkAbs32(qEdge.fQDy) >> 1 >= SkAbs32(qEdge.fQDDy) &&
               // current Dy is (fQDy - fQDDy) >> shift
               (qEdge.fQDy - qEdge.fQDDy) >> qEdge.fCurveShift >= SK_Fixed1;
    }
    // Line edge: estimate DDx by comparing against the following edge.
    return SkAbs32(nextEdge->fDX - thisEdge->fDX) <= SK_Fixed1 &&  // DDx should be small
           nextEdge->fLowerY - nextEdge->fUpperY >= SK_Fixed1;     // Dy should be large
}
1069
1070 // Check if the leftE and riteE are changing smoothly in terms of fDX.
1071 // If yes, we can later skip the fractional y and directly jump to integer y.
is_smooth_enough(SkAnalyticEdge * leftE,SkAnalyticEdge * riteE,SkAnalyticEdge * currE,int stop_y)1072 static bool is_smooth_enough(SkAnalyticEdge* leftE,
1073 SkAnalyticEdge* riteE,
1074 SkAnalyticEdge* currE,
1075 int stop_y) {
1076 if (currE->fUpperY >= SkLeftShift(stop_y, 16)) {
1077 return false; // We're at the end so we won't skip anything
1078 }
1079 if (leftE->fLowerY + SK_Fixed1 < riteE->fLowerY) {
1080 return is_smooth_enough(leftE, currE, stop_y); // Only leftE is changing
1081 } else if (leftE->fLowerY > riteE->fLowerY + SK_Fixed1) {
1082 return is_smooth_enough(riteE, currE, stop_y); // Only riteE is changing
1083 }
1084
1085 // Now both edges are changing, find the second next edge
1086 SkAnalyticEdge* nextCurrE = currE->fNext;
1087 if (nextCurrE->fUpperY >= stop_y << 16) { // Check if we're at the end
1088 return false;
1089 }
1090 // Ensure that currE is the next left edge and nextCurrE is the next right edge. Swap if not.
1091 if (nextCurrE->fUpperX < currE->fUpperX) {
1092 std::swap(currE, nextCurrE);
1093 }
1094 return is_smooth_enough(leftE, currE, stop_y) && is_smooth_enough(riteE, nextCurrE, stop_y);
1095 }
1096
// Walk the sorted edge list of a convex path from start_y to stop_y, blitting
// coverage row by row. Convexity means there is at most one left edge and one
// right edge active per scan line, so no winding accounting is needed. When
// both active edges are vertical the covered region is blitted as an
// axis-aligned rectangle (with partial boundary pixels); otherwise it is
// blitted as a sequence of one-pixel-high trapezoid rows. leftBound/riteBound
// clip x in SkFixed coordinates.
static void aaa_walk_convex_edges(SkAnalyticEdge* prevHead,
                                  AdditiveBlitter* blitter,
                                  int start_y,
                                  int stop_y,
                                  SkFixed leftBound,
                                  SkFixed riteBound,
                                  bool isUsingMask) {
    validate_sort((SkAnalyticEdge*)prevHead->fNext);

    SkAnalyticEdge* leftE = (SkAnalyticEdge*)prevHead->fNext;
    SkAnalyticEdge* riteE = (SkAnalyticEdge*)leftE->fNext;
    SkAnalyticEdge* currE = (SkAnalyticEdge*)riteE->fNext;

    // Start at the lower of the two top endpoints (in fixed point).
    SkFixed y = std::max(leftE->fUpperY, riteE->fUpperY);

    for (;;) {
        // We have to check fLowerY first because some edges might be alone (e.g., there's only
        // a left edge but no right edge in a given y scan line) due to precision limit.
        while (leftE->fLowerY <= y) {  // Due to smooth jump, we may pass multiple short edges
            if (!leftE->update(y)) {
                // leftE is exhausted; advance to the next edge in the list.
                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
                    goto END_WALK;
                }
                leftE = currE;
                currE = (SkAnalyticEdge*)currE->fNext;
            }
        }
        while (riteE->fLowerY <= y) {  // Due to smooth jump, we may pass multiple short edges
            if (!riteE->update(y)) {
                // riteE is exhausted; advance to the next edge in the list.
                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
                    goto END_WALK;
                }
                riteE = currE;
                currE = (SkAnalyticEdge*)currE->fNext;
            }
        }

        SkASSERT(leftE);
        SkASSERT(riteE);

        // check our bottom clip
        if (SkFixedFloorToInt(y) >= stop_y) {
            break;
        }

        SkASSERT(SkFixedFloorToInt(leftE->fUpperY) <= stop_y);
        SkASSERT(SkFixedFloorToInt(riteE->fUpperY) <= stop_y);

        leftE->goY(y);
        riteE->goY(y);

        // Keep leftE to the left of riteE (tie broken by slope).
        if (leftE->fX > riteE->fX || (leftE->fX == riteE->fX && leftE->fDX > riteE->fDX)) {
            std::swap(leftE, riteE);
        }

        // Process rows until the first edge ends (or the smooth-jump allows
        // snapping straight to the next integer y).
        SkFixed local_bot_fixed = std::min(leftE->fLowerY, riteE->fLowerY);
        if (is_smooth_enough(leftE, riteE, currE, stop_y)) {
            local_bot_fixed = SkFixedCeilToFixed(local_bot_fixed);
        }
        local_bot_fixed = std::min(local_bot_fixed, SkIntToFixed(stop_y));

        SkFixed left = std::max(leftBound, leftE->fX);
        SkFixed dLeft = leftE->fDX;
        SkFixed rite = std::min(riteBound, riteE->fX);
        SkFixed dRite = riteE->fDX;
        if (0 == (dLeft | dRite)) {
            // Both edges vertical: the covered region is a rectangle with
            // (possibly) partial top/bottom/left/right boundary pixels.
            int fullLeft = SkFixedCeilToInt(left);
            int fullRite = SkFixedFloorToInt(rite);
            SkFixed partialLeft = SkIntToFixed(fullLeft) - left;
            SkFixed partialRite = rite - SkIntToFixed(fullRite);
            int fullTop = SkFixedCeilToInt(y);
            int fullBot = SkFixedFloorToInt(local_bot_fixed);
            SkFixed partialTop = SkIntToFixed(fullTop) - y;
            SkFixed partialBot = local_bot_fixed - SkIntToFixed(fullBot);
            if (fullTop > fullBot) {  // The rectangle is within one pixel height...
                partialTop -= (SK_Fixed1 - partialBot);
                partialBot = 0;
            }

            if (fullRite >= fullLeft) {
                if (partialTop > 0) {  // blit first partial row
                    if (partialLeft > 0) {
                        blitter->blitAntiH(fullLeft - 1,
                                           fullTop - 1,
                                           fixed_to_alpha(SkFixedMul(partialTop, partialLeft)));
                    }
                    blitter->blitAntiH(
                            fullLeft, fullTop - 1, fullRite - fullLeft, fixed_to_alpha(partialTop));
                    if (partialRite > 0) {
                        blitter->blitAntiH(fullRite,
                                           fullTop - 1,
                                           fixed_to_alpha(SkFixedMul(partialTop, partialRite)));
                    }
                    blitter->flush_if_y_changed(y, y + partialTop);
                }

                // Blit all full-height rows from fullTop to fullBot
                if (fullBot > fullTop &&
                    // SkAAClip cannot handle the empty rect so check the non-emptiness here
                    // (bug chromium:662800)
                    (fullRite > fullLeft || fixed_to_alpha(partialLeft) > 0 ||
                     fixed_to_alpha(partialRite) > 0)) {
                    blitter->getRealBlitter()->blitAntiRect(fullLeft - 1,
                                                            fullTop,
                                                            fullRite - fullLeft,
                                                            fullBot - fullTop,
                                                            fixed_to_alpha(partialLeft),
                                                            fixed_to_alpha(partialRite));
                }

                if (partialBot > 0) {  // blit last partial row
                    if (partialLeft > 0) {
                        blitter->blitAntiH(fullLeft - 1,
                                           fullBot,
                                           fixed_to_alpha(SkFixedMul(partialBot, partialLeft)));
                    }
                    blitter->blitAntiH(
                            fullLeft, fullBot, fullRite - fullLeft, fixed_to_alpha(partialBot));
                    if (partialRite > 0) {
                        blitter->blitAntiH(fullRite,
                                           fullBot,
                                           fixed_to_alpha(SkFixedMul(partialBot, partialRite)));
                    }
                }
            } else {
                // Normal conditions, this means left and rite are within the same pixel, but if
                // both left and rite were < leftBounds or > rightBounds, both edges are clipped and
                // we should not do any blitting (particularly since the negative width saturates to
                // full alpha).
                SkFixed width = rite - left;
                if (width > 0) {
                    if (partialTop > 0) {
                        blitter->blitAntiH(fullLeft - 1,
                                           fullTop - 1,
                                           1,
                                           fixed_to_alpha(SkFixedMul(partialTop, width)));
                        blitter->flush_if_y_changed(y, y + partialTop);
                    }
                    if (fullBot > fullTop) {
                        blitter->getRealBlitter()->blitV(
                                fullLeft - 1, fullTop, fullBot - fullTop, fixed_to_alpha(width));
                    }
                    if (partialBot > 0) {
                        blitter->blitAntiH(fullLeft - 1,
                                           fullBot,
                                           1,
                                           fixed_to_alpha(SkFixedMul(partialBot, width)));
                    }
                }
            }

            y = local_bot_fixed;
        } else {
            // The following constant are used to snap X
            // We snap X mainly for speedup (no tiny triangle) and
            // avoiding edge cases caused by precision errors
            const SkFixed kSnapDigit = SK_Fixed1 >> 4;
            const SkFixed kSnapHalf = kSnapDigit >> 1;
            const SkFixed kSnapMask = (-1 ^ (kSnapDigit - 1));
            left += kSnapHalf;
            rite += kSnapHalf;  // For fast rounding

            // Number of blit_trapezoid_row calls we'll have
            int count = SkFixedCeilToInt(local_bot_fixed) - SkFixedFloorToInt(y);

            // If we're using mask blitter, we advance the mask row in this function
            // to save some "if" condition checks.
            SkAlpha* maskRow = nullptr;
            if (isUsingMask) {
                maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
            }

            // Instead of writing one loop that handles both partial-row blit_trapezoid_row
            // and full-row trapezoid_row together, we use the following 3-stage flow to
            // handle partial-row blit and full-row blit separately. It will save us much time
            // on changing y, left, and rite.
            if (count > 1) {
                if ((int)(y & 0xFFFF0000) != y) {  // There's a partial-row on the top
                    count--;
                    SkFixed nextY = SkFixedCeilToFixed(y + 1);
                    SkFixed dY = nextY - y;
                    SkFixed nextLeft = left + SkFixedMul(dLeft, dY);
                    SkFixed nextRite = rite + SkFixedMul(dRite, dY);
                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                             (nextLeft & kSnapMask) >= leftBound &&
                             (nextRite & kSnapMask) <= riteBound);
                    blit_trapezoid_row(blitter,
                                       y >> 16,
                                       left & kSnapMask,
                                       rite & kSnapMask,
                                       nextLeft & kSnapMask,
                                       nextRite & kSnapMask,
                                       leftE->fDY,
                                       riteE->fDY,
                                       get_partial_alpha(0xFF, dY),
                                       maskRow,
                                       isUsingMask);
                    blitter->flush_if_y_changed(y, nextY);
                    left = nextLeft;
                    rite = nextRite;
                    y = nextY;
                }

                while (count > 1) {  // Full rows in the middle
                    count--;
                    if (isUsingMask) {
                        maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
                    }
                    SkFixed nextY = y + SK_Fixed1, nextLeft = left + dLeft, nextRite = rite + dRite;
                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                             (nextLeft & kSnapMask) >= leftBound &&
                             (nextRite & kSnapMask) <= riteBound);
                    blit_trapezoid_row(blitter,
                                       y >> 16,
                                       left & kSnapMask,
                                       rite & kSnapMask,
                                       nextLeft & kSnapMask,
                                       nextRite & kSnapMask,
                                       leftE->fDY,
                                       riteE->fDY,
                                       0xFF,
                                       maskRow,
                                       isUsingMask);
                    blitter->flush_if_y_changed(y, nextY);
                    left = nextLeft;
                    rite = nextRite;
                    y = nextY;
                }
            }

            if (isUsingMask) {
                maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
            }

            SkFixed dY = local_bot_fixed - y;  // partial-row on the bottom
            SkASSERT(dY <= SK_Fixed1);
            // Smooth jumping to integer y may make the last nextLeft/nextRite out of bound.
            // Take them back into the bound here.
            // Note that we subtract kSnapHalf later so we have to add them to leftBound/riteBound
            SkFixed nextLeft = std::max(left + SkFixedMul(dLeft, dY), leftBound + kSnapHalf);
            SkFixed nextRite = std::min(rite + SkFixedMul(dRite, dY), riteBound + kSnapHalf);
            SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                     (nextLeft & kSnapMask) >= leftBound && (nextRite & kSnapMask) <= riteBound);
            blit_trapezoid_row(blitter,
                               y >> 16,
                               left & kSnapMask,
                               rite & kSnapMask,
                               nextLeft & kSnapMask,
                               nextRite & kSnapMask,
                               leftE->fDY,
                               riteE->fDY,
                               get_partial_alpha(0xFF, dY),
                               maskRow,
                               isUsingMask);
            blitter->flush_if_y_changed(y, local_bot_fixed);
            left = nextLeft;
            rite = nextRite;
            y = local_bot_fixed;
            left -= kSnapHalf;
            rite -= kSnapHalf;
        }

        // Store the advanced positions back so the next iteration (or an
        // updated curve segment) continues from where we stopped.
        leftE->fX = left;
        riteE->fX = rite;
        leftE->fY = riteE->fY = y;
    }

END_WALK:;
}
1366
update_next_next_y(SkFixed y,SkFixed nextY,SkFixed * nextNextY)1367 static void update_next_next_y(SkFixed y, SkFixed nextY, SkFixed* nextNextY) {
1368 *nextNextY = y > nextY && y < *nextNextY ? y : *nextNextY;
1369 }
1370
check_intersection(const SkAnalyticEdge * edge,SkFixed nextY,SkFixed * nextNextY)1371 static void check_intersection(const SkAnalyticEdge* edge, SkFixed nextY, SkFixed* nextNextY) {
1372 if (edge->fPrev->fPrev && edge->fPrev->fX + edge->fPrev->fDX > edge->fX + edge->fDX) {
1373 *nextNextY = nextY + (SK_Fixed1 >> SkAnalyticEdge::kDefaultAccuracy);
1374 }
1375 }
1376
// Activate all edges whose fUpperY <= y, inserting each into the x-sorted
// active list, and fold every activated edge's fLowerY (plus any detected
// convergence) into *nextNextY. newEdge is the first not-yet-active edge in
// upper-y order; on return it points past the last activated edge.
static void insert_new_edges(SkAnalyticEdge* newEdge, SkFixed y, SkFixed* nextNextY) {
    if (newEdge->fUpperY > y) {
        // Nothing starts at or before y; just record when the next edge begins.
        update_next_next_y(newEdge->fUpperY, y, nextNextY);
        return;
    }
    SkAnalyticEdge* prev = newEdge->fPrev;
    if (prev->fX <= newEdge->fX) {
        // Already in x order relative to its predecessor: activate edges in
        // place without relinking.
        while (newEdge->fUpperY <= y) {
            check_intersection(newEdge, y, nextNextY);
            update_next_next_y(newEdge->fLowerY, y, nextNextY);
            newEdge = newEdge->fNext;
        }
        update_next_next_y(newEdge->fUpperY, y, nextNextY);
        return;
    }
    // find first x pos to insert
    SkAnalyticEdge* start = backward_insert_start(prev, newEdge->fX);
    // insert the lot, fixing up the links as we go
    do {
        SkAnalyticEdge* next = newEdge->fNext;
        do {
            // Already directly after start: no relink needed for this edge.
            if (start->fNext == newEdge) {
                goto nextEdge;
            }
            // Scan forward until we find the first edge with fX >= newEdge->fX.
            SkAnalyticEdge* after = start->fNext;
            if (after->fX >= newEdge->fX) {
                break;
            }
            SkASSERT(start != after);
            start = after;
        } while (true);
        remove_edge(newEdge);
        insert_edge_after(newEdge, start);
    nextEdge:
        check_intersection(newEdge, y, nextNextY);
        update_next_next_y(newEdge->fLowerY, y, nextNextY);
        start = newEdge;
        newEdge = next;
    } while (newEdge->fUpperY <= y);
    update_next_next_y(newEdge->fUpperY, y, nextNextY);
}
1418
// Debug-only: assert that every active edge (fUpperY <= y) is properly
// doubly-linked, has a sane vertical span, and is x-sorted relative to its
// predecessor (the sentinel head is exempt). No-op in release builds.
static void validate_edges_for_y(const SkAnalyticEdge* edge, SkFixed y) {
#ifdef SK_DEBUG
    for (; edge->fUpperY <= y; edge = edge->fNext) {
        SkASSERT(edge->fPrev && edge->fNext);
        SkASSERT(edge->fPrev->fNext == edge);
        SkASSERT(edge->fNext->fPrev == edge);
        SkASSERT(edge->fUpperY <= edge->fLowerY);
        SkASSERT(edge->fPrev->fPrev == nullptr || edge->fPrev->fX <= edge->fX);
    }
#endif
}
1431
1432 // Return true if prev->fX, next->fX are too close in the current pixel row.
edges_too_close(SkAnalyticEdge * prev,SkAnalyticEdge * next,SkFixed lowerY)1433 static bool edges_too_close(SkAnalyticEdge* prev, SkAnalyticEdge* next, SkFixed lowerY) {
1434 // When next->fDX == 0, prev->fX >= next->fX - SkAbs32(next->fDX) would be false
1435 // even if prev->fX and next->fX are close and within one pixel (e.g., prev->fX == 0.1,
1436 // next->fX == 0.9). Adding SLACK = 1 to the formula would guarantee it to be true if two
1437 // edges prev and next are within one pixel.
1438 constexpr SkFixed SLACK = SK_Fixed1;
1439
1440 // Note that even if the following test failed, the edges might still be very close to each
1441 // other at some point within the current pixel row because of prev->fDX and next->fDX.
1442 // However, to handle that case, we have to sacrafice more performance.
1443 // I think the current quality is good enough (mainly by looking at Nebraska-StateSeal.svg)
1444 // so I'll ignore fDX for performance tradeoff.
1445 return next && prev && next->fUpperY < lowerY &&
1446 prev->fX + SLACK >= next->fX - SkAbs32(next->fDX);
1447 // The following is more accurate but also slower.
1448 // return (prev && prev->fPrev && next && next->fNext != nullptr && next->fUpperY < lowerY &&
1449 // prev->fX + SkAbs32(prev->fDX) + SLACK >= next->fX - SkAbs32(next->fDX));
1450 }
1451
1452 // This function exists for the case where the previous rite edge is removed because
1453 // its fLowerY <= nextY
edges_too_close(int prevRite,SkFixed ul,SkFixed ll)1454 static bool edges_too_close(int prevRite, SkFixed ul, SkFixed ll) {
1455 return prevRite > SkFixedFloorToInt(ul) || prevRite > SkFixedFloorToInt(ll);
1456 }
1457
// Flush the trapezoid previously saved on leftE (top at fSavedY, spanning
// leftE->fSavedX to its paired right edge's fSavedX) down to lowerY, whose
// bottom edge runs [lowerLeft, lowerRite]. fSavedY and lowerY must fall in
// the same pixel row (asserted below). Clears leftE->fRiteE when done.
static void blit_saved_trapezoid(SkAnalyticEdge* leftE,
                                 SkFixed lowerY,
                                 SkFixed lowerLeft,
                                 SkFixed lowerRite,
                                 AdditiveBlitter* blitter,
                                 SkAlpha* maskRow,
                                 bool isUsingMask,
                                 bool noRealBlitter,
                                 SkFixed leftClip,
                                 SkFixed rightClip) {
    SkAnalyticEdge* riteE = leftE->fRiteE;
    SkASSERT(riteE);
    SkASSERT(riteE->fNext == nullptr || leftE->fSavedY == riteE->fSavedY);
    SkASSERT(SkFixedFloorToInt(lowerY - 1) == SkFixedFloorToInt(leftE->fSavedY));
    int y = SkFixedFloorToInt(leftE->fSavedY);
    // Instead of using fixed_to_alpha(lowerY - leftE->fSavedY), we use the following fullAlpha
    // to eliminate cumulative error: if there are many fractional y scan lines within the
    // same row, the former may accumulate the rounding error while the latter won't.
    SkAlpha fullAlpha = fixed_to_alpha(lowerY - SkIntToFixed(y)) -
                        fixed_to_alpha(leftE->fSavedY - SkIntToFixed(y));
    // We need fSavedDY because the (quad or cubic) edge might be updated
    blit_trapezoid_row(
            blitter,
            y,
            std::max(leftE->fSavedX, leftClip),
            std::min(riteE->fSavedX, rightClip),
            std::max(lowerLeft, leftClip),
            std::min(lowerRite, rightClip),
            leftE->fSavedDY,
            riteE->fSavedDY,
            fullAlpha,
            maskRow,
            isUsingMask,
            // Suppress the real blitter when a neighboring edge is too close:
            // overlapping coverage would break SkAAClip's left-to-right rule.
            noRealBlitter || (fullAlpha == 0xFF && (edges_too_close(leftE->fPrev, leftE, lowerY) ||
                                                    edges_too_close(riteE, riteE->fNext, lowerY))),
            true);
    leftE->fRiteE = nullptr;
}
1496
// Handle an in-interval (leftE, riteE) pair when deferred blitting is on.
// If leftE already holds a saved trapezoid with a different right edge, flush
// it first. Then (if nothing is pending) save the current trapezoid top on
// the pair, advance riteE to nextY, and flush immediately when nextY is
// integral or either edge terminates at nextY.
static void deferred_blit(SkAnalyticEdge* leftE,
                          SkAnalyticEdge* riteE,
                          SkFixed left,
                          SkFixed leftDY,  // don't save leftE->fX/fDY as they may have been updated
                          SkFixed y,
                          SkFixed nextY,
                          bool isIntegralNextY,
                          bool leftEnds,
                          bool riteEnds,
                          AdditiveBlitter* blitter,
                          SkAlpha* maskRow,
                          bool isUsingMask,
                          bool noRealBlitter,
                          SkFixed leftClip,
                          SkFixed rightClip,
                          int yShift) {
    if (leftE->fRiteE && leftE->fRiteE != riteE) {
        // leftE's right edge changed. Blit the saved trapezoid.
        SkASSERT(leftE->fRiteE->fNext == nullptr || leftE->fRiteE->fY == y);
        blit_saved_trapezoid(leftE,
                             y,
                             left,
                             leftE->fRiteE->fX,
                             blitter,
                             maskRow,
                             isUsingMask,
                             noRealBlitter,
                             leftClip,
                             rightClip);
    }
    if (!leftE->fRiteE) {
        // Save and defer blitting the trapezoid
        SkASSERT(riteE->fRiteE == nullptr);
        SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
        SkASSERT(riteE->fNext == nullptr || riteE->fY == y);
        leftE->saveXY(left, y, leftDY);
        riteE->saveXY(riteE->fX, y, riteE->fDY);
        leftE->fRiteE = riteE;
    }
    SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
    riteE->goY(nextY, yShift);
    // Always blit when edges end or nextY is integral
    if (isIntegralNextY || leftEnds || riteEnds) {
        blit_saved_trapezoid(leftE,
                             nextY,
                             leftE->fX,
                             riteE->fX,
                             blitter,
                             maskRow,
                             isUsingMask,
                             noRealBlitter,
                             leftClip,
                             rightClip);
    }
}
1552
// General edge walker for (possibly concave) paths. For each fractional scan
// line in [start_y, stop_y), it walks the x-sorted active edge list, tracks
// winding counts against windingMask, and blits a trapezoid row for every
// in-interval span (or the inverse spans for inverse fill types). With
// useDeferred, consecutive fractional rows inside one pixel row are
// accumulated via fSavedX/fSavedY and blitted once. nextNextY tracks the next
// y at which the active list can change (edge start/end or intersection).
static void aaa_walk_edges(SkAnalyticEdge* prevHead,
                           SkAnalyticEdge* nextTail,
                           SkPathFillType fillType,
                           AdditiveBlitter* blitter,
                           int start_y,
                           int stop_y,
                           SkFixed leftClip,
                           SkFixed rightClip,
                           bool isUsingMask,
                           bool forceRLE,
                           bool useDeferred,
                           bool skipIntersect) {
    // The sentinel head/tail act as permanent left/right bounds.
    prevHead->fX = prevHead->fUpperX = leftClip;
    nextTail->fX = nextTail->fUpperX = rightClip;
    SkFixed y = std::max(prevHead->fNext->fUpperY, SkIntToFixed(start_y));
    SkFixed nextNextY = SK_MaxS32;

    {
        // Bring all edges that start at or above y up to y, and compute the
        // first nextNextY from their ends and the first inactive edge's start.
        SkAnalyticEdge* edge;
        for (edge = prevHead->fNext; edge->fUpperY <= y; edge = edge->fNext) {
            edge->goY(y);
            update_next_next_y(edge->fLowerY, y, &nextNextY);
        }
        update_next_next_y(edge->fUpperY, y, &nextNextY);
    }

    int windingMask = SkPathFillType_IsEvenOdd(fillType) ? 1 : -1;
    bool isInverse = SkPathFillType_IsInverse(fillType);

    if (isInverse && SkIntToFixed(start_y) != y) {
        // Inverse fill: the area above the first edge is fully covered.
        // Blit the whole rows, then the leading partial row.
        int width = SkFixedFloorToInt(rightClip - leftClip);
        if (SkFixedFloorToInt(y) != start_y) {
            blitter->getRealBlitter()->blitRect(
                    SkFixedFloorToInt(leftClip), start_y, width, SkFixedFloorToInt(y) - start_y);
            start_y = SkFixedFloorToInt(y);
        }
        SkAlpha* maskRow =
                isUsingMask ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(start_y) : nullptr;
        blit_full_alpha(blitter,
                        start_y,
                        SkFixedFloorToInt(leftClip),
                        width,
                        fixed_to_alpha(y - SkIntToFixed(start_y)),
                        maskRow,
                        isUsingMask,
                        false,
                        false);
    }

    while (true) {
        int w = 0;
        bool in_interval = isInverse;
        SkFixed prevX = prevHead->fX;
        // Stop at the next list-change y or the next integer y, whichever
        // comes first.
        SkFixed nextY = std::min(nextNextY, SkFixedCeilToFixed(y + 1));
        bool isIntegralNextY = (nextY & (SK_Fixed1 - 1)) == 0;
        SkAnalyticEdge* currE = prevHead->fNext;
        SkAnalyticEdge* leftE = prevHead;
        SkFixed left = leftClip;
        SkFixed leftDY = 0;
        bool leftEnds = false;
        int prevRite = SkFixedFloorToInt(leftClip);

        nextNextY = SK_MaxS32;

        // Quantize the step to 1, 1/2, or 1/4 of a pixel so edge updates can
        // use a plain shift (yShift) instead of a multiply.
        SkASSERT((nextY & ((SK_Fixed1 >> 2) - 1)) == 0);
        int yShift = 0;
        if ((nextY - y) & (SK_Fixed1 >> 2)) {
            yShift = 2;
            nextY = y + (SK_Fixed1 >> 2);
        } else if ((nextY - y) & (SK_Fixed1 >> 1)) {
            yShift = 1;
            SkASSERT(nextY == y + (SK_Fixed1 >> 1));
        }

        SkAlpha fullAlpha = fixed_to_alpha(nextY - y);

        // If we're using mask blitter, we advance the mask row in this function
        // to save some "if" condition checks.
        SkAlpha* maskRow = nullptr;
        if (isUsingMask) {
            maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(SkFixedFloorToInt(y));
        }

        SkASSERT(currE->fPrev == prevHead);
        validate_edges_for_y(currE, y);

        // Even if next - y == SK_Fixed1, we can still break the left-to-right order requirement
        // of the SKAAClip: |\| (two trapezoids with overlapping middle wedges)
        bool noRealBlitter = forceRLE;  // forceRLE && (nextY - y != SK_Fixed1);

        while (currE->fUpperY <= y) {
            SkASSERT(currE->fLowerY >= nextY);
            SkASSERT(currE->fY == y);

            w += currE->fWinding;
            bool prev_in_interval = in_interval;
            in_interval = !(w & windingMask) == isInverse;

            bool isLeft = in_interval && !prev_in_interval;
            bool isRite = !in_interval && prev_in_interval;
            bool currEnds = currE->fLowerY == nextY;

            if (useDeferred) {
                if (currE->fRiteE && !isLeft) {
                    // currE is a left edge previously, but now it's not.
                    // Blit the trapezoid between fSavedY and y.
                    SkASSERT(currE->fRiteE->fY == y);
                    blit_saved_trapezoid(currE,
                                         y,
                                         currE->fX,
                                         currE->fRiteE->fX,
                                         blitter,
                                         maskRow,
                                         isUsingMask,
                                         noRealBlitter,
                                         leftClip,
                                         rightClip);
                }
                if (leftE->fRiteE == currE && !isRite) {
                    // currE is a right edge previously, but now it's not.
                    // Moreover, its corresponding leftE doesn't change (otherwise we'll handle it
                    // in the previous if clause). Hence we blit the trapezoid.
                    blit_saved_trapezoid(leftE,
                                         y,
                                         left,
                                         currE->fX,
                                         blitter,
                                         maskRow,
                                         isUsingMask,
                                         noRealBlitter,
                                         leftClip,
                                         rightClip);
                }
            }

            if (isRite) {
                if (useDeferred) {
                    deferred_blit(leftE,
                                  currE,
                                  left,
                                  leftDY,
                                  y,
                                  nextY,
                                  isIntegralNextY,
                                  leftEnds,
                                  currEnds,
                                  blitter,
                                  maskRow,
                                  isUsingMask,
                                  noRealBlitter,
                                  leftClip,
                                  rightClip,
                                  yShift);
                } else {
                    // Immediate mode: blit this span's trapezoid row now.
                    SkFixed rite = currE->fX;
                    currE->goY(nextY, yShift);
                    SkFixed nextLeft = std::max(leftClip, leftE->fX);
                    rite = std::min(rightClip, rite);
                    SkFixed nextRite = std::min(rightClip, currE->fX);
                    blit_trapezoid_row(
                            blitter,
                            y >> 16,
                            left,
                            rite,
                            nextLeft,
                            nextRite,
                            leftDY,
                            currE->fDY,
                            fullAlpha,
                            maskRow,
                            isUsingMask,
                            noRealBlitter || (fullAlpha == 0xFF &&
                                              (edges_too_close(prevRite, left, leftE->fX) ||
                                               edges_too_close(currE, currE->fNext, nextY))),
                            true);
                    prevRite = SkFixedCeilToInt(std::max(rite, currE->fX));
                }
            } else {
                if (isLeft) {
                    // Start a new in-interval span at this edge.
                    left = std::max(currE->fX, leftClip);
                    leftDY = currE->fDY;
                    leftE = currE;
                    leftEnds = leftE->fLowerY == nextY;
                }
                currE->goY(nextY, yShift);
            }

            SkAnalyticEdge* next = currE->fNext;
            SkFixed newX;

            // If the edge segment ends at nextY, pull in the curve's next
            // segment (quad/cubic) so the edge stays continuous.
            while (currE->fLowerY <= nextY) {
                if (currE->fCurveCount < 0) {
                    SkAnalyticCubicEdge* cubicEdge = (SkAnalyticCubicEdge*)currE;
                    cubicEdge->keepContinuous();
                    if (!cubicEdge->updateCubic()) {
                        break;
                    }
                } else if (currE->fCurveCount > 0) {
                    SkAnalyticQuadraticEdge* quadEdge = (SkAnalyticQuadraticEdge*)currE;
                    quadEdge->keepContinuous();
                    if (!quadEdge->updateQuadratic()) {
                        break;
                    }
                } else {
                    break;
                }
            }
            SkASSERT(currE->fY == nextY);

            if (currE->fLowerY <= nextY) {
                // Edge is exhausted: drop it from the active list.
                remove_edge(currE);
            } else {
                update_next_next_y(currE->fLowerY, nextY, &nextNextY);
                newX = currE->fX;
                SkASSERT(currE->fLowerY > nextY);
                if (newX < prevX) {  // ripple currE backwards until it is x-sorted
                    // If the crossing edge is a right edge, blit the saved trapezoid.
                    if (leftE->fRiteE == currE && useDeferred) {
                        SkASSERT(leftE->fY == nextY && currE->fY == nextY);
                        blit_saved_trapezoid(leftE,
                                             nextY,
                                             leftE->fX,
                                             currE->fX,
                                             blitter,
                                             maskRow,
                                             isUsingMask,
                                             noRealBlitter,
                                             leftClip,
                                             rightClip);
                    }
                    backward_insert_edge_based_on_x(currE);
                } else {
                    prevX = newX;
                }
                if (!skipIntersect) {
                    check_intersection(currE, nextY, &nextNextY);
                }
            }

            currE = next;
            SkASSERT(currE);
        }

        // was our right-edge culled away?
        if (in_interval) {
            if (useDeferred) {
                deferred_blit(leftE,
                              nextTail,
                              left,
                              leftDY,
                              y,
                              nextY,
                              isIntegralNextY,
                              leftEnds,
                              false,
                              blitter,
                              maskRow,
                              isUsingMask,
                              noRealBlitter,
                              leftClip,
                              rightClip,
                              yShift);
            } else {
                blit_trapezoid_row(blitter,
                                   y >> 16,
                                   left,
                                   rightClip,
                                   std::max(leftClip, leftE->fX),
                                   rightClip,
                                   leftDY,
                                   0,
                                   fullAlpha,
                                   maskRow,
                                   isUsingMask,
                                   noRealBlitter || (fullAlpha == 0xFF &&
                                                     edges_too_close(leftE->fPrev, leftE, nextY)),
                                   true);
            }
        }

        if (forceRLE) {
            ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY);
        }

        y = nextY;
        if (y >= SkIntToFixed(stop_y)) {
            break;
        }

        // now currE points to the first edge with a fUpperY larger than the previous y
        insert_new_edges(currE, y, &nextNextY);
    }
}
1846
aaa_fill_path(const SkPath & path,const SkIRect & clipRect,AdditiveBlitter * blitter,int start_y,int stop_y,bool pathContainedInClip,bool isUsingMask,bool forceRLE)1847 static SK_ALWAYS_INLINE void aaa_fill_path(
1848 const SkPath& path,
1849 const SkIRect& clipRect,
1850 AdditiveBlitter* blitter,
1851 int start_y,
1852 int stop_y,
1853 bool pathContainedInClip,
1854 bool isUsingMask,
1855 bool forceRLE) { // forceRLE implies that SkAAClip is calling us
1856 SkASSERT(blitter);
1857
1858 SkAnalyticEdgeBuilder builder;
1859 int count = builder.buildEdges(path, pathContainedInClip ? nullptr : &clipRect);
1860 SkAnalyticEdge** list = builder.analyticEdgeList();
1861
1862 SkIRect rect = clipRect;
1863 if (0 == count) {
1864 if (path.isInverseFillType()) {
1865 /*
1866 * Since we are in inverse-fill, our caller has already drawn above
1867 * our top (start_y) and will draw below our bottom (stop_y). Thus
1868 * we need to restrict our drawing to the intersection of the clip
1869 * and those two limits.
1870 */
1871 if (rect.fTop < start_y) {
1872 rect.fTop = start_y;
1873 }
1874 if (rect.fBottom > stop_y) {
1875 rect.fBottom = stop_y;
1876 }
1877 if (!rect.isEmpty()) {
1878 blitter->getRealBlitter()->blitRect(
1879 rect.fLeft, rect.fTop, rect.width(), rect.height());
1880 }
1881 }
1882 return;
1883 }
1884
1885 SkAnalyticEdge headEdge, tailEdge, *last;
1886 // this returns the first and last edge after they're sorted into a dlink list
1887 SkAnalyticEdge* edge = sort_edges(list, count, &last);
1888
1889 headEdge.fRiteE = nullptr;
1890 headEdge.fPrev = nullptr;
1891 headEdge.fNext = edge;
1892 headEdge.fUpperY = headEdge.fLowerY = SK_MinS32;
1893 headEdge.fX = SK_MinS32;
1894 headEdge.fDX = 0;
1895 headEdge.fDY = SK_MaxS32;
1896 headEdge.fUpperX = SK_MinS32;
1897 edge->fPrev = &headEdge;
1898
1899 tailEdge.fRiteE = nullptr;
1900 tailEdge.fPrev = last;
1901 tailEdge.fNext = nullptr;
1902 tailEdge.fUpperY = tailEdge.fLowerY = SK_MaxS32;
1903 tailEdge.fX = SK_MaxS32;
1904 tailEdge.fDX = 0;
1905 tailEdge.fDY = SK_MaxS32;
1906 tailEdge.fUpperX = SK_MaxS32;
1907 last->fNext = &tailEdge;
1908
1909 // now edge is the head of the sorted linklist
1910
1911 if (!pathContainedInClip && start_y < clipRect.fTop) {
1912 start_y = clipRect.fTop;
1913 }
1914 if (!pathContainedInClip && stop_y > clipRect.fBottom) {
1915 stop_y = clipRect.fBottom;
1916 }
1917
1918 SkFixed leftBound = SkIntToFixed(rect.fLeft);
1919 SkFixed rightBound = SkIntToFixed(rect.fRight);
1920 if (isUsingMask) {
1921 // If we're using mask, then we have to limit the bound within the path bounds.
1922 // Otherwise, the edge drift may access an invalid address inside the mask.
1923 SkIRect ir;
1924 path.getBounds().roundOut(&ir);
1925 leftBound = std::max(leftBound, SkIntToFixed(ir.fLeft));
1926 rightBound = std::min(rightBound, SkIntToFixed(ir.fRight));
1927 }
1928
1929 if (!path.isInverseFillType() && path.isConvex() && count >= 2) {
1930 aaa_walk_convex_edges(
1931 &headEdge, blitter, start_y, stop_y, leftBound, rightBound, isUsingMask);
1932 } else {
1933 // Only use deferred blitting if there are many edges.
1934 bool useDeferred =
1935 count >
1936 (SkFixedFloorToInt(tailEdge.fPrev->fLowerY - headEdge.fNext->fUpperY) + 1) * 4;
1937
1938 // We skip intersection computation if there are many points which probably already
1939 // give us enough fractional scan lines.
1940 bool skipIntersect = path.countPoints() > (stop_y - start_y) * 2;
1941
1942 aaa_walk_edges(&headEdge,
1943 &tailEdge,
1944 path.getFillType(),
1945 blitter,
1946 start_y,
1947 stop_y,
1948 leftBound,
1949 rightBound,
1950 isUsingMask,
1951 forceRLE,
1952 useDeferred,
1953 skipIntersect);
1954 }
1955 }
1956
AAAFillPath(const SkPath & path,SkBlitter * blitter,const SkIRect & ir,const SkIRect & clipBounds,bool forceRLE)1957 void SkScan::AAAFillPath(const SkPath& path,
1958 SkBlitter* blitter,
1959 const SkIRect& ir,
1960 const SkIRect& clipBounds,
1961 bool forceRLE) {
1962 bool containedInClip = clipBounds.contains(ir);
1963 bool isInverse = path.isInverseFillType();
1964
1965 // The mask blitter (where we store intermediate alpha values directly in a mask, and then call
1966 // the real blitter once in the end to blit the whole mask) is faster than the RLE blitter when
1967 // the blit region is small enough (i.e., CanHandleRect(ir)). When isInverse is true, the blit
1968 // region is no longer the rectangle ir so we won't use the mask blitter. The caller may also
1969 // use the forceRLE flag to force not using the mask blitter. Also, when the path is a simple
1970 // rect, preparing a mask and blitting it might have too much overhead. Hence we'll use
1971 // blitFatAntiRect to avoid the mask and its overhead.
1972 if (MaskAdditiveBlitter::CanHandleRect(ir) && !isInverse && !forceRLE) {
1973 // blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
1974 // Hence only tryBlitFatAntiRect when MaskAdditiveBlitter would have been used.
1975 if (!TryBlitFatAntiRect(blitter, path, clipBounds)) {
1976 MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
1977 aaa_fill_path(path,
1978 clipBounds,
1979 &additiveBlitter,
1980 ir.fTop,
1981 ir.fBottom,
1982 containedInClip,
1983 true,
1984 forceRLE);
1985 }
1986 } else if (!isInverse && path.isConvex()) {
1987 // If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler
1988 // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
1989 // SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
1990 // RunBasedAdditiveBlitter would suffice.
1991 RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
1992 aaa_fill_path(path,
1993 clipBounds,
1994 &additiveBlitter,
1995 ir.fTop,
1996 ir.fBottom,
1997 containedInClip,
1998 false,
1999 forceRLE);
2000 } else {
2001 // If the filling area might not be convex, the more involved aaa_walk_edges would
2002 // be called and we have to clamp the alpha downto 255. The SafeRLEAdditiveBlitter
2003 // does that at a cost of performance.
2004 SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
2005 aaa_fill_path(path,
2006 clipBounds,
2007 &additiveBlitter,
2008 ir.fTop,
2009 ir.fBottom,
2010 containedInClip,
2011 false,
2012 forceRLE);
2013 }
2014 }
2015 #endif // defined(SK_DISABLE_AAA)
2016