1 /*
2 * Copyright 2016 The Android Open Source Project
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "include/core/SkPath.h"
9 #include "include/core/SkRegion.h"
10 #include "include/private/SkTemplates.h"
11 #include "include/private/SkTo.h"
12 #include "src/core/SkAnalyticEdge.h"
13 #include "src/core/SkAntiRun.h"
14 #include "src/core/SkAutoMalloc.h"
15 #include "src/core/SkBlitter.h"
16 #include "src/core/SkEdge.h"
17 #include "src/core/SkEdgeBuilder.h"
18 #include "src/core/SkGeometry.h"
19 #include "src/core/SkQuadClipper.h"
20 #include "src/core/SkRasterClip.h"
21 #include "src/core/SkScan.h"
22 #include "src/core/SkScanPriv.h"
23 #include "src/core/SkTSort.h"
24 #include "src/utils/SkUTF.h"
25
26 #include <utility>
27
28 #if defined(SK_DISABLE_AAA)
// When analytic anti-aliasing is compiled out, keep the SkScan::AAAFillPath
// entry point so callers still link, but it must never actually be reached.
void SkScan::AAAFillPath(const SkPath&, SkBlitter*, const SkIRect&, const SkIRect&, bool) {
    SkDEBUGFAIL("AAA Disabled");
    return;
}
33 #else
34
35 /*
36
37 The following is a high-level overview of our analytic anti-aliasing
38 algorithm. We consider a path as a collection of line segments, as
39 quadratic/cubic curves are converted to small line segments. Without loss of
40 generality, let's assume that the draw region is [0, W] x [0, H].
41
42 Our algorithm is based on horizontal scan lines (y = c_i) as the previous
43 sampling-based algorithm did. However, our algorithm uses non-equal-spaced
44 scan lines, while the previous method always uses equal-spaced scan lines,
45 such as (y = 1/2 + 0, 1/2 + 1, 1/2 + 2, ...) in the previous non-AA algorithm,
46 and (y = 1/8 + 1/4, 1/8 + 2/4, 1/8 + 3/4, ...) in the previous
47 16-supersampling AA algorithm.
48
49 Our algorithm contains scan lines y = c_i for c_i that is either:
50
51 1. an integer between [0, H]
52
53 2. the y value of a line segment endpoint
54
55 3. the y value of an intersection of two line segments
56
For two consecutive scan lines y = c_i, y = c_{i+1}, we analytically compute
the coverage of this horizontal strip of our path on each pixel. This can be
59 done very efficiently because the strip of our path now only consists of
60 trapezoids whose top and bottom edges are y = c_i, y = c_{i+1} (this includes
61 rectangles and triangles as special cases).
62
We now describe how the coverage of a single pixel is computed against such a
trapezoid. That coverage is essentially the intersection area of a rectangle
65 (e.g., [0, 1] x [c_i, c_{i+1}]) and our trapezoid. However, that intersection
66 could be complicated, as shown in the example region A below:
67
68 +-----------\----+
69 | \ C|
70 | \ |
71 \ \ |
72 |\ A \|
73 | \ \
74 | \ |
75 | B \ |
76 +----\-----------+
77
78 However, we don't have to compute the area of A directly. Instead, we can
79 compute the excluded area, which are B and C, quite easily, because they're
80 just triangles. In fact, we can prove that an excluded region (take B as an
81 example) is either itself a simple trapezoid (including rectangles, triangles,
82 and empty regions), or its opposite (the opposite of B is A + C) is a simple
83 trapezoid. In any case, we can compute its area efficiently.
84
85 In summary, our algorithm has a higher quality because it generates ground-
truth coverages analytically. It is also faster because it has far fewer
unnecessary horizontal scan lines. For example, given a triangle path, the
88 number of scan lines in our algorithm is only about 3 + H while the
89 16-supersampling algorithm has about 4H scan lines.
90
91 */
92
add_alpha(SkAlpha * alpha,SkAlpha delta)93 static void add_alpha(SkAlpha* alpha, SkAlpha delta) {
94 SkASSERT(*alpha + delta <= 256);
95 *alpha = SkAlphaRuns::CatchOverflow(*alpha + delta);
96 }
97
safely_add_alpha(SkAlpha * alpha,SkAlpha delta)98 static void safely_add_alpha(SkAlpha* alpha, SkAlpha delta) {
99 *alpha = SkTMin(0xFF, *alpha + delta);
100 }
101
// Base class for blitters that accumulate (add) alpha coverage per pixel
// rather than overwrite it. The AAA walker emits partial coverages for each
// horizontal strip; those must be summed before reaching the real blitter.
class AdditiveBlitter : public SkBlitter {
public:
    ~AdditiveBlitter() override {}

    // Return the destination blitter. Implementations may return themselves
    // when forceRealBlitter is false (see MaskAdditiveBlitter) to keep the
    // fast path; pass true to get the absolute underlying blitter.
    virtual SkBlitter* getRealBlitter(bool forceRealBlitter = false) = 0;

    // Additive variants of blitAntiH: the given alpha is ADDED to the current
    // coverage of the affected pixels (array form, single pixel, and run).
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[], int len) = 0;
    virtual void blitAntiH(int x, int y, const SkAlpha alpha) = 0;
    virtual void blitAntiH(int x, int y, int width, const SkAlpha alpha) = 0;

    // The standard SkBlitter entry points below must not be called on an
    // additive blitter; callers should use the real blitter for these.
    void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
        SkDEBUGFAIL("Please call real blitter's blitAntiH instead.");
    }

    void blitV(int x, int y, int height, SkAlpha alpha) override {
        SkDEBUGFAIL("Please call real blitter's blitV instead.");
    }

    void blitH(int x, int y, int width) override {
        SkDEBUGFAIL("Please call real blitter's blitH instead.");
    }

    void blitRect(int x, int y, int width, int height) override {
        SkDEBUGFAIL("Please call real blitter's blitRect instead.");
    }

    void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
            override {
        SkDEBUGFAIL("Please call real blitter's blitAntiRect instead.");
    }

    // Widest row of the region to be blitted, in pixels.
    virtual int getWidth() = 0;

    // Flush the additive alpha cache if floor(y) and floor(nextY) is different
    // (i.e., we'll start working on a new pixel row).
    virtual void flush_if_y_changed(SkFixed y, SkFixed nextY) = 0;
};
139
140 // We need this mask blitter because it significantly accelerates small path filling.
// We need this mask blitter because it significantly accelerates small path
// filling: alphas accumulate in a small on-stack A8 mask that is blitted to
// the real blitter in one blitMask call at destruction time.
class MaskAdditiveBlitter : public AdditiveBlitter {
public:
    MaskAdditiveBlitter(SkBlitter* realBlitter,
                        const SkIRect& ir,
                        const SkIRect& clipBounds,
                        bool isInverse);
    // The whole accumulated mask is delivered exactly once, here.
    ~MaskAdditiveBlitter() override { fRealBlitter->blitMask(fMask, fClipRect); }

    // Most of the time, we still consider this mask blitter as the real blitter
    // so we can accelerate blitRect and others. But sometimes we want to return
    // the absolute real blitter (e.g., when we fall back to the old code path).
    SkBlitter* getRealBlitter(bool forceRealBlitter) override {
        return forceRealBlitter ? fRealBlitter : this;
    }

    // Virtual function is slow. So don't use this. Directly add alpha to the mask instead.
    void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;

    // The following methods are used to blit rectangles during aaa_walk_convex_edges.
    // Since there aren't many rectangles, we can still bear the slow speed of virtual functions.
    void blitAntiH(int x, int y, const SkAlpha alpha) override;
    void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
    void blitV(int x, int y, int height, SkAlpha alpha) override;
    void blitRect(int x, int y, int width, int height) override;
    void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
            override;

    // The flush is only needed for RLE (RunBasedAdditiveBlitter); the mask
    // needs no per-row flushing.
    void flush_if_y_changed(SkFixed y, SkFixed nextY) override {}

    int getWidth() override { return fClipRect.width(); }

    // True iff `bounds` is small enough to fit into this blitter's fixed
    // kMAX_STORAGE-byte mask storage (row bytes are 4-aligned).
    static bool CanHandleRect(const SkIRect& bounds) {
        int width = bounds.width();
        if (width > MaskAdditiveBlitter::kMAX_WIDTH) {
            return false;
        }
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        return (width <= MaskAdditiveBlitter::kMAX_WIDTH) &&
               (storage <= MaskAdditiveBlitter::kMAX_STORAGE);
    }

    // Return a pointer where pointer[x] corresponds to the alpha of (x, y).
    // Caches the last row pointer since consecutive calls mostly share a row.
    uint8_t* getRow(int y) {
        if (y != fY) {
            fY = y;
            fRow = fMask.fImage + (y - fMask.fBounds.fTop) * fMask.fRowBytes - fMask.fBounds.fLeft;
        }
        return fRow;
    }

private:
    // so we don't try to do very wide things, where the RLE blitter would be faster
    static const int kMAX_WIDTH = 32;
    static const int kMAX_STORAGE = 1024;

    SkBlitter* fRealBlitter;  // destination; receives the mask in ~MaskAdditiveBlitter
    SkMask     fMask;         // A8 mask accumulating coverage
    SkIRect    fClipRect;
    // we add 2 because we can write 1 extra byte at either end due to precision error
    uint32_t fStorage[(kMAX_STORAGE >> 2) + 2];

    uint8_t* fRow;  // cached row pointer for fY (see getRow)
    int      fY;    // y of the cached row; ir.fTop - 1 when invalid
};
209
// Set up the A8 mask over `ir` (which must satisfy CanHandleRect) and zero the
// storage. Inverse fills are not supported by the mask path.
MaskAdditiveBlitter::MaskAdditiveBlitter(SkBlitter* realBlitter,
                                         const SkIRect& ir,
                                         const SkIRect& clipBounds,
                                         bool isInverse) {
    SkASSERT(CanHandleRect(ir));
    SkASSERT(!isInverse);

    fRealBlitter = realBlitter;

    fMask.fImage = (uint8_t*)fStorage + 1;  // There's 1 extra byte at either end of fStorage
    fMask.fBounds = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat = SkMask::kA8_Format;

    // Invalidate the row cache so the first getRow() computes fRow.
    fY = ir.fTop - 1;
    fRow = nullptr;

    fClipRect = ir;
    if (!fClipRect.intersect(clipBounds)) {
        SkASSERT(0);
        fClipRect.setEmpty();
    }

    // +2 covers the 1 slop byte at each end of the image (see fMask.fImage above).
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 2);
}
235
// Intentionally unreachable: for the mask blitter, callers write alphas
// straight into getRow(y) instead of paying a virtual call per span.
void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    SK_ABORT("Don't use this; directly add alphas to the mask.");
}
239
// Add `alpha` to the single pixel (x, y) in the mask.
// x may be fBounds.fLeft - 1 thanks to the slop byte before the image.
void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
    SkASSERT(x >= fMask.fBounds.fLeft - 1);
    add_alpha(&this->getRow(y)[x], alpha);
}
244
blitAntiH(int x,int y,int width,const SkAlpha alpha)245 void MaskAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
246 SkASSERT(x >= fMask.fBounds.fLeft - 1);
247 uint8_t* row = this->getRow(y);
248 for (int i = 0; i < width; ++i) {
249 add_alpha(&row[x + i], alpha);
250 }
251 }
252
blitV(int x,int y,int height,SkAlpha alpha)253 void MaskAdditiveBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
254 if (alpha == 0) {
255 return;
256 }
257 SkASSERT(x >= fMask.fBounds.fLeft - 1);
258 // This must be called as if this is a real blitter.
259 // So we directly set alpha rather than adding it.
260 uint8_t* row = this->getRow(y);
261 for (int i = 0; i < height; ++i) {
262 row[x] = alpha;
263 row += fMask.fRowBytes;
264 }
265 }
266
blitRect(int x,int y,int width,int height)267 void MaskAdditiveBlitter::blitRect(int x, int y, int width, int height) {
268 SkASSERT(x >= fMask.fBounds.fLeft - 1);
269 // This must be called as if this is a real blitter.
270 // So we directly set alpha rather than adding it.
271 uint8_t* row = this->getRow(y);
272 for (int i = 0; i < height; ++i) {
273 memset(row + x, 0xFF, width);
274 row += fMask.fRowBytes;
275 }
276 }
277
// Blit a rect with anti-aliased 1-pixel columns on each side: column x gets
// leftAlpha, columns [x+1, x+width] are fully opaque, column x+1+width gets
// rightAlpha. Delegates to blitV/blitRect, which overwrite (real-blitter mode).
void MaskAdditiveBlitter::blitAntiRect(int x,
                                       int y,
                                       int width,
                                       int height,
                                       SkAlpha leftAlpha,
                                       SkAlpha rightAlpha) {
    blitV(x, y, height, leftAlpha);
    blitV(x + 1 + width, y, height, rightAlpha);
    blitRect(x + 1, y, width, height);
}
288
// Additive blitter that accumulates one scanline of coverage in SkAlphaRuns
// (run-length encoded) and forwards it to the real blitter when y changes.
// Used when the path is too large for MaskAdditiveBlitter's fixed storage.
class RunBasedAdditiveBlitter : public AdditiveBlitter {
public:
    RunBasedAdditiveBlitter(SkBlitter* realBlitter,
                            const SkIRect& ir,
                            const SkIRect& clipBounds,
                            bool isInverse);

    // Deliver any still-buffered scanline before going away.
    ~RunBasedAdditiveBlitter() override { this->flush(); }

    SkBlitter* getRealBlitter(bool forceRealBlitter) override { return fRealBlitter; }

    void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
    void blitAntiH(int x, int y, const SkAlpha alpha) override;
    void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;

    int getWidth() override { return fWidth; }

    // Flush the buffered scanline when y and nextY land on different integer
    // pixel rows (i.e., we'll start working on a new row).
    void flush_if_y_changed(SkFixed y, SkFixed nextY) override {
        if (SkFixedFloorToInt(y) != SkFixedFloorToInt(nextY)) {
            this->flush();
        }
    }

protected:
    SkBlitter* fRealBlitter;

    int fCurrY;  // Current y coordinate.
    int fWidth;  // Widest row of region to be blitted
    int fLeft;   // Leftmost x coordinate in any row
    int fTop;    // Initial y coordinate (top of bounds)

    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int         fRunsToBuffer;
    void*       fRunsBuffer;
    int         fCurrentRun;
    SkAlphaRuns fRuns;

    int fOffsetX;  // hint passed back into fRuns.add to skip already-seen runs

    // True iff [x, x + width) lies inside this blitter's row.
    bool check(int x, int width) const { return x >= 0 && x + width <= fWidth; }

    // Bytes needed for one SkAlphaRuns scanline: fWidth+1 int16 runs (extra
    // one to store the zero terminator) plus fWidth alphas, rounded up.
    int getRunsSz() const { return (fWidth + 1 + (fWidth + 2) / 2) * sizeof(int16_t); }

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(reinterpret_cast<uint8_t*>(fRunsBuffer) +
                                                 fCurrentRun * kRunsSz);
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

    // Blitting 0xFF and 0 is much faster so we snap alphas close to them
    SkAlpha snapAlpha(SkAlpha alpha) { return alpha > 247 ? 0xFF : alpha < 8 ? 0x00 : alpha; }

    // Hand the buffered scanline (if any) to the real blitter and start a
    // fresh one. Resets fCurrY so stale rows are never re-flushed.
    void flush() {
        if (fCurrY >= fTop) {
            SkASSERT(fCurrentRun < fRunsToBuffer);
            for (int x = 0; fRuns.fRuns[x]; x += fRuns.fRuns[x]) {
                // It seems that blitting 255 or 0 is much faster than blitting 254 or 1
                fRuns.fAlpha[x] = snapAlpha(fRuns.fAlpha[x]);
            }
            if (!fRuns.empty()) {
                // SkDEBUGCODE(fRuns.dump();)
                fRealBlitter->blitAntiH(fLeft, fCurrY, fRuns.fAlpha, fRuns.fRuns);
                this->advanceRuns();
                fOffsetX = 0;
            }
            fCurrY = fTop - 1;
        }
    }

    // Flush whenever the target row changes, then record the new row.
    void checkY(int y) {
        if (y != fCurrY) {
            this->flush();
            fCurrY = y;
        }
    }
};
375
// Size the run buffer to the (clipped) bounds and borrow scanline storage
// from the real blitter so flushes can reuse preserved rows.
RunBasedAdditiveBlitter::RunBasedAdditiveBlitter(SkBlitter* realBlitter,
                                                 const SkIRect& ir,
                                                 const SkIRect& clipBounds,
                                                 bool isInverse) {
    fRealBlitter = realBlitter;

    SkIRect sectBounds;
    if (isInverse) {
        // We use the clip bounds instead of the ir, since we may be asked to
        // draw outside of the rect when we're a inverse filltype
        sectBounds = clipBounds;
    } else {
        if (!sectBounds.intersect(ir, clipBounds)) {
            sectBounds.setEmpty();
        }
    }

    const int left = sectBounds.left();
    const int right = sectBounds.right();

    fLeft = left;
    fWidth = right - left;
    fTop = sectBounds.top();
    fCurrY = fTop - 1;  // "no row buffered yet" sentinel (see flush())

    fRunsToBuffer = realBlitter->requestRowsPreserved();
    fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
    fCurrentRun = -1;  // advanceRuns() below moves this to 0

    this->advanceRuns();

    fOffsetX = 0;
}
409
// Add an array of per-pixel alphas to the buffered scanline. The incoming
// span is clamped to [fLeft, fLeft + fWidth); existing runs covering it are
// split into unit-length runs so each pixel can accumulate independently.
void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    checkY(y);
    x -= fLeft;

    // Clip the left edge, advancing into the alpha array accordingly.
    if (x < 0) {
        len += x;
        antialias -= x;
        x = 0;
    }
    len = SkTMin(len, fWidth - x);  // clip the right edge
    SkASSERT(check(x, len));

    // fOffsetX is only valid for monotonically increasing x within a row.
    if (x < fOffsetX) {
        fOffsetX = 0;
    }

    fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX);  // Break the run
    // Split every run overlapping [x, x+len) into unit runs, duplicating the
    // run's alpha into each pixel, so per-pixel accumulation below is valid.
    for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
        for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
            fRuns.fRuns[x + i + j] = 1;
            fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
        }
        fRuns.fRuns[x + i] = 1;
    }
    for (int i = 0; i < len; ++i) {
        add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
    }
}
438
blitAntiH(int x,int y,const SkAlpha alpha)439 void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
440 checkY(y);
441 x -= fLeft;
442
443 if (x < fOffsetX) {
444 fOffsetX = 0;
445 }
446
447 if (this->check(x, 1)) {
448 fOffsetX = fRuns.add(x, 0, 1, 0, alpha, fOffsetX);
449 }
450 }
451
blitAntiH(int x,int y,int width,const SkAlpha alpha)452 void RunBasedAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
453 checkY(y);
454 x -= fLeft;
455
456 if (x < fOffsetX) {
457 fOffsetX = 0;
458 }
459
460 if (this->check(x, width)) {
461 fOffsetX = fRuns.add(x, 0, width, 0, alpha, fOffsetX);
462 }
463 }
464
465 // This exists specifically for concave path filling.
466 // In those cases, we can easily accumulate alpha greater than 0xFF.
// This exists specifically for concave path filling.
// In those cases, we can easily accumulate alpha greater than 0xFF, so the
// overrides below saturate (safely_add_alpha) instead of asserting.
class SafeRLEAdditiveBlitter : public RunBasedAdditiveBlitter {
public:
    SafeRLEAdditiveBlitter(SkBlitter* realBlitter,
                           const SkIRect& ir,
                           const SkIRect& clipBounds,
                           bool isInverse)
            : RunBasedAdditiveBlitter(realBlitter, ir, clipBounds, isInverse) {}

    void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
    void blitAntiH(int x, int y, const SkAlpha alpha) override;
    void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
};
479
// Same as RunBasedAdditiveBlitter::blitAntiH(array), but the final per-pixel
// accumulation saturates at 0xFF instead of asserting on overflow.
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    checkY(y);
    x -= fLeft;

    // Clip the left edge, advancing into the alpha array accordingly.
    if (x < 0) {
        len += x;
        antialias -= x;
        x = 0;
    }
    len = SkTMin(len, fWidth - x);  // clip the right edge
    SkASSERT(check(x, len));

    // fOffsetX is only valid for monotonically increasing x within a row.
    if (x < fOffsetX) {
        fOffsetX = 0;
    }

    fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX);  // Break the run
    // Split overlapping runs into unit runs so each pixel accumulates alone.
    for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
        for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
            fRuns.fRuns[x + i + j] = 1;
            fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
        }
        fRuns.fRuns[x + i] = 1;
    }
    for (int i = 0; i < len; ++i) {
        safely_add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
    }
}
508
blitAntiH(int x,int y,const SkAlpha alpha)509 void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
510 checkY(y);
511 x -= fLeft;
512
513 if (x < fOffsetX) {
514 fOffsetX = 0;
515 }
516
517 if (check(x, 1)) {
518 // Break the run
519 fOffsetX = fRuns.add(x, 0, 1, 0, 0, fOffsetX);
520 safely_add_alpha(&fRuns.fAlpha[x], alpha);
521 }
522 }
523
// Saturating run accumulate: breaks the run over [x, x+width) and then adds
// `alpha` once per existing run segment (stepping by each run's length).
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
    checkY(y);
    x -= fLeft;

    // fOffsetX is only valid for monotonically increasing x within a row.
    if (x < fOffsetX) {
        fOffsetX = 0;
    }

    if (check(x, width)) {
        // Break the run
        fOffsetX = fRuns.add(x, 0, width, 0, 0, fOffsetX);
        for (int i = x; i < x + width; i += fRuns.fRuns[i]) {
            safely_add_alpha(&fRuns.fAlpha[i], alpha);
        }
    }
}
540
541 // Return the alpha of a trapezoid whose height is 1
trapezoid_to_alpha(SkFixed l1,SkFixed l2)542 static SkAlpha trapezoid_to_alpha(SkFixed l1, SkFixed l2) {
543 SkASSERT(l1 >= 0 && l2 >= 0);
544 SkFixed area = (l1 + l2) / 2;
545 return SkTo<SkAlpha>(area >> 8);
546 }
547
548 // The alpha of right-triangle (a, a*b)
// The alpha of right-triangle (a, a*b), i.e. area a*a*b/2, computed with a
// coarse fixed-point approximation (each operand truncated to 5.11 bits).
static SkAlpha partial_triangle_to_alpha(SkFixed a, SkFixed b) {
    SkASSERT(a <= SK_Fixed1);
#if 0
    // TODO(mtklein): skia:8877
    SkASSERT(b <= SK_Fixed1);
#endif

    // Approximating...
    // SkFixed area = SkFixedMul(a, SkFixedMul(a,b)) / 2;
    SkFixed area = (a >> 11) * (a >> 11) * (b >> 11);

#if 0
    // TODO(mtklein): skia:8877
    return SkTo<SkAlpha>(area >> 8);
#else
    // Masking avoids the SkTo assert while skia:8877 (b > 1) is unresolved.
    return SkTo<SkAlpha>((area >> 8) & 0xFF);
#endif
}
567
// Scale `alpha` by the fractional height `partialHeight` (SkFixed in [0, 1]).
static SkAlpha get_partial_alpha(SkAlpha alpha, SkFixed partialHeight) {
    return SkToU8(SkFixedRoundToInt(alpha * partialHeight));
}
571
get_partial_alpha(SkAlpha alpha,SkAlpha fullAlpha)572 static SkAlpha get_partial_alpha(SkAlpha alpha, SkAlpha fullAlpha) {
573 return (alpha * fullAlpha) >> 8;
574 }
575
576 // For SkFixed that's close to SK_Fixed1, we can't convert it to alpha by just shifting right.
577 // For example, when f = SK_Fixed1, right shifting 8 will get 256, but we need 255.
578 // This is rarely the problem so we'll only use this for blitting rectangles.
// For SkFixed that's close to SK_Fixed1, we can't convert it to alpha by just
// shifting right (SK_Fixed1 >> 8 == 256, but we need 255). This is rarely the
// problem so we'll only use this for blitting rectangles.
static SkAlpha fixed_to_alpha(SkFixed f) {
    SkASSERT(f <= SK_Fixed1);
    return get_partial_alpha(0xFF, f);
}
583
584 // Suppose that line (l1, y)-(r1, y+1) intersects with (l2, y)-(r2, y+1),
585 // approximate (very coarsely) the x coordinate of the intersection.
approximate_intersection(SkFixed l1,SkFixed r1,SkFixed l2,SkFixed r2)586 static SkFixed approximate_intersection(SkFixed l1, SkFixed r1, SkFixed l2, SkFixed r2) {
587 if (l1 > r1) {
588 std::swap(l1, r1);
589 }
590 if (l2 > r2) {
591 std::swap(l2, r2);
592 }
593 return (SkTMax(l1, l2) + SkTMin(r1, r2)) / 2;
594 }
595
596 // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
// Fill alphas[0 .. ceil(r)-1] with per-pixel coverage of the region above a
// line segment spanning x in [l, r] within a strip whose slope term is dY.
// Here we always send in l < SK_Fixed1, and the first alpha we want to
// compute is alphas[0].
static void compute_alpha_above_line(SkAlpha* alphas,
                                     SkFixed l,
                                     SkFixed r,
                                     SkFixed dY,
                                     SkAlpha fullAlpha) {
    SkASSERT(l <= r);
    SkASSERT(l >> 16 == 0);
    int R = SkFixedCeilToInt(r);
    if (R == 0) {
        return;
    } else if (R == 1) {
        // Single pixel: the covered part is a trapezoid; (R << 17) is 2*R in
        // SkFixed, so ((2R - l - r) / 2) >> 8 is the averaged width as alpha.
        alphas[0] = get_partial_alpha(((R << 17) - l - r) >> 9, fullAlpha);
    } else {
        SkFixed first = SK_Fixed1 - l;  // horizontal edge length of the left-most triangle
        SkFixed last = r - ((R - 1) << 16);  // horizontal edge length of the right-most triangle
        SkFixed firstH = SkFixedMul(first, dY);  // vertical edge of the left-most triangle
        alphas[0] = SkFixedMul(first, firstH) >> 9;  // triangle alpha
        SkFixed alpha16 = firstH + (dY >> 1);  // rectangle plus triangle
        // Interior pixels: coverage grows linearly by dY per column.
        for (int i = 1; i < R - 1; ++i) {
            alphas[i] = alpha16 >> 8;
            alpha16 += dY;
        }
        // Right-most pixel: full coverage minus the triangle below the line.
        alphas[R - 1] = fullAlpha - partial_triangle_to_alpha(last, dY);
    }
}
622
623 // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
// Mirror of compute_alpha_above_line: fill alphas[0 .. ceil(r)-1] with
// per-pixel coverage of the region below the line segment.
// Here we always send in l < SK_Fixed1, and the first alpha we want to
// compute is alphas[0].
static void compute_alpha_below_line(SkAlpha* alphas,
                                     SkFixed l,
                                     SkFixed r,
                                     SkFixed dY,
                                     SkAlpha fullAlpha) {
    SkASSERT(l <= r);
    SkASSERT(l >> 16 == 0);
    int R = SkFixedCeilToInt(r);
    if (R == 0) {
        return;
    } else if (R == 1) {
        // Single pixel: the covered part below the line is a trapezoid.
        alphas[0] = get_partial_alpha(trapezoid_to_alpha(l, r), fullAlpha);
    } else {
        SkFixed first = SK_Fixed1 - l;  // horizontal edge length of the left-most triangle
        SkFixed last = r - ((R - 1) << 16);  // horizontal edge length of the right-most triangle
        SkFixed lastH = SkFixedMul(last, dY);  // vertical edge of the right-most triangle
        alphas[R - 1] = SkFixedMul(last, lastH) >> 9;  // triangle alpha
        SkFixed alpha16 = lastH + (dY >> 1);  // rectangle plus triangle
        // Interior pixels, walking right-to-left; coverage grows by dY.
        for (int i = R - 2; i > 0; i--) {
            alphas[i] = (alpha16 >> 8) & 0xFF;
            alpha16 += dY;
        }
        // Left-most pixel: full coverage minus the triangle above the line.
        alphas[0] = fullAlpha - partial_triangle_to_alpha(first, dY);
    }
}
649
650 // Note that if fullAlpha != 0xFF, we'll multiply alpha by fullAlpha
// Blit `alpha` at the single pixel (x, y), either directly into maskRow (mask
// mode) or through the blitter. Note that if fullAlpha != 0xFF, we'll
// multiply alpha by fullAlpha. noRealBlitter forces the additive path (needed
// for concave paths); needSafeCheck selects saturating accumulation.
static SK_ALWAYS_INLINE void blit_single_alpha(AdditiveBlitter* blitter,
                                               int y,
                                               int x,
                                               SkAlpha alpha,
                                               SkAlpha fullAlpha,
                                               SkAlpha* maskRow,
                                               bool isUsingMask,
                                               bool noRealBlitter,
                                               bool needSafeCheck) {
    if (isUsingMask) {
        if (fullAlpha == 0xFF && !noRealBlitter) {  // noRealBlitter is needed for concave paths
            maskRow[x] = alpha;  // full strip: store, don't accumulate
        } else if (needSafeCheck) {
            safely_add_alpha(&maskRow[x], get_partial_alpha(alpha, fullAlpha));
        } else {
            add_alpha(&maskRow[x], get_partial_alpha(alpha, fullAlpha));
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            // Full-height strip: the real blitter is faster than buffering.
            blitter->getRealBlitter()->blitV(x, y, 1, alpha);
        } else {
            blitter->blitAntiH(x, y, get_partial_alpha(alpha, fullAlpha));
        }
    }
}
676
// Blit two adjacent pixel alphas a1 (at x) and a2 (at x+1) on row y, into the
// mask or through the blitter. Flag meanings match blit_single_alpha.
static SK_ALWAYS_INLINE void blit_two_alphas(AdditiveBlitter* blitter,
                                             int y,
                                             int x,
                                             SkAlpha a1,
                                             SkAlpha a2,
                                             SkAlpha fullAlpha,
                                             SkAlpha* maskRow,
                                             bool isUsingMask,
                                             bool noRealBlitter,
                                             bool needSafeCheck) {
    if (isUsingMask) {
        if (needSafeCheck) {
            safely_add_alpha(&maskRow[x], a1);
            safely_add_alpha(&maskRow[x + 1], a2);
        } else {
            add_alpha(&maskRow[x], a1);
            add_alpha(&maskRow[x + 1], a2);
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            // Full-height strip: hand both pixels to the real blitter at once.
            blitter->getRealBlitter()->blitAntiH2(x, y, a1, a2);
        } else {
            blitter->blitAntiH(x, y, a1);
            blitter->blitAntiH(x + 1, y, a2);
        }
    }
}
704
blit_full_alpha(AdditiveBlitter * blitter,int y,int x,int len,SkAlpha fullAlpha,SkAlpha * maskRow,bool isUsingMask,bool noRealBlitter,bool needSafeCheck)705 static SK_ALWAYS_INLINE void blit_full_alpha(AdditiveBlitter* blitter,
706 int y,
707 int x,
708 int len,
709 SkAlpha fullAlpha,
710 SkAlpha* maskRow,
711 bool isUsingMask,
712 bool noRealBlitter,
713 bool needSafeCheck) {
714 if (isUsingMask) {
715 for (int i = 0; i < len; ++i) {
716 if (needSafeCheck) {
717 safely_add_alpha(&maskRow[x + i], fullAlpha);
718 } else {
719 add_alpha(&maskRow[x + i], fullAlpha);
720 }
721 }
722 } else {
723 if (fullAlpha == 0xFF && !noRealBlitter) {
724 blitter->getRealBlitter()->blitH(x, y, len);
725 } else {
726 blitter->blitAntiH(x, y, len, fullAlpha);
727 }
728 }
729 }
730
// Blit one pixel-row strip of a trapezoid with top edge (ul, ur) and bottom
// edge (ll, lr) (all SkFixed x-coordinates), whose left/right sides have
// slope terms lDY/rDY. Starts from full coverage and SUBTRACTS the areas
// excluded by the left line (ul-ll) and right line (ur-lr); see the file
// header comment for why the excluded regions are simple to compute.
static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter,
                                   int y,
                                   SkFixed ul,
                                   SkFixed ur,
                                   SkFixed ll,
                                   SkFixed lr,
                                   SkFixed lDY,
                                   SkFixed rDY,
                                   SkAlpha fullAlpha,
                                   SkAlpha* maskRow,
                                   bool isUsingMask,
                                   bool noRealBlitter,
                                   bool needSafeCheck) {
    int L = SkFixedFloorToInt(ul), R = SkFixedCeilToInt(lr);
    int len = R - L;

    if (len == 1) {
        // The whole strip fits in one pixel: its coverage is a trapezoid.
        SkAlpha alpha = trapezoid_to_alpha(ur - ul, lr - ll);
        blit_single_alpha(blitter,
                          y,
                          L,
                          alpha,
                          fullAlpha,
                          maskRow,
                          isUsingMask,
                          noRealBlitter,
                          needSafeCheck);
        return;
    }

    // Scratch layout: [alphas: len+1][tempAlphas: len+1][runs: int16 x len+1].
    // Short rows use the stack; longer rows heap-allocate.
    const int kQuickLen = 31;
    char quickMemory[(sizeof(SkAlpha) * 2 + sizeof(int16_t)) * (kQuickLen + 1)];
    SkAlpha* alphas;

    if (len <= kQuickLen) {
        alphas = (SkAlpha*)quickMemory;
    } else {
        alphas = new SkAlpha[(len + 1) * (sizeof(SkAlpha) * 2 + sizeof(int16_t))];
    }

    SkAlpha* tempAlphas = alphas + len + 1;
    int16_t* runs = (int16_t*)(alphas + (len + 1) * 2);

    // Start from full coverage; the two sides below subtract their exclusions.
    for (int i = 0; i < len; ++i) {
        runs[i] = 1;
        alphas[i] = fullAlpha;
    }
    runs[len] = 0;

    // Left side: subtract the area left of the ul-ll line.
    int uL = SkFixedFloorToInt(ul);
    int lL = SkFixedCeilToInt(ll);
    if (uL + 2 == lL) {  // We only need to compute two triangles, accelerate this special case
        SkFixed first = SkIntToFixed(uL) + SK_Fixed1 - ul;
        SkFixed second = ll - ul - first;
        SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, lDY);
        SkAlpha a2 = partial_triangle_to_alpha(second, lDY);
        alphas[0] = alphas[0] > a1 ? alphas[0] - a1 : 0;
        alphas[1] = alphas[1] > a2 ? alphas[1] - a2 : 0;
    } else {
        compute_alpha_below_line(
                tempAlphas + uL - L, ul - SkIntToFixed(uL), ll - SkIntToFixed(uL), lDY, fullAlpha);
        for (int i = uL; i < lL; ++i) {
            // Subtract with clamp-at-zero to absorb rounding error.
            if (alphas[i - L] > tempAlphas[i - L]) {
                alphas[i - L] -= tempAlphas[i - L];
            } else {
                alphas[i - L] = 0;
            }
        }
    }

    // Right side: subtract the area right of the ur-lr line.
    int uR = SkFixedFloorToInt(ur);
    int lR = SkFixedCeilToInt(lr);
    if (uR + 2 == lR) {  // We only need to compute two triangles, accelerate this special case
        SkFixed first = SkIntToFixed(uR) + SK_Fixed1 - ur;
        SkFixed second = lr - ur - first;
        SkAlpha a1 = partial_triangle_to_alpha(first, rDY);
        SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, rDY);
        alphas[len - 2] = alphas[len - 2] > a1 ? alphas[len - 2] - a1 : 0;
        alphas[len - 1] = alphas[len - 1] > a2 ? alphas[len - 1] - a2 : 0;
    } else {
        compute_alpha_above_line(
                tempAlphas + uR - L, ur - SkIntToFixed(uR), lr - SkIntToFixed(uR), rDY, fullAlpha);
        for (int i = uR; i < lR; ++i) {
            // Subtract with clamp-at-zero to absorb rounding error.
            if (alphas[i - L] > tempAlphas[i - L]) {
                alphas[i - L] -= tempAlphas[i - L];
            } else {
                alphas[i - L] = 0;
            }
        }
    }

    if (isUsingMask) {
        for (int i = 0; i < len; ++i) {
            if (needSafeCheck) {
                safely_add_alpha(&maskRow[L + i], alphas[i]);
            } else {
                add_alpha(&maskRow[L + i], alphas[i]);
            }
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            // Real blitter is faster than RunBasedAdditiveBlitter
            blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs);
        } else {
            blitter->blitAntiH(L, y, alphas, len);
        }
    }

    if (len > kQuickLen) {
        delete[] alphas;
    }
}
843
// Blit one pixel-row strip of a trapezoid with top edge (ul, ur) and bottom
// edge (ll, lr). When a fully-covered rectangle exists in the middle
// ([joinLeft, joinRite]) the row is split into left-partial, full, and
// right-partial pieces, each blitted with the cheapest suitable routine;
// otherwise the whole row falls through to blit_aaa_trapezoid_row.
static SK_ALWAYS_INLINE void blit_trapezoid_row(AdditiveBlitter* blitter,
                                                int y,
                                                SkFixed ul,
                                                SkFixed ur,
                                                SkFixed ll,
                                                SkFixed lr,
                                                SkFixed lDY,
                                                SkFixed rDY,
                                                SkAlpha fullAlpha,
                                                SkAlpha* maskRow,
                                                bool isUsingMask,
                                                bool noRealBlitter = false,
                                                bool needSafeCheck = false) {
    SkASSERT(lDY >= 0 && rDY >= 0);  // We should only send in the absolute value

    if (ul > ur) {
        return;
    }

    // Edge crosses. Approximate it. This should only happen due to precision limit,
    // so the approximation could be very coarse.
    if (ll > lr) {
        ll = lr = approximate_intersection(ul, ll, ur, lr);
    }

    if (ul == ur && ll == lr) {
        return;  // empty trapezoid
    }

    // We're going to use the left line ul-ll and the right line ur-lr
    // to exclude the area that's not covered by the path.
    // Swapping (ul, ll) or (ur, lr) won't affect that exclusion
    // so we'll do that for simplicity.
    if (ul > ll) {
        std::swap(ul, ll);
    }
    if (ur > lr) {
        std::swap(ur, lr);
    }

    SkFixed joinLeft = SkFixedCeilToFixed(ll);
    SkFixed joinRite = SkFixedFloorToFixed(ur);
    if (joinLeft <= joinRite) {  // There's a rect from joinLeft to joinRite that we can blit
        // Left partial piece: [ul, joinLeft), covered by the left line only.
        if (ul < joinLeft) {
            int len = SkFixedCeilToInt(joinLeft - ul);
            if (len == 1) {
                SkAlpha alpha = trapezoid_to_alpha(joinLeft - ul, joinLeft - ll);
                blit_single_alpha(blitter,
                                  y,
                                  ul >> 16,
                                  alpha,
                                  fullAlpha,
                                  maskRow,
                                  isUsingMask,
                                  noRealBlitter,
                                  needSafeCheck);
            } else if (len == 2) {
                // Two pixels: the coverage splits into two right triangles.
                SkFixed first = joinLeft - SK_Fixed1 - ul;
                SkFixed second = ll - ul - first;
                SkAlpha a1 = partial_triangle_to_alpha(first, lDY);
                SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, lDY);
                blit_two_alphas(blitter,
                                y,
                                ul >> 16,
                                a1,
                                a2,
                                fullAlpha,
                                maskRow,
                                isUsingMask,
                                noRealBlitter,
                                needSafeCheck);
            } else {
                // General case: a degenerate trapezoid whose right side is
                // vertical (SK_MaxS32 slope term means "no right exclusion").
                blit_aaa_trapezoid_row(blitter,
                                       y,
                                       ul,
                                       joinLeft,
                                       ll,
                                       joinLeft,
                                       lDY,
                                       SK_MaxS32,
                                       fullAlpha,
                                       maskRow,
                                       isUsingMask,
                                       noRealBlitter,
                                       needSafeCheck);
            }
        }
        // SkAAClip requires that we blit from left to right.
        // Hence we must blit [ul, joinLeft] before blitting [joinLeft, joinRite]
        if (joinLeft < joinRite) {
            blit_full_alpha(blitter,
                            y,
                            SkFixedFloorToInt(joinLeft),
                            SkFixedFloorToInt(joinRite - joinLeft),
                            fullAlpha,
                            maskRow,
                            isUsingMask,
                            noRealBlitter,
                            needSafeCheck);
        }
        // Right partial piece: (joinRite, lr], covered by the right line only.
        if (lr > joinRite) {
            int len = SkFixedCeilToInt(lr - joinRite);
            if (len == 1) {
                SkAlpha alpha = trapezoid_to_alpha(ur - joinRite, lr - joinRite);
                blit_single_alpha(blitter,
                                  y,
                                  joinRite >> 16,
                                  alpha,
                                  fullAlpha,
                                  maskRow,
                                  isUsingMask,
                                  noRealBlitter,
                                  needSafeCheck);
            } else if (len == 2) {
                // Two pixels: the coverage splits into two right triangles.
                SkFixed first = joinRite + SK_Fixed1 - ur;
                SkFixed second = lr - ur - first;
                SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, rDY);
                SkAlpha a2 = partial_triangle_to_alpha(second, rDY);
                blit_two_alphas(blitter,
                                y,
                                joinRite >> 16,
                                a1,
                                a2,
                                fullAlpha,
                                maskRow,
                                isUsingMask,
                                noRealBlitter,
                                needSafeCheck);
            } else {
                // General case: a degenerate trapezoid whose left side is
                // vertical (SK_MaxS32 slope term means "no left exclusion").
                blit_aaa_trapezoid_row(blitter,
                                       y,
                                       joinRite,
                                       ur,
                                       joinRite,
                                       lr,
                                       SK_MaxS32,
                                       rDY,
                                       fullAlpha,
                                       maskRow,
                                       isUsingMask,
                                       noRealBlitter,
                                       needSafeCheck);
            }
        }
    } else {
        // No full middle rect; blit the whole strip as one trapezoid.
        blit_aaa_trapezoid_row(blitter,
                               y,
                               ul,
                               ur,
                               ll,
                               lr,
                               lDY,
                               rDY,
                               fullAlpha,
                               maskRow,
                               isUsingMask,
                               noRealBlitter,
                               needSafeCheck);
    }
}
1004
operator <(const SkAnalyticEdge & a,const SkAnalyticEdge & b)1005 static bool operator<(const SkAnalyticEdge& a, const SkAnalyticEdge& b) {
1006 int valuea = a.fUpperY;
1007 int valueb = b.fUpperY;
1008
1009 if (valuea == valueb) {
1010 valuea = a.fX;
1011 valueb = b.fX;
1012 }
1013
1014 if (valuea == valueb) {
1015 valuea = a.fDX;
1016 valueb = b.fDX;
1017 }
1018
1019 return valuea < valueb;
1020 }
1021
sort_edges(SkAnalyticEdge * list[],int count,SkAnalyticEdge ** last)1022 static SkAnalyticEdge* sort_edges(SkAnalyticEdge* list[], int count, SkAnalyticEdge** last) {
1023 SkTQSort(list, list + count - 1);
1024
1025 // now make the edges linked in sorted order
1026 for (int i = 1; i < count; ++i) {
1027 list[i - 1]->fNext = list[i];
1028 list[i]->fPrev = list[i - 1];
1029 }
1030
1031 *last = list[count - 1];
1032 return list[0];
1033 }
1034
// Debug-only check: walk the linked edge list (terminated by the SK_MaxS32
// sentinel tail) and assert each edge is valid and fUpperY is non-decreasing.
static void validate_sort(const SkAnalyticEdge* edge) {
#ifdef SK_DEBUG
    SkFixed prevUpperY = SkIntToFixed(-32768);
    for (; edge->fUpperY != SK_MaxS32; edge = (SkAnalyticEdge*)edge->fNext) {
        edge->validate();
        SkASSERT(prevUpperY <= edge->fUpperY);
        prevUpperY = edge->fUpperY;
    }
#endif
}
1048
// For an edge, we consider it smooth if the Dx doesn't change much, and Dy is large enough
// For curves that are updating, the Dx is not changing much if fQDx/fCDx and fQDy/fCDy are
// relatively large compared to fQDDx/QCDDx and fQDDy/fCDDy
//
// fCurveCount < 0 means thisEdge is a cubic; > 0 means a quadratic; == 0 a line
// (for lines, smoothness is judged against the following edge, nextEdge).
// The ">> 1" tests require the first derivative to be at least twice the second
// derivative (after the curve's own shift scaling), i.e. nearly constant slope.
static bool is_smooth_enough(SkAnalyticEdge* thisEdge, SkAnalyticEdge* nextEdge, int stop_y) {
    if (thisEdge->fCurveCount < 0) {
        const SkCubicEdge& cEdge = static_cast<SkAnalyticCubicEdge*>(thisEdge)->fCEdge;
        int ddshift = cEdge.fCurveShift;
        return SkAbs32(cEdge.fCDx) >> 1 >= SkAbs32(cEdge.fCDDx) >> ddshift &&
               SkAbs32(cEdge.fCDy) >> 1 >= SkAbs32(cEdge.fCDDy) >> ddshift &&
               // current Dy is (fCDy - (fCDDy >> ddshift)) >> dshift
               (cEdge.fCDy - (cEdge.fCDDy >> ddshift)) >> cEdge.fCubicDShift >= SK_Fixed1;
    } else if (thisEdge->fCurveCount > 0) {
        const SkQuadraticEdge& qEdge = static_cast<SkAnalyticQuadraticEdge*>(thisEdge)->fQEdge;
        return SkAbs32(qEdge.fQDx) >> 1 >= SkAbs32(qEdge.fQDDx) &&
               SkAbs32(qEdge.fQDy) >> 1 >= SkAbs32(qEdge.fQDDy) &&
               // current Dy is (fQDy - fQDDy) >> shift
               (qEdge.fQDy - qEdge.fQDDy) >> qEdge.fCurveShift >= SK_Fixed1;
    }
    // Line segment: compare against the next edge's slope and require the next
    // edge to span at least one full pixel vertically.
    return SkAbs32(nextEdge->fDX - thisEdge->fDX) <= SK_Fixed1 &&  // DDx should be small
           nextEdge->fLowerY - nextEdge->fUpperY >= SK_Fixed1;     // Dy should be large
}
1070
1071 // Check if the leftE and riteE are changing smoothly in terms of fDX.
1072 // If yes, we can later skip the fractional y and directly jump to integer y.
is_smooth_enough(SkAnalyticEdge * leftE,SkAnalyticEdge * riteE,SkAnalyticEdge * currE,int stop_y)1073 static bool is_smooth_enough(SkAnalyticEdge* leftE,
1074 SkAnalyticEdge* riteE,
1075 SkAnalyticEdge* currE,
1076 int stop_y) {
1077 if (currE->fUpperY >= SkLeftShift(stop_y, 16)) {
1078 return false; // We're at the end so we won't skip anything
1079 }
1080 if (leftE->fLowerY + SK_Fixed1 < riteE->fLowerY) {
1081 return is_smooth_enough(leftE, currE, stop_y); // Only leftE is changing
1082 } else if (leftE->fLowerY > riteE->fLowerY + SK_Fixed1) {
1083 return is_smooth_enough(riteE, currE, stop_y); // Only riteE is changing
1084 }
1085
1086 // Now both edges are changing, find the second next edge
1087 SkAnalyticEdge* nextCurrE = currE->fNext;
1088 if (nextCurrE->fUpperY >= stop_y << 16) { // Check if we're at the end
1089 return false;
1090 }
1091 // Ensure that currE is the next left edge and nextCurrE is the next right edge. Swap if not.
1092 if (nextCurrE->fUpperX < currE->fUpperX) {
1093 std::swap(currE, nextCurrE);
1094 }
1095 return is_smooth_enough(leftE, currE, stop_y) && is_smooth_enough(riteE, nextCurrE, stop_y);
1096 }
1097
// Walk the edges of a convex path. Convexity guarantees that each scan line is
// crossed by at most one left edge and one right edge, so we only track two
// active edges (leftE/riteE) and pull replacements from currE as they end.
// leftBound/riteBound clip the generated x spans (SkFixed); isUsingMask selects
// the MaskAdditiveBlitter path where alphas are accumulated into a mask row.
static void aaa_walk_convex_edges(SkAnalyticEdge* prevHead,
                                  AdditiveBlitter* blitter,
                                  int start_y,
                                  int stop_y,
                                  SkFixed leftBound,
                                  SkFixed riteBound,
                                  bool isUsingMask) {
    validate_sort((SkAnalyticEdge*)prevHead->fNext);

    SkAnalyticEdge* leftE = (SkAnalyticEdge*)prevHead->fNext;
    SkAnalyticEdge* riteE = (SkAnalyticEdge*)leftE->fNext;
    SkAnalyticEdge* currE = (SkAnalyticEdge*)riteE->fNext;

    // Start at the lower of the two tops (the other edge may start later).
    SkFixed y = SkTMax(leftE->fUpperY, riteE->fUpperY);

    for (;;) {
        // We have to check fLowerY first because some edges might be alone (e.g., there's only
        // a left edge but no right edge in a given y scan line) due to precision limit.
        while (leftE->fLowerY <= y) {  // Due to smooth jump, we may pass multiple short edges
            if (!leftE->update(y)) {
                // leftE is exhausted; take the next edge from the sorted list.
                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
                    goto END_WALK;
                }
                leftE = currE;
                currE = (SkAnalyticEdge*)currE->fNext;
            }
        }
        while (riteE->fLowerY <= y) {  // Due to smooth jump, we may pass multiple short edges
            if (!riteE->update(y)) {
                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
                    goto END_WALK;
                }
                riteE = currE;
                currE = (SkAnalyticEdge*)currE->fNext;
            }
        }

        SkASSERT(leftE);
        SkASSERT(riteE);

        // check our bottom clip
        if (SkFixedFloorToInt(y) >= stop_y) {
            break;
        }

        SkASSERT(SkFixedFloorToInt(leftE->fUpperY) <= stop_y);
        SkASSERT(SkFixedFloorToInt(riteE->fUpperY) <= stop_y);

        leftE->goY(y);
        riteE->goY(y);

        // Keep leftE actually to the left of riteE; on an exact x tie, compare slopes.
        if (leftE->fX > riteE->fX || (leftE->fX == riteE->fX && leftE->fDX > riteE->fDX)) {
            std::swap(leftE, riteE);
        }

        SkFixed local_bot_fixed = SkMin32(leftE->fLowerY, riteE->fLowerY);
        if (is_smooth_enough(leftE, riteE, currE, stop_y)) {
            // Edges are smooth enough: jump straight to the next integer scan line.
            local_bot_fixed = SkFixedCeilToFixed(local_bot_fixed);
        }
        local_bot_fixed = SkMin32(local_bot_fixed, SkIntToFixed(stop_y));

        SkFixed left = SkTMax(leftBound, leftE->fX);
        SkFixed dLeft = leftE->fDX;
        SkFixed rite = SkTMin(riteBound, riteE->fX);
        SkFixed dRite = riteE->fDX;
        if (0 == (dLeft | dRite)) {
            // Both edges are vertical: the region is an axis-aligned rect with
            // (possibly) fractional left/right/top/bottom coverage.
            int fullLeft = SkFixedCeilToInt(left);
            int fullRite = SkFixedFloorToInt(rite);
            SkFixed partialLeft = SkIntToFixed(fullLeft) - left;
            SkFixed partialRite = rite - SkIntToFixed(fullRite);
            int fullTop = SkFixedCeilToInt(y);
            int fullBot = SkFixedFloorToInt(local_bot_fixed);
            SkFixed partialTop = SkIntToFixed(fullTop) - y;
            SkFixed partialBot = local_bot_fixed - SkIntToFixed(fullBot);
            if (fullTop > fullBot) {  // The rectangle is within one pixel height...
                partialTop -= (SK_Fixed1 - partialBot);
                partialBot = 0;
            }

            if (fullRite >= fullLeft) {
                if (partialTop > 0) {  // blit first partial row
                    if (partialLeft > 0) {
                        blitter->blitAntiH(fullLeft - 1,
                                           fullTop - 1,
                                           fixed_to_alpha(SkFixedMul(partialTop, partialLeft)));
                    }
                    blitter->blitAntiH(
                            fullLeft, fullTop - 1, fullRite - fullLeft, fixed_to_alpha(partialTop));
                    if (partialRite > 0) {
                        blitter->blitAntiH(fullRite,
                                           fullTop - 1,
                                           fixed_to_alpha(SkFixedMul(partialTop, partialRite)));
                    }
                    blitter->flush_if_y_changed(y, y + partialTop);
                }

                // Blit all full-height rows from fullTop to fullBot
                if (fullBot > fullTop &&
                    // SkAAClip cannot handle the empty rect so check the non-emptiness here
                    // (bug chromium:662800)
                    (fullRite > fullLeft || fixed_to_alpha(partialLeft) > 0 ||
                     fixed_to_alpha(partialRite) > 0)) {
                    blitter->getRealBlitter()->blitAntiRect(fullLeft - 1,
                                                            fullTop,
                                                            fullRite - fullLeft,
                                                            fullBot - fullTop,
                                                            fixed_to_alpha(partialLeft),
                                                            fixed_to_alpha(partialRite));
                }

                if (partialBot > 0) {  // blit last partial row
                    if (partialLeft > 0) {
                        blitter->blitAntiH(fullLeft - 1,
                                           fullBot,
                                           fixed_to_alpha(SkFixedMul(partialBot, partialLeft)));
                    }
                    blitter->blitAntiH(
                            fullLeft, fullBot, fullRite - fullLeft, fixed_to_alpha(partialBot));
                    if (partialRite > 0) {
                        blitter->blitAntiH(fullRite,
                                           fullBot,
                                           fixed_to_alpha(SkFixedMul(partialBot, partialRite)));
                    }
                }
            } else {  // left and rite are within the same pixel
                if (partialTop > 0) {
                    blitter->blitAntiH(fullLeft - 1,
                                       fullTop - 1,
                                       1,
                                       fixed_to_alpha(SkFixedMul(partialTop, rite - left)));
                    blitter->flush_if_y_changed(y, y + partialTop);
                }
                if (fullBot > fullTop) {
                    blitter->getRealBlitter()->blitV(
                            fullLeft - 1, fullTop, fullBot - fullTop, fixed_to_alpha(rite - left));
                }
                if (partialBot > 0) {
                    blitter->blitAntiH(fullLeft - 1,
                                       fullBot,
                                       1,
                                       fixed_to_alpha(SkFixedMul(partialBot, rite - left)));
                }
            }

            y = local_bot_fixed;
        } else {
            // The following constant are used to snap X
            // We snap X mainly for speedup (no tiny triangle) and
            // avoiding edge cases caused by precision errors
            const SkFixed kSnapDigit = SK_Fixed1 >> 4;
            const SkFixed kSnapHalf = kSnapDigit >> 1;
            const SkFixed kSnapMask = (-1 ^ (kSnapDigit - 1));
            left += kSnapHalf;
            rite += kSnapHalf;  // For fast rounding

            // Number of blit_trapezoid_row calls we'll have
            int count = SkFixedCeilToInt(local_bot_fixed) - SkFixedFloorToInt(y);

            // If we're using mask blitter, we advance the mask row in this function
            // to save some "if" condition checks.
            SkAlpha* maskRow = nullptr;
            if (isUsingMask) {
                maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
            }

            // Instead of writing one loop that handles both partial-row blit_trapezoid_row
            // and full-row trapezoid_row together, we use the following 3-stage flow to
            // handle partial-row blit and full-row blit separately. It will save us much time
            // on changing y, left, and rite.
            if (count > 1) {
                if ((int)(y & 0xFFFF0000) != y) {  // There's a partial-row on the top
                    count--;
                    SkFixed nextY = SkFixedCeilToFixed(y + 1);
                    SkFixed dY = nextY - y;
                    SkFixed nextLeft = left + SkFixedMul(dLeft, dY);
                    SkFixed nextRite = rite + SkFixedMul(dRite, dY);
                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                             (nextLeft & kSnapMask) >= leftBound &&
                             (nextRite & kSnapMask) <= riteBound);
                    blit_trapezoid_row(blitter,
                                       y >> 16,
                                       left & kSnapMask,
                                       rite & kSnapMask,
                                       nextLeft & kSnapMask,
                                       nextRite & kSnapMask,
                                       leftE->fDY,
                                       riteE->fDY,
                                       get_partial_alpha(0xFF, dY),
                                       maskRow,
                                       isUsingMask);
                    blitter->flush_if_y_changed(y, nextY);
                    left = nextLeft;
                    rite = nextRite;
                    y = nextY;
                }

                while (count > 1) {  // Full rows in the middle
                    count--;
                    if (isUsingMask) {
                        maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
                    }
                    SkFixed nextY = y + SK_Fixed1, nextLeft = left + dLeft, nextRite = rite + dRite;
                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                             (nextLeft & kSnapMask) >= leftBound &&
                             (nextRite & kSnapMask) <= riteBound);
                    blit_trapezoid_row(blitter,
                                       y >> 16,
                                       left & kSnapMask,
                                       rite & kSnapMask,
                                       nextLeft & kSnapMask,
                                       nextRite & kSnapMask,
                                       leftE->fDY,
                                       riteE->fDY,
                                       0xFF,
                                       maskRow,
                                       isUsingMask);
                    blitter->flush_if_y_changed(y, nextY);
                    left = nextLeft;
                    rite = nextRite;
                    y = nextY;
                }
            }

            if (isUsingMask) {
                maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
            }

            SkFixed dY = local_bot_fixed - y;  // partial-row on the bottom
            SkASSERT(dY <= SK_Fixed1);
            // Smooth jumping to integer y may make the last nextLeft/nextRite out of bound.
            // Take them back into the bound here.
            // Note that we subtract kSnapHalf later so we have to add them to leftBound/riteBound
            SkFixed nextLeft = SkTMax(left + SkFixedMul(dLeft, dY), leftBound + kSnapHalf);
            SkFixed nextRite = SkTMin(rite + SkFixedMul(dRite, dY), riteBound + kSnapHalf);
            SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                     (nextLeft & kSnapMask) >= leftBound && (nextRite & kSnapMask) <= riteBound);
            blit_trapezoid_row(blitter,
                               y >> 16,
                               left & kSnapMask,
                               rite & kSnapMask,
                               nextLeft & kSnapMask,
                               nextRite & kSnapMask,
                               leftE->fDY,
                               riteE->fDY,
                               get_partial_alpha(0xFF, dY),
                               maskRow,
                               isUsingMask);
            blitter->flush_if_y_changed(y, local_bot_fixed);
            left = nextLeft;
            rite = nextRite;
            y = local_bot_fixed;
            // Undo the rounding bias added before the loop.
            left -= kSnapHalf;
            rite -= kSnapHalf;
        }

        // Store the advanced positions back so the edges stay consistent for the
        // next iteration (update()/goY() read fX/fY).
        leftE->fX = left;
        riteE->fX = rite;
        leftE->fY = riteE->fY = y;
    }

END_WALK:;
}
1360
update_next_next_y(SkFixed y,SkFixed nextY,SkFixed * nextNextY)1361 static void update_next_next_y(SkFixed y, SkFixed nextY, SkFixed* nextNextY) {
1362 *nextNextY = y > nextY && y < *nextNextY ? y : *nextNextY;
1363 }
1364
check_intersection(const SkAnalyticEdge * edge,SkFixed nextY,SkFixed * nextNextY)1365 static void check_intersection(const SkAnalyticEdge* edge, SkFixed nextY, SkFixed* nextNextY) {
1366 if (edge->fPrev->fPrev && edge->fPrev->fX + edge->fPrev->fDX > edge->fX + edge->fDX) {
1367 *nextNextY = nextY + (SK_Fixed1 >> SkAnalyticEdge::kDefaultAccuracy);
1368 }
1369 }
1370
// Insert the run of edges that become active at scan line y (starting at
// newEdge) into the x-sorted active list, and fold each affected edge's
// fLowerY — plus any detected near-intersections — into *nextNextY, the next
// y at which the active list must be re-examined.
static void insert_new_edges(SkAnalyticEdge* newEdge, SkFixed y, SkFixed* nextNextY) {
    if (newEdge->fUpperY > y) {
        // Nothing starts yet; just remember when the first new edge will begin.
        update_next_next_y(newEdge->fUpperY, y, nextNextY);
        return;
    }
    SkAnalyticEdge* prev = newEdge->fPrev;
    if (prev->fX <= newEdge->fX) {
        // Fast path: already x-sorted. Accept the whole run of edges with
        // fUpperY <= y in place.
        while (newEdge->fUpperY <= y) {
            check_intersection(newEdge, y, nextNextY);
            update_next_next_y(newEdge->fLowerY, y, nextNextY);
            newEdge = newEdge->fNext;
        }
        update_next_next_y(newEdge->fUpperY, y, nextNextY);
        return;
    }
    // find first x pos to insert
    SkAnalyticEdge* start = backward_insert_start(prev, newEdge->fX);
    // insert the lot, fixing up the links as we go
    do {
        SkAnalyticEdge* next = newEdge->fNext;
        do {
            // Already in position relative to start: skip the relink.
            if (start->fNext == newEdge) {
                goto nextEdge;
            }
            SkAnalyticEdge* after = start->fNext;
            if (after->fX >= newEdge->fX) {
                break;
            }
            SkASSERT(start != after);
            start = after;
        } while (true);
        remove_edge(newEdge);
        insert_edge_after(newEdge, start);
    nextEdge:
        check_intersection(newEdge, y, nextNextY);
        update_next_next_y(newEdge->fLowerY, y, nextNextY);
        // Subsequent edges of this run insert at or after the one just placed.
        start = newEdge;
        newEdge = next;
    } while (newEdge->fUpperY <= y);
    update_next_next_y(newEdge->fUpperY, y, nextNextY);
}
1412
// Debug-only check: for every active edge at scan line y (fUpperY <= y),
// assert the doubly-linked list is consistent, the edge spans downward, and
// the list is x-sorted (ignoring the sentinel head, whose fPrev->fPrev is null).
static void validate_edges_for_y(const SkAnalyticEdge* edge, SkFixed y) {
#ifdef SK_DEBUG
    for (; edge->fUpperY <= y; edge = edge->fNext) {
        SkASSERT(edge->fPrev && edge->fNext);
        SkASSERT(edge->fPrev->fNext == edge);
        SkASSERT(edge->fNext->fPrev == edge);
        SkASSERT(edge->fUpperY <= edge->fLowerY);
        SkASSERT(edge->fPrev->fPrev == nullptr || edge->fPrev->fX <= edge->fX);
    }
#endif
}
1425
1426 // Return true if prev->fX, next->fX are too close in the current pixel row.
edges_too_close(SkAnalyticEdge * prev,SkAnalyticEdge * next,SkFixed lowerY)1427 static bool edges_too_close(SkAnalyticEdge* prev, SkAnalyticEdge* next, SkFixed lowerY) {
1428 // When next->fDX == 0, prev->fX >= next->fX - SkAbs32(next->fDX) would be false
1429 // even if prev->fX and next->fX are close and within one pixel (e.g., prev->fX == 0.1,
1430 // next->fX == 0.9). Adding SLACK = 1 to the formula would guarantee it to be true if two
1431 // edges prev and next are within one pixel.
1432 constexpr SkFixed SLACK = SK_Fixed1;
1433
1434 // Note that even if the following test failed, the edges might still be very close to each
1435 // other at some point within the current pixel row because of prev->fDX and next->fDX.
1436 // However, to handle that case, we have to sacrafice more performance.
1437 // I think the current quality is good enough (mainly by looking at Nebraska-StateSeal.svg)
1438 // so I'll ignore fDX for performance tradeoff.
1439 return next && prev && next->fUpperY < lowerY &&
1440 prev->fX + SLACK >= next->fX - SkAbs32(next->fDX);
1441 // The following is more accurate but also slower.
1442 // return (prev && prev->fPrev && next && next->fNext != nullptr && next->fUpperY < lowerY &&
1443 // prev->fX + SkAbs32(prev->fDX) + SLACK >= next->fX - SkAbs32(next->fDX));
1444 }
1445
1446 // This function exists for the case where the previous rite edge is removed because
1447 // its fLowerY <= nextY
edges_too_close(int prevRite,SkFixed ul,SkFixed ll)1448 static bool edges_too_close(int prevRite, SkFixed ul, SkFixed ll) {
1449 return prevRite > SkFixedFloorToInt(ul) || prevRite > SkFixedFloorToInt(ll);
1450 }
1451
// Flush the trapezoid that was saved on leftE (paired with leftE->fRiteE via
// saveXY) down to lowerY, whose bottom edge spans [lowerLeft, lowerRite].
// The saved top (fSavedX/fSavedY/fSavedDY) and lowerY must lie within the same
// pixel row. Clears leftE->fRiteE when done.
static void blit_saved_trapezoid(SkAnalyticEdge* leftE,
                                 SkFixed lowerY,
                                 SkFixed lowerLeft,
                                 SkFixed lowerRite,
                                 AdditiveBlitter* blitter,
                                 SkAlpha* maskRow,
                                 bool isUsingMask,
                                 bool noRealBlitter,
                                 SkFixed leftClip,
                                 SkFixed rightClip) {
    SkAnalyticEdge* riteE = leftE->fRiteE;
    SkASSERT(riteE);
    SkASSERT(riteE->fNext == nullptr || leftE->fSavedY == riteE->fSavedY);
    SkASSERT(SkFixedFloorToInt(lowerY - 1) == SkFixedFloorToInt(leftE->fSavedY));
    int y = SkFixedFloorToInt(leftE->fSavedY);
    // Instead of using fixed_to_alpha(lowerY - leftE->fSavedY), we use the following fullAlpha
    // to eliminate cumulative error: if there are many fractional y scan lines within the
    // same row, the former may accumulate the rounding error while the later won't.
    SkAlpha fullAlpha = fixed_to_alpha(lowerY - SkIntToFixed(y)) -
                        fixed_to_alpha(leftE->fSavedY - SkIntToFixed(y));
    // We need fSavedDY because the (quad or cubic) edge might be updated
    blit_trapezoid_row(
            blitter,
            y,
            SkTMax(leftE->fSavedX, leftClip),
            SkTMin(riteE->fSavedX, rightClip),
            SkTMax(lowerLeft, leftClip),
            SkTMin(lowerRite, rightClip),
            leftE->fSavedDY,
            riteE->fSavedDY,
            fullAlpha,
            maskRow,
            isUsingMask,
            // Skip the real blitter when a neighboring edge is too close: a full-alpha
            // trapezoid there could break SkAAClip's left-to-right blit requirement.
            noRealBlitter || (fullAlpha == 0xFF && (edges_too_close(leftE->fPrev, leftE, lowerY) ||
                                                    edges_too_close(riteE, riteE->fNext, lowerY))),
            true);
    leftE->fRiteE = nullptr;
}
1490
// Deferred blitting: instead of blitting each fractional scan line immediately,
// save the trapezoid's top on leftE (pairing it with riteE via fRiteE) and only
// flush when the pair changes, an edge ends, or nextY reaches an integer — so
// several fractional slices within one pixel row merge into a single blit.
static void deferred_blit(SkAnalyticEdge* leftE,
                          SkAnalyticEdge* riteE,
                          SkFixed left,
                          SkFixed leftDY,  // don't save leftE->fX/fDY as they may have been updated
                          SkFixed y,
                          SkFixed nextY,
                          bool isIntegralNextY,
                          bool leftEnds,
                          bool riteEnds,
                          AdditiveBlitter* blitter,
                          SkAlpha* maskRow,
                          bool isUsingMask,
                          bool noRealBlitter,
                          SkFixed leftClip,
                          SkFixed rightClip,
                          int yShift) {
    if (leftE->fRiteE && leftE->fRiteE != riteE) {
        // leftE's right edge changed. Blit the saved trapezoid.
        SkASSERT(leftE->fRiteE->fNext == nullptr || leftE->fRiteE->fY == y);
        blit_saved_trapezoid(leftE,
                             y,
                             left,
                             leftE->fRiteE->fX,
                             blitter,
                             maskRow,
                             isUsingMask,
                             noRealBlitter,
                             leftClip,
                             rightClip);
    }
    if (!leftE->fRiteE) {
        // Save and defer blitting the trapezoid
        SkASSERT(riteE->fRiteE == nullptr);
        SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
        SkASSERT(riteE->fNext == nullptr || riteE->fY == y);
        leftE->saveXY(left, y, leftDY);
        riteE->saveXY(riteE->fX, y, riteE->fDY);
        leftE->fRiteE = riteE;
    }
    SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
    riteE->goY(nextY, yShift);
    // Always blit when edges end or nextY is integral
    if (isIntegralNextY || leftEnds || riteEnds) {
        blit_saved_trapezoid(leftE,
                             nextY,
                             leftE->fX,
                             riteE->fX,
                             blitter,
                             maskRow,
                             isUsingMask,
                             noRealBlitter,
                             leftClip,
                             rightClip);
    }
}
1546
// General edge walker for arbitrary (possibly concave, possibly inverse-filled)
// paths. Maintains the whole active edge list x-sorted between the prevHead and
// nextTail sentinels, accumulates the winding count w against windingMask, and
// blits one trapezoid per in-interval span per (possibly fractional) scan line.
// nextNextY tracks the next y where the edge set changes (edge start/end or a
// predicted intersection); scan lines are additionally subdivided to 1/4-pixel
// boundaries (yShift). When useDeferred is set, trapezoids are saved on their
// left edge and flushed lazily via deferred_blit. forceRLE implies SkAAClip is
// the consumer (flush per row; avoid full-alpha spans near crossing edges).
static void aaa_walk_edges(SkAnalyticEdge* prevHead,
                           SkAnalyticEdge* nextTail,
                           SkPath::FillType fillType,
                           AdditiveBlitter* blitter,
                           int start_y,
                           int stop_y,
                           SkFixed leftClip,
                           SkFixed rightClip,
                           bool isUsingMask,
                           bool forceRLE,
                           bool useDeferred,
                           bool skipIntersect) {
    prevHead->fX = prevHead->fUpperX = leftClip;
    nextTail->fX = nextTail->fUpperX = rightClip;
    SkFixed y = SkTMax(prevHead->fNext->fUpperY, SkIntToFixed(start_y));
    SkFixed nextNextY = SK_MaxS32;

    {
        // Advance every edge that is already active at the starting y, and seed
        // nextNextY with the earliest lower end / next upper start.
        SkAnalyticEdge* edge;
        for (edge = prevHead->fNext; edge->fUpperY <= y; edge = edge->fNext) {
            edge->goY(y);
            update_next_next_y(edge->fLowerY, y, &nextNextY);
        }
        update_next_next_y(edge->fUpperY, y, &nextNextY);
    }

    // returns 1 for evenodd, -1 for winding, regardless of inverse-ness
    int windingMask = (fillType & 1) ? 1 : -1;

    bool isInverse = SkPath::IsInverseFillType(fillType);

    if (isInverse && SkIntToFixed(start_y) != y) {
        // Inverse fill: everything above the first edge is inside. Blit the full
        // rows, then the fractional remainder of the row containing y.
        int width = SkFixedFloorToInt(rightClip - leftClip);
        if (SkFixedFloorToInt(y) != start_y) {
            blitter->getRealBlitter()->blitRect(
                    SkFixedFloorToInt(leftClip), start_y, width, SkFixedFloorToInt(y) - start_y);
            start_y = SkFixedFloorToInt(y);
        }
        SkAlpha* maskRow =
                isUsingMask ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(start_y) : nullptr;
        blit_full_alpha(blitter,
                        start_y,
                        SkFixedFloorToInt(leftClip),
                        width,
                        fixed_to_alpha(y - SkIntToFixed(start_y)),
                        maskRow,
                        isUsingMask,
                        false,
                        false);
    }

    while (true) {
        int w = 0;                       // winding count across the current scan line
        bool in_interval = isInverse;    // inverse fill starts "inside" at the left clip
        SkFixed prevX = prevHead->fX;
        SkFixed nextY = SkTMin(nextNextY, SkFixedCeilToFixed(y + 1));
        bool isIntegralNextY = (nextY & (SK_Fixed1 - 1)) == 0;
        SkAnalyticEdge* currE = prevHead->fNext;
        SkAnalyticEdge* leftE = prevHead;
        SkFixed left = leftClip;
        SkFixed leftDY = 0;
        bool leftEnds = false;
        int prevRite = SkFixedFloorToInt(leftClip);

        nextNextY = SK_MaxS32;

        // Clamp the step to the next 1/4-pixel boundary (kDefaultAccuracy).
        SkASSERT((nextY & ((SK_Fixed1 >> 2) - 1)) == 0);
        int yShift = 0;
        if ((nextY - y) & (SK_Fixed1 >> 2)) {
            yShift = 2;
            nextY = y + (SK_Fixed1 >> 2);
        } else if ((nextY - y) & (SK_Fixed1 >> 1)) {
            yShift = 1;
            SkASSERT(nextY == y + (SK_Fixed1 >> 1));
        }

        SkAlpha fullAlpha = fixed_to_alpha(nextY - y);

        // If we're using mask blitter, we advance the mask row in this function
        // to save some "if" condition checks.
        SkAlpha* maskRow = nullptr;
        if (isUsingMask) {
            maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(SkFixedFloorToInt(y));
        }

        SkASSERT(currE->fPrev == prevHead);
        validate_edges_for_y(currE, y);

        // Even if next - y == SK_Fixed1, we can still break the left-to-right order requirement
        // of the SKAAClip: |\| (two trapezoids with overlapping middle wedges)
        bool noRealBlitter = forceRLE;  // forceRLE && (nextY - y != SK_Fixed1);

        // Sweep the active edges left to right, toggling in_interval via winding.
        while (currE->fUpperY <= y) {
            SkASSERT(currE->fLowerY >= nextY);
            SkASSERT(currE->fY == y);

            w += currE->fWinding;
            bool prev_in_interval = in_interval;
            in_interval = !(w & windingMask) == isInverse;

            bool isLeft = in_interval && !prev_in_interval;
            bool isRite = !in_interval && prev_in_interval;
            bool currEnds = currE->fLowerY == nextY;

            if (useDeferred) {
                if (currE->fRiteE && !isLeft) {
                    // currE is a left edge previously, but now it's not.
                    // Blit the trapezoid between fSavedY and y.
                    SkASSERT(currE->fRiteE->fY == y);
                    blit_saved_trapezoid(currE,
                                         y,
                                         currE->fX,
                                         currE->fRiteE->fX,
                                         blitter,
                                         maskRow,
                                         isUsingMask,
                                         noRealBlitter,
                                         leftClip,
                                         rightClip);
                }
                if (leftE->fRiteE == currE && !isRite) {
                    // currE is a right edge previously, but now it's not.
                    // Moreover, its corresponding leftE doesn't change (otherwise we'll handle it
                    // in the previous if clause). Hence we blit the trapezoid.
                    blit_saved_trapezoid(leftE,
                                         y,
                                         left,
                                         currE->fX,
                                         blitter,
                                         maskRow,
                                         isUsingMask,
                                         noRealBlitter,
                                         leftClip,
                                         rightClip);
                }
            }

            if (isRite) {
                // Closing a span: blit (or defer) the trapezoid [leftE, currE].
                if (useDeferred) {
                    deferred_blit(leftE,
                                  currE,
                                  left,
                                  leftDY,
                                  y,
                                  nextY,
                                  isIntegralNextY,
                                  leftEnds,
                                  currEnds,
                                  blitter,
                                  maskRow,
                                  isUsingMask,
                                  noRealBlitter,
                                  leftClip,
                                  rightClip,
                                  yShift);
                } else {
                    SkFixed rite = currE->fX;
                    currE->goY(nextY, yShift);
                    SkFixed nextLeft = SkTMax(leftClip, leftE->fX);
                    rite = SkTMin(rightClip, rite);
                    SkFixed nextRite = SkTMin(rightClip, currE->fX);
                    blit_trapezoid_row(
                            blitter,
                            y >> 16,
                            left,
                            rite,
                            nextLeft,
                            nextRite,
                            leftDY,
                            currE->fDY,
                            fullAlpha,
                            maskRow,
                            isUsingMask,
                            noRealBlitter || (fullAlpha == 0xFF &&
                                              (edges_too_close(prevRite, left, leftE->fX) ||
                                               edges_too_close(currE, currE->fNext, nextY))),
                            true);
                    prevRite = SkFixedCeilToInt(SkTMax(rite, currE->fX));
                }
            } else {
                if (isLeft) {
                    // Opening a span: remember its left boundary.
                    left = SkTMax(currE->fX, leftClip);
                    leftDY = currE->fDY;
                    leftE = currE;
                    leftEnds = leftE->fLowerY == nextY;
                }
                currE->goY(nextY, yShift);
            }

            SkAnalyticEdge* next = currE->fNext;
            SkFixed newX;

            // Advance exhausted curve edges to their next segment (quads/cubics
            // are piecewise; lines break out immediately).
            while (currE->fLowerY <= nextY) {
                if (currE->fCurveCount < 0) {
                    SkAnalyticCubicEdge* cubicEdge = (SkAnalyticCubicEdge*)currE;
                    cubicEdge->keepContinuous();
                    if (!cubicEdge->updateCubic()) {
                        break;
                    }
                } else if (currE->fCurveCount > 0) {
                    SkAnalyticQuadraticEdge* quadEdge = (SkAnalyticQuadraticEdge*)currE;
                    quadEdge->keepContinuous();
                    if (!quadEdge->updateQuadratic()) {
                        break;
                    }
                } else {
                    break;
                }
            }
            SkASSERT(currE->fY == nextY);

            if (currE->fLowerY <= nextY) {
                remove_edge(currE);  // fully consumed
            } else {
                update_next_next_y(currE->fLowerY, nextY, &nextNextY);
                newX = currE->fX;
                SkASSERT(currE->fLowerY > nextY);
                if (newX < prevX) {  // ripple currE backwards until it is x-sorted
                    // If the crossing edge is a right edge, blit the saved trapezoid.
                    if (leftE->fRiteE == currE && useDeferred) {
                        SkASSERT(leftE->fY == nextY && currE->fY == nextY);
                        blit_saved_trapezoid(leftE,
                                             nextY,
                                             leftE->fX,
                                             currE->fX,
                                             blitter,
                                             maskRow,
                                             isUsingMask,
                                             noRealBlitter,
                                             leftClip,
                                             rightClip);
                    }
                    backward_insert_edge_based_on_x(currE);
                } else {
                    prevX = newX;
                }
                if (!skipIntersect) {
                    check_intersection(currE, nextY, &nextNextY);
                }
            }

            currE = next;
            SkASSERT(currE);
        }

        // was our right-edge culled away?
        if (in_interval) {
            if (useDeferred) {
                deferred_blit(leftE,
                              nextTail,
                              left,
                              leftDY,
                              y,
                              nextY,
                              isIntegralNextY,
                              leftEnds,
                              false,
                              blitter,
                              maskRow,
                              isUsingMask,
                              noRealBlitter,
                              leftClip,
                              rightClip,
                              yShift);
            } else {
                blit_trapezoid_row(blitter,
                                   y >> 16,
                                   left,
                                   rightClip,
                                   SkTMax(leftClip, leftE->fX),
                                   rightClip,
                                   leftDY,
                                   0,
                                   fullAlpha,
                                   maskRow,
                                   isUsingMask,
                                   noRealBlitter || (fullAlpha == 0xFF &&
                                                     edges_too_close(leftE->fPrev, leftE, nextY)),
                                   true);
            }
        }

        if (forceRLE) {
            ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY);
        }

        y = nextY;
        if (y >= SkIntToFixed(stop_y)) {
            break;
        }

        // now currE points to the first edge with a fUpperY larger than the previous y
        insert_new_edges(currE, y, &nextNextY);
    }
}
1842
aaa_fill_path(const SkPath & path,const SkIRect & clipRect,AdditiveBlitter * blitter,int start_y,int stop_y,bool pathContainedInClip,bool isUsingMask,bool forceRLE)1843 static SK_ALWAYS_INLINE void aaa_fill_path(
1844 const SkPath& path,
1845 const SkIRect& clipRect,
1846 AdditiveBlitter* blitter,
1847 int start_y,
1848 int stop_y,
1849 bool pathContainedInClip,
1850 bool isUsingMask,
1851 bool forceRLE) { // forceRLE implies that SkAAClip is calling us
1852 SkASSERT(blitter);
1853
1854 SkAnalyticEdgeBuilder builder;
1855 int count = builder.buildEdges(path, pathContainedInClip ? nullptr : &clipRect);
1856 SkAnalyticEdge** list = builder.analyticEdgeList();
1857
1858 SkIRect rect = clipRect;
1859 if (0 == count) {
1860 if (path.isInverseFillType()) {
1861 /*
1862 * Since we are in inverse-fill, our caller has already drawn above
1863 * our top (start_y) and will draw below our bottom (stop_y). Thus
1864 * we need to restrict our drawing to the intersection of the clip
1865 * and those two limits.
1866 */
1867 if (rect.fTop < start_y) {
1868 rect.fTop = start_y;
1869 }
1870 if (rect.fBottom > stop_y) {
1871 rect.fBottom = stop_y;
1872 }
1873 if (!rect.isEmpty()) {
1874 blitter->getRealBlitter()->blitRect(
1875 rect.fLeft, rect.fTop, rect.width(), rect.height());
1876 }
1877 }
1878 return;
1879 }
1880
1881 SkAnalyticEdge headEdge, tailEdge, *last;
1882 // this returns the first and last edge after they're sorted into a dlink list
1883 SkAnalyticEdge* edge = sort_edges(list, count, &last);
1884
1885 headEdge.fRiteE = nullptr;
1886 headEdge.fPrev = nullptr;
1887 headEdge.fNext = edge;
1888 headEdge.fUpperY = headEdge.fLowerY = SK_MinS32;
1889 headEdge.fX = SK_MinS32;
1890 headEdge.fDX = 0;
1891 headEdge.fDY = SK_MaxS32;
1892 headEdge.fUpperX = SK_MinS32;
1893 edge->fPrev = &headEdge;
1894
1895 tailEdge.fRiteE = nullptr;
1896 tailEdge.fPrev = last;
1897 tailEdge.fNext = nullptr;
1898 tailEdge.fUpperY = tailEdge.fLowerY = SK_MaxS32;
1899 tailEdge.fX = SK_MaxS32;
1900 tailEdge.fDX = 0;
1901 tailEdge.fDY = SK_MaxS32;
1902 tailEdge.fUpperX = SK_MaxS32;
1903 last->fNext = &tailEdge;
1904
1905 // now edge is the head of the sorted linklist
1906
1907 if (!pathContainedInClip && start_y < clipRect.fTop) {
1908 start_y = clipRect.fTop;
1909 }
1910 if (!pathContainedInClip && stop_y > clipRect.fBottom) {
1911 stop_y = clipRect.fBottom;
1912 }
1913
1914 SkFixed leftBound = SkIntToFixed(rect.fLeft);
1915 SkFixed rightBound = SkIntToFixed(rect.fRight);
1916 if (isUsingMask) {
1917 // If we're using mask, then we have to limit the bound within the path bounds.
1918 // Otherwise, the edge drift may access an invalid address inside the mask.
1919 SkIRect ir;
1920 path.getBounds().roundOut(&ir);
1921 leftBound = SkTMax(leftBound, SkIntToFixed(ir.fLeft));
1922 rightBound = SkTMin(rightBound, SkIntToFixed(ir.fRight));
1923 }
1924
1925 if (!path.isInverseFillType() && path.isConvex() && count >= 2) {
1926 aaa_walk_convex_edges(
1927 &headEdge, blitter, start_y, stop_y, leftBound, rightBound, isUsingMask);
1928 } else {
1929 // Only use deferred blitting if there are many edges.
1930 bool useDeferred =
1931 count >
1932 (SkFixedFloorToInt(tailEdge.fPrev->fLowerY - headEdge.fNext->fUpperY) + 1) * 4;
1933
1934 // We skip intersection computation if there are many points which probably already
1935 // give us enough fractional scan lines.
1936 bool skipIntersect = path.countPoints() > (stop_y - start_y) * 2;
1937
1938 aaa_walk_edges(&headEdge,
1939 &tailEdge,
1940 path.getFillType(),
1941 blitter,
1942 start_y,
1943 stop_y,
1944 leftBound,
1945 rightBound,
1946 isUsingMask,
1947 forceRLE,
1948 useDeferred,
1949 skipIntersect);
1950 }
1951 }
1952
AAAFillPath(const SkPath & path,SkBlitter * blitter,const SkIRect & ir,const SkIRect & clipBounds,bool forceRLE)1953 void SkScan::AAAFillPath(const SkPath& path,
1954 SkBlitter* blitter,
1955 const SkIRect& ir,
1956 const SkIRect& clipBounds,
1957 bool forceRLE) {
1958 bool containedInClip = clipBounds.contains(ir);
1959 bool isInverse = path.isInverseFillType();
1960
1961 // The mask blitter (where we store intermediate alpha values directly in a mask, and then call
1962 // the real blitter once in the end to blit the whole mask) is faster than the RLE blitter when
1963 // the blit region is small enough (i.e., CanHandleRect(ir)). When isInverse is true, the blit
1964 // region is no longer the rectangle ir so we won't use the mask blitter. The caller may also
1965 // use the forceRLE flag to force not using the mask blitter. Also, when the path is a simple
1966 // rect, preparing a mask and blitting it might have too much overhead. Hence we'll use
1967 // blitFatAntiRect to avoid the mask and its overhead.
1968 if (MaskAdditiveBlitter::CanHandleRect(ir) && !isInverse && !forceRLE) {
1969 // blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
1970 // Hence only tryBlitFatAntiRect when MaskAdditiveBlitter would have been used.
1971 if (!TryBlitFatAntiRect(blitter, path, clipBounds)) {
1972 MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
1973 aaa_fill_path(path,
1974 clipBounds,
1975 &additiveBlitter,
1976 ir.fTop,
1977 ir.fBottom,
1978 containedInClip,
1979 true,
1980 forceRLE);
1981 }
1982 } else if (!isInverse && path.isConvex()) {
1983 // If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler
1984 // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
1985 // SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
1986 // RunBasedAdditiveBlitter would suffice.
1987 RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
1988 aaa_fill_path(path,
1989 clipBounds,
1990 &additiveBlitter,
1991 ir.fTop,
1992 ir.fBottom,
1993 containedInClip,
1994 false,
1995 forceRLE);
1996 } else {
1997 // If the filling area might not be convex, the more involved aaa_walk_edges would
1998 // be called and we have to clamp the alpha downto 255. The SafeRLEAdditiveBlitter
1999 // does that at a cost of performance.
2000 SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
2001 aaa_fill_path(path,
2002 clipBounds,
2003 &additiveBlitter,
2004 ir.fTop,
2005 ir.fBottom,
2006 containedInClip,
2007 false,
2008 forceRLE);
2009 }
2010 }
2011 #endif // defined(SK_DISABLE_AAA)
2012