/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "SkScanPriv.h"
#include "SkPath.h"
#include "SkMatrix.h"
#include "SkBlitter.h"
#include "SkRegion.h"
#include "SkAntiRun.h"

#define SHIFT   2
#define SCALE   (1 << SHIFT)
#define MASK    (SCALE - 1)
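
// Worked example of the supersampling factor (an illustrative note, not part of
// the original sources): with SHIFT == 2, SCALE == 4 and MASK == 3, so every
// destination pixel corresponds to a 4x4 block of supersamples. Converting a
// supersampled coordinate back to a destination coordinate is a right shift:
//     superX >> SHIFT == destX,   (superX & MASK) == sub-pixel offset in [0, 3]
// Full coverage of one destination pixel therefore means SCALE covered samples
// in each of SCALE supersampled rows.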

/** @file
    We have two techniques for capturing the output of the supersampler:
    - SUPERMASK, which records a large mask-bitmap
        this is often faster for small, complex objects
    - RLE, which records a rle-encoded scanline
        this is often faster for large objects with big spans

    These blitters use two coordinate systems:
    - destination coordinates, scale equal to the output - often
        abbreviated with 'i' or 'I' in variable names
    - supersampled coordinates, scale equal to the output * SCALE
 */

//#define FORCE_SUPERMASK
//#define FORCE_RLE

///////////////////////////////////////////////////////////////////////////////

/// Base class for a single-pass supersampled blitter.
class BaseSuperBlitter : public SkBlitter {
public:
    BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkRegion& clip, bool isInverse);

    /// Must be explicitly defined on subclasses.
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[],
                           const int16_t runs[]) override {
        SkDEBUGFAIL("How did I get here?");
    }
    /// May not be called on BaseSuperBlitter because it blits out of order.
    void blitV(int x, int y, int height, SkAlpha alpha) override {
        SkDEBUGFAIL("How did I get here?");
    }

protected:
    SkBlitter* fRealBlitter;
    /// Current y coordinate, in destination coordinates.
    int fCurrIY;
    /// Widest row of region to be blitted, in destination coordinates.
    int fWidth;
    /// Leftmost x coordinate in any row, in destination coordinates.
    int fLeft;
    /// Leftmost x coordinate in any row, in supersampled coordinates.
    int fSuperLeft;

    SkDEBUGCODE(int fCurrX;)
    /// Current y coordinate in supersampled coordinates.
    int fCurrY;
    /// Initial y coordinate (top of bounds).
    int fTop;

    SkIRect fSectBounds;
};

BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlit, const SkIRect& ir, const SkRegion& clip,
                                   bool isInverse) {
    fRealBlitter = realBlit;

    SkIRect sectBounds;
    if (isInverse) {
        // We use the clip bounds instead of the ir, since we may be asked to
        // draw outside of the rect when we're an inverse filltype
        sectBounds = clip.getBounds();
    } else {
        if (!sectBounds.intersect(ir, clip.getBounds())) {
            sectBounds.setEmpty();
        }
    }

    const int left = sectBounds.left();
    const int right = sectBounds.right();

    fLeft = left;
    fSuperLeft = SkLeftShift(left, SHIFT);
    fWidth = right - left;
    fTop = sectBounds.top();
    fCurrIY = fTop - 1;
    fCurrY = SkLeftShift(fTop, SHIFT) - 1;

    SkDEBUGCODE(fCurrX = -1;)
}

/// Run-length-encoded supersampling antialiased blitter.
class SuperBlitter : public BaseSuperBlitter {
public:
    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip, bool isInverse);

    ~SuperBlitter() override {
        this->flush();
    }

    /// Once fRuns contains a complete supersampled row, flush() blits
    /// it out through the wrapped blitter.
    void flush();

    /// Blits a row of pixels, with location and width specified
    /// in supersampled coordinates.
    void blitH(int x, int y, int width) override;
    /// Blits a rectangle of pixels, with location and size specified
    /// in supersampled coordinates.
    void blitRect(int x, int y, int width, int height) override;

private:
    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int         fRunsToBuffer;
    void*       fRunsBuffer;
    int         fCurrentRun;
    SkAlphaRuns fRuns;

    // extra one to store the zero at the end
    int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }
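
    // Layout note (illustrative, based on how advanceRuns() carves up the
    // buffer below): each SkAlphaRuns needs fWidth + 1 int16_t run counts
    // (the "+ 1" stores the terminating zero) immediately followed by
    // fWidth + 1 bytes of alpha. Expressed in int16_t units that is
    //     (fWidth + 1) + ceil((fWidth + 1) / 2) == (fWidth + 1) + (fWidth + 2) / 2
    // which is exactly what getRunsSz() returns, e.g. fWidth == 10 gives
    // 11 + 6 == 17 int16_t == 34 bytes per buffered row.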

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(
            reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

    int fOffsetX;
};

SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip,
                           bool isInverse)
        : BaseSuperBlitter(realBlitter, ir, clip, isInverse)
{
    fRunsToBuffer = realBlitter->requestRowsPreserved();
    fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
    fCurrentRun = -1;

    this->advanceRuns();

    fOffsetX = 0;
}

void SuperBlitter::flush() {
    if (fCurrIY >= fTop) {

        SkASSERT(fCurrentRun < fRunsToBuffer);
        if (!fRuns.empty()) {
            // SkDEBUGCODE(fRuns.dump();)
            fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
            this->advanceRuns();
            fOffsetX = 0;
        }

        fCurrIY = fTop - 1;
        SkDEBUGCODE(fCurrX = -1;)
    }
}

/** coverage_to_partial_alpha() is being used by SkAlphaRuns, which
    *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)]
    to produce a final value in [0, 255] and handles clamping 256->255
    itself, with the same (alpha - (alpha >> 8)) correction as
    coverage_to_exact_alpha().
*/
static inline int coverage_to_partial_alpha(int aa) {
    aa <<= 8 - 2*SHIFT;
    return aa;
}
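
// Worked example (illustrative): with SHIFT == 2, a single supersampled row can
// cover at most SCALE == 4 samples of one destination pixel, so aa is in [0, 4].
// Shifting left by 8 - 2*SHIFT == 4 maps that to [0, 64] == [0, 256/SCALE].
// Accumulating one such value for each of the 4 supersampled rows of a
// destination row yields at most 256, which SkAlphaRuns clamps down to 255.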

/** coverage_to_exact_alpha() is being used by our blitter, which wants
    a final value in [0, 255].
*/
static inline int coverage_to_exact_alpha(int aa) {
    int alpha = (256 >> SHIFT) * aa;
    // clamp 256->255
    return alpha - (alpha >> 8);
}
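
// Worked example (illustrative): with SHIFT == 2, 256 >> SHIFT == 64, so
// aa == 1..4 covered samples map to 64, 128, 192 and 256, and the final
// (alpha - (alpha >> 8)) step turns only the 256 case into 255 without a branch.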

void SuperBlitter::blitH(int x, int y, int width) {
    SkASSERT(width > 0);

    int iy = y >> SHIFT;
    SkASSERT(iy >= fCurrIY);

    x -= fSuperLeft;
    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

#ifdef SK_DEBUG
    SkASSERT(y != fCurrY || x >= fCurrX);
#endif
    SkASSERT(y >= fCurrY);
    if (fCurrY != y) {
        fOffsetX = 0;
        fCurrY = y;
    }

    if (iy != fCurrIY) {  // new scanline
        this->flush();
        fCurrIY = iy;
    }

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // integer-pixel-aligned ends of blit, rounded out
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;

    if (n < 0) {
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else {
        if (fb == 0) {
            n += 1;
        } else {
            fb = SCALE - fb;
        }
    }
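
    // Worked example (illustrative, SHIFT == 2): a span with start == 6 and
    // stop == 21 gives fb == 6 & 3 == 2, fe == 21 & 3 == 1 and
    // n == (21 >> 2) - (6 >> 2) - 1 == 3. Since n >= 0 and fb != 0, fb becomes
    // SCALE - fb == 2, meaning: 2 covered samples in the leading destination
    // pixel, 3 fully covered pixels, and 1 covered sample in the trailing pixel.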

    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
                         n, coverage_to_partial_alpha(fe),
                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
                         fOffsetX);

#ifdef SK_DEBUG
    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
    fCurrX = x + width;
#endif
}

#if 0 // UNUSED
static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
                               int n, U8CPU riteA) {
    SkASSERT(leftA <= 0xFF);
    SkASSERT(riteA <= 0xFF);

    int16_t* run = runs.fRuns;
    uint8_t* aa = runs.fAlpha;

    if (ileft > 0) {
        run[0] = ileft;
        aa[0] = 0;
        run += ileft;
        aa += ileft;
    }

    SkASSERT(leftA < 0xFF);
    if (leftA > 0) {
        *run++ = 1;
        *aa++ = leftA;
    }

    if (n > 0) {
        run[0] = n;
        aa[0] = 0xFF;
        run += n;
        aa += n;
    }

    SkASSERT(riteA < 0xFF);
    if (riteA > 0) {
        *run++ = 1;
        *aa++ = riteA;
    }
    run[0] = 0;
}
#endif

void SuperBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(width > 0);
    SkASSERT(height > 0);

    // blit leading rows
    while ((y & MASK)) {
        this->blitH(x, y++, width);
        if (--height <= 0) {
            return;
        }
    }
    SkASSERT(height > 0);

    // Since this is a rect, instead of blitting supersampled rows one at a
    // time and then resolving to the destination canvas, we can blit
    // directly to the destination canvas one row per SCALE supersampled rows.
    int start_y = y >> SHIFT;
    int stop_y = (y + height) >> SHIFT;
    int count = stop_y - start_y;
    if (count > 0) {
        y += count << SHIFT;
        height -= count << SHIFT;

        // save original X for our tail blitH() loop at the bottom
        int origX = x;

        x -= fSuperLeft;
        // hack, until I figure out why my cubics (I think) go beyond the bounds
        if (x < 0) {
            width += x;
            x = 0;
        }

        // There is always a left column, a middle, and a right column.
        // ileft is the destination x of the first pixel of the entire rect.
        // xleft is (SCALE - # of covered supersampled pixels) in that
        // destination pixel.
        int ileft = x >> SHIFT;
        int xleft = x & MASK;
        // irite is the destination x of the last pixel of the OPAQUE section.
        // xrite is the number of supersampled pixels extending beyond irite;
        // xrite/SCALE should give us alpha.
        int irite = (x + width) >> SHIFT;
        int xrite = (x + width) & MASK;
        if (!xrite) {
            xrite = SCALE;
            irite--;
        }
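
        // Worked example (illustrative, SHIFT == 2): x == 6, width == 15 gives
        // ileft == 1, xleft == 2 (two of the four samples in pixel 1 are not
        // covered), and x + width == 21 gives irite == 5, xrite == 1. The left
        // column's coverage is SCALE - xleft == 2 samples, the right column's
        // is xrite == 1 sample, matching the blitH() example above.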

        // Need to call flush() to clean up pending draws before we
        // even consider blitV(), since otherwise it can look nonmonotonic.
        SkASSERT(start_y > fCurrIY);
        this->flush();

        int n = irite - ileft - 1;
        if (n < 0) {
            // If n < 0, we'll only have a single partially-transparent column
            // of pixels to render.
            xleft = xrite - xleft;
            SkASSERT(xleft <= SCALE);
            SkASSERT(xleft > 0);
            fRealBlitter->blitV(ileft + fLeft, start_y, count,
                                coverage_to_exact_alpha(xleft));
        } else {
            // With n = 0, we have two possibly-transparent columns of pixels
            // to render; with n > 0, we have opaque columns between them.

            xleft = SCALE - xleft;

            // Using coverage_to_exact_alpha is not consistent with blitH()
            const int coverageL = coverage_to_exact_alpha(xleft);
            const int coverageR = coverage_to_exact_alpha(xrite);

            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);

            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
                                       coverageL, coverageR);
        }

        // preamble for our next call to blitH()
        fCurrIY = stop_y - 1;
        fOffsetX = 0;
        fCurrY = y - 1;
        fRuns.reset(fWidth);
        x = origX;
    }

    // catch any remaining few rows
    SkASSERT(height <= MASK);
    while (--height >= 0) {
        this->blitH(x, y++, width);
    }
}

///////////////////////////////////////////////////////////////////////////////

/// Masked supersampling antialiased blitter.
class MaskSuperBlitter : public BaseSuperBlitter {
public:
    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion&, bool isInverse);

    ~MaskSuperBlitter() override {
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    void blitH(int x, int y, int width) override;

    static bool CanHandleRect(const SkIRect& bounds) {
#ifdef FORCE_RLE
        return false;
#endif
        int width = bounds.width();
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
               (storage <= MaskSuperBlitter::kMAX_STORAGE);
    }

private:
    enum {
#ifdef FORCE_SUPERMASK
        kMAX_WIDTH = 2048,
        kMAX_STORAGE = 1024 * 1024 * 2
#else
        kMAX_WIDTH = 32,    // so we don't try to do very wide things, where the RLE blitter would be faster
        kMAX_STORAGE = 1024
#endif
    };

    SkMask   fMask;
    SkIRect  fClipRect;
    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
    // perform a test to see if stopAlpha != 0
    uint32_t fStorage[(kMAX_STORAGE >> 2) + 1];
};

MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip,
                                   bool isInverse)
        : BaseSuperBlitter(realBlitter, ir, clip, isInverse)
{
    SkASSERT(CanHandleRect(ir));
    SkASSERT(!isInverse);

    fMask.fImage    = (uint8_t*)fStorage;
    fMask.fBounds   = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat   = SkMask::kA8_Format;

    fClipRect = ir;
    if (!fClipRect.intersect(clip.getBounds())) {
        SkASSERT(0);
        fClipRect.setEmpty();
    }

    // For valgrind, write 1 extra byte at the end so we don't read
    // uninitialized memory. See comment in add_aa_span and fStorage[].
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
}

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
    /* I should be able to just add alpha[x] + startAlpha.
       However, if the trailing edge of the previous span and the leading
       edge of the current span round to the same super-sampled x value,
       I might overflow to 256 with this add, hence the funny subtract.
    */
    unsigned tmp = *alpha + startAlpha;
    SkASSERT(tmp <= 256);
    *alpha = SkToU8(tmp - (tmp >> 8));
}

static inline uint32_t quadplicate_byte(U8CPU value) {
    uint32_t pair = (value << 8) | value;
    return (pair << 16) | pair;
}
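
// Worked example (illustrative): quadplicate_byte(0x40) first builds
// pair == 0x4040, then returns 0x40404040, i.e. the byte replicated into all
// four lanes of a uint32_t. Adding that word to four packed alpha bytes at
// once is what lets the quad loop in add_aa_span() below bump four pixels per
// iteration, provided no individual byte can overflow into its neighbor.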

// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
// only ever call us with at most enough to hit 256 (never larger), so it is
// enough to just subtract the high-bit. Actually clamping with a branch would
// be slower (e.g. if (tmp > 255) tmp = 255;)
//
static inline void saturated_add(uint8_t* ptr, U8CPU add) {
    unsigned tmp = *ptr + add;
    SkASSERT(tmp <= 256);
    *ptr = SkToU8(tmp - (tmp >> 8));
}
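
// Worked example (illustrative): if tmp == 256 then tmp >> 8 == 1 and the
// stored result is 255; for any tmp <= 255 the shift contributes 0 and the
// value is stored unchanged, so the expression behaves as a branch-free clamp
// of exactly the 256 case.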

// minimum count before we want to setup an inner loop, adding 4-at-a-time
#define MIN_COUNT_FOR_QUAD_LOOP 16

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
                        U8CPU stopAlpha, U8CPU maxValue) {
    SkASSERT(middleCount >= 0);

    saturated_add(alpha, startAlpha);
    alpha += 1;

    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
        // loop until we're quad-byte aligned
        while (SkTCast<intptr_t>(alpha) & 0x3) {
            alpha[0] = SkToU8(alpha[0] + maxValue);
            alpha += 1;
            middleCount -= 1;
        }

        int bigCount = middleCount >> 2;
        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
        uint32_t qval = quadplicate_byte(maxValue);
        do {
            *qptr++ += qval;
        } while (--bigCount > 0);

        middleCount &= 3;
        alpha = reinterpret_cast<uint8_t*>(qptr);
        // fall through to the following while-loop
    }

    while (--middleCount >= 0) {
        alpha[0] = SkToU8(alpha[0] + maxValue);
        alpha += 1;
    }

    // potentially this can be off the end of our "legal" alpha values, but that
    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
    // every time (slow), we just do it, and ensure that we've allocated extra space
    // (see the + 1 comment in fStorage[])
    saturated_add(alpha, stopAlpha);
}
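
// Note on why the packed quad add above cannot overflow a byte (illustrative,
// for SHIFT == 2): the caller passes maxValue == (1 << (8 - SHIFT)), minus 1 on
// the last supersampled row of each destination row, i.e. 64, 64, 64 and then
// 63 across the four rows, so a fully covered middle pixel accumulates to at
// most 255 and never carries into the neighboring byte.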

void MaskSuperBlitter::blitH(int x, int y, int width) {
    int iy = (y >> SHIFT);

    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
    iy -= fMask.fBounds.fTop;   // make it relative to 0

    // This should never happen, but it does. Until the true cause is
    // discovered, let's skip this span instead of crashing.
    // See http://crbug.com/17569.
    if (iy < 0) {
        return;
    }

#ifdef SK_DEBUG
    {
        int ix = x >> SHIFT;
        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
    }
#endif

    x -= SkLeftShift(fMask.fBounds.fLeft, SHIFT);

    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;

    if (n < 0) {
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
    } else {
        fb = SCALE - fb;
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fb),
                    n, coverage_to_partial_alpha(fe),
                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
    }

#ifdef SK_DEBUG
    fCurrX = x + width;
#endif
}

///////////////////////////////////////////////////////////////////////////////

static bool fitsInsideLimit(const SkRect& r, SkScalar max) {
    const SkScalar min = -max;
    return  r.fLeft > min && r.fTop > min &&
            r.fRight < max && r.fBottom < max;
}

static int overflows_short_shift(int value, int shift) {
    const int s = 16 + shift;
    return (SkLeftShift(value, s) >> s) - value;
}
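
// Worked example (illustrative, SHIFT == 2): for value == 8192 the check shifts
// by s == 18, and (8192 << 18) >> 18 does not round-trip in a 32-bit int, so
// the result is nonzero: 8192 << SHIFT == 32768 does not fit in an int16_t.
// For value == 8191 the round trip is lossless and the function returns 0,
// which is exactly what the asserts below demonstrate.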

/**
    Would any of the coordinates of this rectangle not fit in a short,
    when left-shifted by shift?
*/
static int rect_overflows_short_shift(SkIRect rect, int shift) {
    SkASSERT(!overflows_short_shift(8191, SHIFT));
    SkASSERT(overflows_short_shift(8192, SHIFT));
    SkASSERT(!overflows_short_shift(32767, 0));
    SkASSERT(overflows_short_shift(32768, 0));

    // Since we expect these to succeed, we bit-or together
    // for a tiny extra bit of speed.
    return overflows_short_shift(rect.fLeft, SHIFT) |
           overflows_short_shift(rect.fRight, SHIFT) |
           overflows_short_shift(rect.fTop, SHIFT) |
           overflows_short_shift(rect.fBottom, SHIFT);
}

static bool safeRoundOut(const SkRect& src, SkIRect* dst, int32_t maxInt) {
    const SkScalar maxScalar = SkIntToScalar(maxInt);

    if (fitsInsideLimit(src, maxScalar)) {
        src.roundOut(dst);
        return true;
    }
    return false;
}

void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
                          SkBlitter* blitter, bool forceRLE) {
    if (origClip.isEmpty()) {
        return;
    }

    const bool isInverse = path.isInverseFillType();
    SkIRect ir;

    if (!safeRoundOut(path.getBounds(), &ir, SK_MaxS32 >> SHIFT)) {
#if 0
        const SkRect& r = path.getBounds();
        SkDebugf("--- bounds can't fit in SkIRect: %g %g %g %g\n",
                 r.fLeft, r.fTop, r.fRight, r.fBottom);
#endif
        return;
    }
    if (ir.isEmpty()) {
        if (isInverse) {
            blitter->blitRegion(origClip);
        }
        return;
    }

    // If the intersection of the path bounds and the clip bounds
    // will overflow 32767 when << by SHIFT, we can't supersample,
    // so draw without antialiasing.
    SkIRect clippedIR;
    if (isInverse) {
        // If the path is an inverse fill, it's going to fill the entire
        // clip, and we care whether the entire clip exceeds our limits.
        clippedIR = origClip.getBounds();
    } else {
        if (!clippedIR.intersect(ir, origClip.getBounds())) {
            return;
        }
    }
    if (rect_overflows_short_shift(clippedIR, SHIFT)) {
        SkScan::FillPath(path, origClip, blitter);
        return;
    }

    // Our antialiasing can't handle a clip larger than 32767, so we restrict
    // the clip to that limit here. (the runs[] uses int16_t for its index).
    //
    // A more general solution (one that could also eliminate the need to
    // disable aa based on ir bounds (see overflows_short_shift)) would be
    // to tile the clip/target...
    SkRegion tmpClipStorage;
    const SkRegion* clipRgn = &origClip;
    {
        static const int32_t kMaxClipCoord = 32767;
        const SkIRect& bounds = origClip.getBounds();
        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
            clipRgn = &tmpClipStorage;
        }
    }
    // for here down, use clipRgn, not origClip

    SkScanClipper clipper(blitter, clipRgn, ir);
    const SkIRect* clipRect = clipper.getClipRect();

    if (clipper.getBlitter() == nullptr) { // clipped out
        if (isInverse) {
            blitter->blitRegion(*clipRgn);
        }
        return;
    }

    SkASSERT(clipper.getClipRect() == nullptr ||
             *clipper.getClipRect() == clipRgn->getBounds());

    // now use the (possibly wrapped) blitter
    blitter = clipper.getBlitter();

    if (isInverse) {
        sk_blit_above(blitter, ir, *clipRgn);
    }

    SkIRect superRect, *superClipRect = nullptr;

    if (clipRect) {
        superRect.set(SkLeftShift(clipRect->fLeft, SHIFT),
                      SkLeftShift(clipRect->fTop, SHIFT),
                      SkLeftShift(clipRect->fRight, SHIFT),
                      SkLeftShift(clipRect->fBottom, SHIFT));
        superClipRect = &superRect;
    }

    SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);

    // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
    // if we're an inverse filltype
    if (!isInverse && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
        MaskSuperBlitter superBlit(blitter, ir, *clipRgn, isInverse);
        SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
        sk_fill_path(path, clipRgn->getBounds(), &superBlit, ir.fTop, ir.fBottom, SHIFT,
                     superClipRect == nullptr);
    } else {
        SuperBlitter superBlit(blitter, ir, *clipRgn, isInverse);
        sk_fill_path(path, clipRgn->getBounds(), &superBlit, ir.fTop, ir.fBottom, SHIFT,
                     superClipRect == nullptr);
    }

    if (isInverse) {
        sk_blit_below(blitter, ir, *clipRgn);
    }
}

///////////////////////////////////////////////////////////////////////////////

#include "SkRasterClip.h"

void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip,
                      SkBlitter* blitter) {
    if (clip.isEmpty()) {
        return;
    }

    if (clip.isBW()) {
        FillPath(path, clip.bwRgn(), blitter);
    } else {
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::FillPath(path, tmp, &aaBlitter);
    }
}

static bool suitableForAAA(const SkPath& path) {
    if (gSkForceAnalyticAA.load()) {
        return true;
    }
    const SkRect& bounds = path.getBounds();
    // When the path has many points relative to the size of its bounds/resolution,
    // it indicates that the path is not quite smooth in the current resolution:
    // the expected number of turning points in every pixel row/column is significantly greater
    // than zero. Hence Analytic AA is not likely to produce visible quality improvements, and
    // Analytic AA might be slower than supersampling.
    return path.countPoints() < SkTMax(bounds.width(), bounds.height()) / 2 - 10;
}

void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip,
                          SkBlitter* blitter) {
    // Do not use AAA if the path is too complicated:
    // there won't be any speedup or significant visual improvement.
    if (gSkUseAnalyticAA.load() && suitableForAAA(path)) {
        SkScan::AAAFillPath(path, clip, blitter);
        return;
    }

    if (clip.isEmpty()) {
        return;
    }

    if (clip.isBW()) {
        AntiFillPath(path, clip.bwRgn(), blitter);
    } else {
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::AntiFillPath(path, tmp, &aaBlitter, true);
    }
}