/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "SkScanPriv.h"
#include "SkPath.h"
#include "SkMatrix.h"
#include "SkBlitter.h"
#include "SkRegion.h"
#include "SkAntiRun.h"

#define SHIFT   2
#define SCALE   (1 << SHIFT)
#define MASK    (SCALE - 1)
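
// Worked example of the supersampling constants above: with SHIFT == 2 each
// destination pixel is covered by a SCALE x SCALE == 4x4 grid of supersamples,
// so destination x == 10 spans supersampled columns (10 << SHIFT) through
// (10 << SHIFT) + MASK, i.e. 40..43, and a supersampled coordinate maps back
// to its destination pixel with (x >> SHIFT) and to its sub-pixel offset with
// (x & MASK).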

/** @file
    We have two techniques for capturing the output of the supersampler:
    - SUPERMASK, which records a large mask-bitmap
        this is often faster for small, complex objects
    - RLE, which records a rle-encoded scanline
        this is often faster for large objects with big spans

    These blitters use two coordinate systems:
    - destination coordinates, scale equal to the output - often
        abbreviated with 'i' or 'I' in variable names
    - supersampled coordinates, scale equal to the output * SCALE

    Enabling SK_USE_LEGACY_AA_COVERAGE keeps the aa coverage calculations as
    they were before the fix that unified the output of the RLE and MASK
    supersamplers.
 */

//#define FORCE_SUPERMASK
//#define FORCE_RLE
//#define SK_USE_LEGACY_AA_COVERAGE

///////////////////////////////////////////////////////////////////////////////

/// Base class for a single-pass supersampled blitter.
class BaseSuperBlitter : public SkBlitter {
public:
    BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkRegion& clip);

    /// Must be explicitly defined on subclasses.
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[],
                           const int16_t runs[]) SK_OVERRIDE {
        SkDEBUGFAIL("How did I get here?");
    }

    /// May not be called on BaseSuperBlitter because it blits out of order.
    virtual void blitV(int x, int y, int height, SkAlpha alpha) SK_OVERRIDE {
        SkDEBUGFAIL("How did I get here?");
    }

protected:
    SkBlitter* fRealBlitter;
    /// Current y coordinate, in destination coordinates.
    int fCurrIY;
    /// Widest row of region to be blitted, in destination coordinates.
    int fWidth;
    /// Leftmost x coordinate in any row, in destination coordinates.
    int fLeft;
    /// Leftmost x coordinate in any row, in supersampled coordinates.
    int fSuperLeft;

    SkDEBUGCODE(int fCurrX;)
    /// Current y coordinate in supersampled coordinates.
    int fCurrY;
    /// Initial y coordinate (top of bounds).
    int fTop;
};

BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                                   const SkRegion& clip) {
    fRealBlitter = realBlitter;

    /*
     *  We use the clip bounds instead of the ir, since we may be asked to
     *  draw outside of the rect if we're an inverse filltype
     */
    const int left = clip.getBounds().fLeft;
    const int right = clip.getBounds().fRight;

    fLeft = left;
    fSuperLeft = left << SHIFT;
    fWidth = right - left;
#if 0
    fCurrIY = -1;
    fCurrY = -1;
#else
    fTop = ir.fTop;
    fCurrIY = ir.fTop - 1;
    fCurrY = (ir.fTop << SHIFT) - 1;
#endif
    SkDEBUGCODE(fCurrX = -1;)
}

/// Run-length-encoded supersampling antialiased blitter.
class SuperBlitter : public BaseSuperBlitter {
public:
    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                 const SkRegion& clip);

    virtual ~SuperBlitter() {
        this->flush();
        sk_free(fRuns.fRuns);
    }

    /// Once fRuns contains a complete supersampled row, flush() blits
    /// it out through the wrapped blitter.
    void flush();

    /// Blits a row of pixels, with location and width specified
    /// in supersampled coordinates.
    virtual void blitH(int x, int y, int width) SK_OVERRIDE;
    /// Blits a rectangle of pixels, with location and size specified
    /// in supersampled coordinates.
    virtual void blitRect(int x, int y, int width, int height) SK_OVERRIDE;

private:
    SkAlphaRuns fRuns;
    int         fOffsetX;
};

SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                           const SkRegion& clip)
        : BaseSuperBlitter(realBlitter, ir, clip) {
    const int width = fWidth;

    // extra one to store the zero at the end
    fRuns.fRuns = (int16_t*)sk_malloc_throw((width + 1 + (width + 2)/2) * sizeof(int16_t));
    fRuns.fAlpha = (uint8_t*)(fRuns.fRuns + width + 1);
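    // Note on the shared allocation above: fRuns.fRuns needs width + 1
    // int16_t entries (the extra one holds the terminating zero mentioned
    // above), and the remaining (width + 2) / 2 int16_t slots supply at
    // least width + 1 bytes, which fAlpha aliases for the per-pixel alpha
    // values; both arrays live in the single sk_malloc_throw() block.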
    fRuns.reset(width);

    fOffsetX = 0;
}

void SuperBlitter::flush() {
    if (fCurrIY >= fTop) {
        if (!fRuns.empty()) {
            // SkDEBUGCODE(fRuns.dump();)
            fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
            fRuns.reset(fWidth);
            fOffsetX = 0;
        }
        fCurrIY = fTop - 1;
        SkDEBUGCODE(fCurrX = -1;)
    }
}

/** coverage_to_partial_alpha() is used by SkAlphaRuns, which *accumulates*
    SCALE pixels worth of "alpha" in [0, (256/SCALE)] to produce a final
    value in [0, 255] and handles clamping 256->255 itself, with the same
    (alpha - (alpha >> 8)) correction as coverage_to_exact_alpha().
*/
static inline int coverage_to_partial_alpha(int aa) {
    aa <<= 8 - 2*SHIFT;
#ifdef SK_USE_LEGACY_AA_COVERAGE
    aa -= aa >> (8 - SHIFT - 1);
#endif
    return aa;
}
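
// For example, with SHIFT == 2 a single supersampled row reports aa in
// [0, 4] covered samples, which the shift above maps to [0, 64]. SkAlphaRuns
// accumulates SCALE == 4 such rows per destination scanline, so a fully
// covered pixel sums to 64 + 64 + 64 + 63 == 255 (the last row's maxValue is
// one smaller, see the last argument to fRuns.add() in SuperBlitter::blitH()),
// while abutting partial spans that would hit 256 are clamped back to 255.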

/** coverage_to_exact_alpha() is used by our blitter, which wants a final
    value in [0, 255].
*/
static inline int coverage_to_exact_alpha(int aa) {
    int alpha = (256 >> SHIFT) * aa;
    // clamp 256->255
    return alpha - (alpha >> 8);
}
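
// Spot checks of the mapping above with SHIFT == 2: aa == 4 (fully covered)
// gives 256 - (256 >> 8) == 255, aa == 2 gives 128, and aa == 1 gives 64,
// so only the full-coverage case needs the 256->255 clamp.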

void SuperBlitter::blitH(int x, int y, int width) {
    SkASSERT(width > 0);

    int iy = y >> SHIFT;
    SkASSERT(iy >= fCurrIY);

    x -= fSuperLeft;
    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

#ifdef SK_DEBUG
    SkASSERT(y != fCurrY || x >= fCurrX);
#endif
    SkASSERT(y >= fCurrY);
    if (fCurrY != y) {
        fOffsetX = 0;
        fCurrY = y;
    }

    if (iy != fCurrIY) {  // new scanline
        this->flush();
        fCurrIY = iy;
    }

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // integer-pixel-aligned ends of blit, rounded out
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;

    if (n < 0) {
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else {
        if (fb == 0) {
            n += 1;
        } else {
            fb = SCALE - fb;
        }
    }
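
    // Worked example of the split above, with SHIFT == 2: start == 3 and
    // stop == 12 give fb == 3, fe == 0, n == 2; since fb != 0 it becomes
    // SCALE - 3 == 1, so the leading pixel gets 1 covered supersample, two
    // whole pixels get full coverage, and there is no trailing partial pixel
    // (1 + 2*4 + 0 == 9 == width). The n < 0 branch handles a span that
    // starts and stops inside the same destination pixel, e.g. start == 5,
    // stop == 7 collapses to a single pixel with fe - fb == 2 covered samples.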

    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
                         n, coverage_to_partial_alpha(fe),
                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
                         fOffsetX);

#ifdef SK_DEBUG
    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
    fCurrX = x + width;
#endif
}

#if 0 // UNUSED
static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
                               int n, U8CPU riteA) {
    SkASSERT(leftA <= 0xFF);
    SkASSERT(riteA <= 0xFF);

    int16_t* run = runs.fRuns;
    uint8_t* aa = runs.fAlpha;

    if (ileft > 0) {
        run[0] = ileft;
        aa[0] = 0;
        run += ileft;
        aa += ileft;
    }

    SkASSERT(leftA < 0xFF);
    if (leftA > 0) {
        *run++ = 1;
        *aa++ = leftA;
    }

    if (n > 0) {
        run[0] = n;
        aa[0] = 0xFF;
        run += n;
        aa += n;
    }

    SkASSERT(riteA < 0xFF);
    if (riteA > 0) {
        *run++ = 1;
        *aa++ = riteA;
    }
    run[0] = 0;
}
#endif

void SuperBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(width > 0);
    SkASSERT(height > 0);

    // blit leading rows
    while ((y & MASK)) {
        this->blitH(x, y++, width);
        if (--height <= 0) {
            return;
        }
    }
    SkASSERT(height > 0);

    // Since this is a rect, instead of blitting supersampled rows one at a
    // time and then resolving to the destination canvas, we can blit
    // directly to the destination canvas one row per SCALE supersampled rows.
    int start_y = y >> SHIFT;
    int stop_y = (y + height) >> SHIFT;
    int count = stop_y - start_y;
    if (count > 0) {
        y += count << SHIFT;
        height -= count << SHIFT;

        // save original X for our tail blitH() loop at the bottom
        int origX = x;

        x -= fSuperLeft;
        // hack, until I figure out why my cubics (I think) go beyond the bounds
        if (x < 0) {
            width += x;
            x = 0;
        }

        // There is always a left column, a middle, and a right column.
        // ileft is the destination x of the first pixel of the entire rect.
        // xleft is (SCALE - # of covered supersampled pixels) in that
        // destination pixel.
        int ileft = x >> SHIFT;
        int xleft = x & MASK;
        // irite is the destination x of the last pixel of the OPAQUE section.
        // xrite is the number of supersampled pixels extending beyond irite;
        // xrite/SCALE should give us alpha.
        int irite = (x + width) >> SHIFT;
        int xrite = (x + width) & MASK;
        if (!xrite) {
            xrite = SCALE;
            irite--;
        }
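
        // Worked example with SHIFT == 2: x == 3 and width == 9 give
        // ileft == 0 with xleft == 3 (only one of the four supersamples in
        // pixel 0 is covered), and irite == 3 with xrite == 0, which the
        // branch above rewrites as xrite == SCALE and irite == 2; pixel 2 is
        // then the last fully covered column and the rect ends exactly on a
        // pixel boundary, so the right edge contributes full coverage.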

        // Need to call flush() to clean up pending draws before we
        // even consider blitV(), since otherwise it can look nonmonotonic.
        SkASSERT(start_y > fCurrIY);
        this->flush();

        int n = irite - ileft - 1;
        if (n < 0) {
            // If n < 0, we'll only have a single partially-transparent column
            // of pixels to render.
            xleft = xrite - xleft;
            SkASSERT(xleft <= SCALE);
            SkASSERT(xleft > 0);
            xrite = 0;
            fRealBlitter->blitV(ileft + fLeft, start_y, count,
                                coverage_to_exact_alpha(xleft));
        } else {
            // With n = 0, we have two possibly-transparent columns of pixels
            // to render; with n > 0, we have opaque columns between them.

            xleft = SCALE - xleft;

            // Using coverage_to_exact_alpha is not consistent with blitH()
            const int coverageL = coverage_to_exact_alpha(xleft);
            const int coverageR = coverage_to_exact_alpha(xrite);

            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);

            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
                                       coverageL, coverageR);
        }

        // preamble for our next call to blitH()
        fCurrIY = stop_y - 1;
        fOffsetX = 0;
        fCurrY = y - 1;
        fRuns.reset(fWidth);
        x = origX;
    }

    // catch any remaining few rows
    SkASSERT(height <= MASK);
    while (--height >= 0) {
        this->blitH(x, y++, width);
    }
}

///////////////////////////////////////////////////////////////////////////////

/// Masked supersampling antialiased blitter.
class MaskSuperBlitter : public BaseSuperBlitter {
public:
    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkRegion& clip);
    virtual ~MaskSuperBlitter() {
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    virtual void blitH(int x, int y, int width) SK_OVERRIDE;

    static bool CanHandleRect(const SkIRect& bounds) {
#ifdef FORCE_RLE
        return false;
#endif
        int width = bounds.width();
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();
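
        // Rough sizing example: with the default limits below, a 32x32 rect
        // needs SkAlign4(32) * 32 == 1024 bytes of A8 mask, which is exactly
        // kMAX_STORAGE, so anything wider or taller falls back to the RLE
        // SuperBlitter path.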

        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
               (storage <= MaskSuperBlitter::kMAX_STORAGE);
    }

private:
    enum {
#ifdef FORCE_SUPERMASK
        kMAX_WIDTH = 2048,
        kMAX_STORAGE = 1024 * 1024 * 2
#else
        kMAX_WIDTH = 32,    // so we don't try to do very wide things, where the RLE blitter would be faster
        kMAX_STORAGE = 1024
#endif
    };

    SkMask  fMask;
    SkIRect fClipRect;
    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
    // perform a test to see if stopAlpha != 0
    uint32_t fStorage[(kMAX_STORAGE >> 2) + 1];
};

MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                                   const SkRegion& clip)
        : BaseSuperBlitter(realBlitter, ir, clip) {
    SkASSERT(CanHandleRect(ir));

    fMask.fImage = (uint8_t*)fStorage;
    fMask.fBounds = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat = SkMask::kA8_Format;

    fClipRect = ir;
    fClipRect.intersect(clip.getBounds());

    // For valgrind, write 1 extra byte at the end so we don't read
    // uninitialized memory. See comment in add_aa_span and fStorage[].
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
}

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
    /*  I should be able to just add alpha[x] + startAlpha.
        However, if the trailing edge of the previous span and the leading
        edge of the current span round to the same super-sampled x value,
        I might overflow to 256 with this add, hence the funny subtract.
    */
    unsigned tmp = *alpha + startAlpha;
    SkASSERT(tmp <= 256);
    *alpha = SkToU8(tmp - (tmp >> 8));
}

static inline uint32_t quadplicate_byte(U8CPU value) {
    uint32_t pair = (value << 8) | value;
    return (pair << 16) | pair;
}
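
// e.g. quadplicate_byte(0x40) == 0x40404040, letting the quad loop in
// add_aa_span() below bump four adjacent mask bytes with one 32-bit add.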

// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
// only ever call us with at most enough to hit 256 (never larger), so it is
// enough to just subtract the high-bit. Actually clamping with a branch would
// be slower (e.g. if (tmp > 255) tmp = 255;)
//
static inline void saturated_add(uint8_t* ptr, U8CPU add) {
    unsigned tmp = *ptr + add;
    SkASSERT(tmp <= 256);
    *ptr = SkToU8(tmp - (tmp >> 8));
}
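
// Spot check of the branchless clamp: tmp == 256 yields 256 - (256 >> 8) ==
// 255, while any tmp <= 255 is returned unchanged since (tmp >> 8) == 0.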

// minimum count before we want to setup an inner loop, adding 4-at-a-time
#define MIN_COUNT_FOR_QUAD_LOOP 16

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
                        U8CPU stopAlpha, U8CPU maxValue) {
    SkASSERT(middleCount >= 0);

    saturated_add(alpha, startAlpha);
    alpha += 1;

    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
        // loop until we're quad-byte aligned
        while (SkTCast<intptr_t>(alpha) & 0x3) {
            alpha[0] = SkToU8(alpha[0] + maxValue);
            alpha += 1;
            middleCount -= 1;
        }

        int bigCount = middleCount >> 2;
        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
        uint32_t qval = quadplicate_byte(maxValue);
        do {
            *qptr++ += qval;
        } while (--bigCount > 0);

        middleCount &= 3;
        alpha = reinterpret_cast<uint8_t*>(qptr);
        // fall through to the following while-loop
    }

    while (--middleCount >= 0) {
        alpha[0] = SkToU8(alpha[0] + maxValue);
        alpha += 1;
    }

    // potentially this can be off the end of our "legal" alpha values, but that
    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
    // every time (slow), we just do it, and ensure that we've allocated extra
    // space (see the + 1 comment on fStorage[]).
    saturated_add(alpha, stopAlpha);
}
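
// A rough end-to-end example with SHIFT == 2: a supersampled span with
// start == 3 and stop == 81 in MaskSuperBlitter::blitH() below splits into
// fb == 1, n == 19, fe == 1 and becomes add_aa_span(row, 16, 19, 16, maxValue):
// the leading mask byte gains 16, the 19 fully covered bytes each gain
// maxValue (64, or 63 on the last supersampled row) via the quad loop since
// 19 >= MIN_COUNT_FOR_QUAD_LOOP, and the trailing byte gains 16. After all
// four rows the interior bytes reach 64 + 64 + 64 + 63 == 255.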

void MaskSuperBlitter::blitH(int x, int y, int width) {
    int iy = (y >> SHIFT);

    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
    iy -= fMask.fBounds.fTop;   // make it relative to 0

    // This should never happen, but it does. Until the true cause is
    // discovered, let's skip this span instead of crashing.
    // See http://crbug.com/17569.
    if (iy < 0) {
        return;
    }

#ifdef SK_DEBUG
    {
        int ix = x >> SHIFT;
        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
    }
#endif

    x -= (fMask.fBounds.fLeft << SHIFT);

    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;


    if (n < 0) {
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
    } else {
        fb = SCALE - fb;
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fb),
                    n, coverage_to_partial_alpha(fe),
                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
    }

#ifdef SK_DEBUG
    fCurrX = x + width;
#endif
}

///////////////////////////////////////////////////////////////////////////////

static bool fitsInsideLimit(const SkRect& r, SkScalar max) {
    const SkScalar min = -max;
    return  r.fLeft > min && r.fTop > min &&
            r.fRight < max && r.fBottom < max;
}

static int overflows_short_shift(int value, int shift) {
    const int s = 16 + shift;
    return (value << s >> s) - value;
}
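
// A sanity example of the overflow test, matching the asserts below: with
// shift == 2 the value 8191 survives the 18-bit round trip (8191 << 18 >> 18
// == 8191, so the function returns 0), while 8192 << SHIFT == 32768 no longer
// fits in an int16_t, so the round trip comes back different and the result
// is nonzero.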

/**
  Would any of the coordinates of this rectangle not fit in a short,
  when left-shifted by shift?
*/
static int rect_overflows_short_shift(SkIRect rect, int shift) {
    SkASSERT(!overflows_short_shift(8191, SHIFT));
    SkASSERT(overflows_short_shift(8192, SHIFT));
    SkASSERT(!overflows_short_shift(32767, 0));
    SkASSERT(overflows_short_shift(32768, 0));

    // Since we expect these to succeed, we bit-or together
    // for a tiny extra bit of speed.
    return overflows_short_shift(rect.fLeft, SHIFT) |
           overflows_short_shift(rect.fRight, SHIFT) |
           overflows_short_shift(rect.fTop, SHIFT) |
           overflows_short_shift(rect.fBottom, SHIFT);
}

static bool safeRoundOut(const SkRect& src, SkIRect* dst, int32_t maxInt) {
#ifdef SK_SCALAR_IS_FIXED
    // the max-int (shifted) is exactly what we want to compare against, to know
    // if we can survive shifting our fixed-point coordinates
    const SkFixed maxScalar = maxInt;
#else
    const SkScalar maxScalar = SkIntToScalar(maxInt);
#endif
    if (fitsInsideLimit(src, maxScalar)) {
        src.roundOut(dst);
        return true;
    }
    return false;
}

void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
                          SkBlitter* blitter, bool forceRLE) {
    if (origClip.isEmpty()) {
        return;
    }

    SkIRect ir;

    if (!safeRoundOut(path.getBounds(), &ir, SK_MaxS32 >> SHIFT)) {
#if 0
        const SkRect& r = path.getBounds();
        SkDebugf("--- bounds can't fit in SkIRect: %g %g %g %g\n",
                 SkScalarToFloat(r.fLeft), SkScalarToFloat(r.fTop),
                 SkScalarToFloat(r.fRight), SkScalarToFloat(r.fBottom));
#endif
        return;
    }
    if (ir.isEmpty()) {
        if (path.isInverseFillType()) {
            blitter->blitRegion(origClip);
        }
        return;
    }

    // If the intersection of the path bounds and the clip bounds
    // will overflow 32767 when << by SHIFT, we can't supersample,
    // so draw without antialiasing.
    SkIRect clippedIR;
    if (path.isInverseFillType()) {
        // If the path is an inverse fill, it's going to fill the entire
        // clip, and we care whether the entire clip exceeds our limits.
        clippedIR = origClip.getBounds();
    } else {
        if (!clippedIR.intersect(ir, origClip.getBounds())) {
            return;
        }
    }
    if (rect_overflows_short_shift(clippedIR, SHIFT)) {
        SkScan::FillPath(path, origClip, blitter);
        return;
    }

    // Our antialiasing can't handle a clip larger than 32767, so we restrict
    // the clip to that limit here. (the runs[] uses int16_t for its index).
    //
    // A more general solution (one that could also eliminate the need to
    // disable aa based on ir bounds (see overflows_short_shift)) would be
    // to tile the clip/target...
    SkRegion tmpClipStorage;
    const SkRegion* clipRgn = &origClip;
    {
        static const int32_t kMaxClipCoord = 32767;
        const SkIRect& bounds = origClip.getBounds();
        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
            clipRgn = &tmpClipStorage;
        }
    }
    // for here down, use clipRgn, not origClip

    SkScanClipper clipper(blitter, clipRgn, ir);
    const SkIRect* clipRect = clipper.getClipRect();

    if (clipper.getBlitter() == NULL) { // clipped out
        if (path.isInverseFillType()) {
            blitter->blitRegion(*clipRgn);
        }
        return;
    }

    // now use the (possibly wrapped) blitter
    blitter = clipper.getBlitter();

    if (path.isInverseFillType()) {
        sk_blit_above(blitter, ir, *clipRgn);
    }

    SkIRect superRect, *superClipRect = NULL;

    if (clipRect) {
        superRect.set(clipRect->fLeft << SHIFT, clipRect->fTop << SHIFT,
                      clipRect->fRight << SHIFT, clipRect->fBottom << SHIFT);
        superClipRect = &superRect;
    }

    SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);

    // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
    // if we're an inverse filltype
    if (!path.isInverseFillType() && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
        MaskSuperBlitter superBlit(blitter, ir, *clipRgn);
        SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
    } else {
        SuperBlitter superBlit(blitter, ir, *clipRgn);
        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
    }

    if (path.isInverseFillType()) {
        sk_blit_below(blitter, ir, *clipRgn);
    }
}

///////////////////////////////////////////////////////////////////////////////

#include "SkRasterClip.h"

void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip,
                      SkBlitter* blitter) {
    if (clip.isEmpty()) {
        return;
    }

    if (clip.isBW()) {
        FillPath(path, clip.bwRgn(), blitter);
    } else {
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::FillPath(path, tmp, &aaBlitter);
    }
}

void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip,
                          SkBlitter* blitter) {
    if (clip.isEmpty()) {
        return;
    }

    if (clip.isBW()) {
        AntiFillPath(path, clip.bwRgn(), blitter);
    } else {
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::AntiFillPath(path, tmp, &aaBlitter, true);
    }
}