/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/core/SkScanPriv.h"

#include "include/core/SkMatrix.h"
#include "include/core/SkPath.h"
#include "include/core/SkRegion.h"
#include "include/private/SkTo.h"
#include "src/core/SkAntiRun.h"
#include "src/core/SkBlitter.h"
#include "src/core/SkPathPriv.h"

#define SHIFT   SK_SUPERSAMPLE_SHIFT
#define SCALE   (1 << SHIFT)
#define MASK    (SCALE - 1)

/** @file
    We have two techniques for capturing the output of the supersampler:
    - SUPERMASK, which records a large mask-bitmap
        this is often faster for small, complex objects
    - RLE, which records an rle-encoded scanline
        this is often faster for large objects with big spans

    These blitters use two coordinate systems:
    - destination coordinates, scale equal to the output - often
        abbreviated with 'i' or 'I' in variable names
    - supersampled coordinates, scale equal to the output * SCALE
 */
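
// Illustrative sketch (not part of the original source): a worked example of how the
// two coordinate systems relate, assuming SHIFT == 2 (so SCALE == 4). Destination
// pixel x covers supersampled columns [x << SHIFT, (x + 1) << SHIFT).
#if 0   // example only
static void example_coordinate_systems() {
    const int destX = 10;                                   // destination (pixel) coordinate
    const int superLeft  = SkLeftShift(destX, SHIFT);       // 40 when SHIFT == 2: first supersample column
    const int superRight = SkLeftShift(destX + 1, SHIFT);   // 44 when SHIFT == 2: one past the last column
    SkASSERT(superRight - superLeft == SCALE);              // each destination pixel spans SCALE supersamples
    SkASSERT((superLeft >> SHIFT) == destX);                // and >> SHIFT maps back to destination space
}
#endif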

//#define FORCE_SUPERMASK
//#define FORCE_RLE

///////////////////////////////////////////////////////////////////////////////

/// Base class for a single-pass supersampled blitter.
class BaseSuperBlitter : public SkBlitter {
public:
    BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkIRect& clipBounds, bool isInverse);

    /// Must be explicitly defined on subclasses.
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[],
                           const int16_t runs[]) override {
        SkDEBUGFAIL("How did I get here?");
    }
    /// May not be called on BaseSuperBlitter because it blits out of order.
    void blitV(int x, int y, int height, SkAlpha alpha) override {
        SkDEBUGFAIL("How did I get here?");
    }

protected:
    SkBlitter*  fRealBlitter;
    /// Current y coordinate, in destination coordinates.
    int         fCurrIY;
    /// Widest row of region to be blitted, in destination coordinates.
    int         fWidth;
    /// Leftmost x coordinate in any row, in destination coordinates.
    int         fLeft;
    /// Leftmost x coordinate in any row, in supersampled coordinates.
    int         fSuperLeft;

    SkDEBUGCODE(int fCurrX;)
    /// Current y coordinate in supersampled coordinates.
    int fCurrY;
    /// Initial y coordinate (top of bounds).
    int fTop;

    SkIRect fSectBounds;
};

BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlit, const SkIRect& ir,
                                   const SkIRect& clipBounds, bool isInverse) {
    fRealBlitter = realBlit;

    SkIRect sectBounds;
    if (isInverse) {
        // We use the clip bounds instead of the ir, since we may be asked to
        // draw outside of the rect when we're an inverse filltype
        sectBounds = clipBounds;
    } else {
        if (!sectBounds.intersect(ir, clipBounds)) {
            sectBounds.setEmpty();
        }
    }

    const int left = sectBounds.left();
    const int right = sectBounds.right();

    fLeft = left;
    fSuperLeft = SkLeftShift(left, SHIFT);
    fWidth = right - left;
    fTop = sectBounds.top();
    fCurrIY = fTop - 1;
    fCurrY = SkLeftShift(fTop, SHIFT) - 1;

    SkDEBUGCODE(fCurrX = -1;)
}

/// Run-length-encoded supersampling antialiased blitter.
class SuperBlitter : public BaseSuperBlitter {
public:
    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
                 bool isInverse);

    ~SuperBlitter() override {
        this->flush();
    }

    /// Once fRuns contains a complete supersampled row, flush() blits
    /// it out through the wrapped blitter.
    void flush();

    /// Blits a row of pixels, with location and width specified
    /// in supersampled coordinates.
    void blitH(int x, int y, int width) override;
    /// Blits a rectangle of pixels, with location and size specified
    /// in supersampled coordinates.
    void blitRect(int x, int y, int width, int height) override;

private:
    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int         fRunsToBuffer;
    void*       fRunsBuffer;
    int         fCurrentRun;
    SkAlphaRuns fRuns;

    // extra one to store the zero at the end
    int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(
            reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

    int         fOffsetX;
};
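
// Illustrative sketch (not part of the original source): how getRunsSz() sizes one slot
// of the circular buffer, using a hypothetical fWidth of 10. The run array holds
// fWidth + 1 int16_t entries, and the alpha array that starts right after it gets at
// least fWidth + 1 bytes (rounded up to an even byte count so the next slot stays
// int16_t-aligned).
#if 0   // example only
static void example_runs_layout() {
    const int width = 10;                                       // stand-in for fWidth
    const size_t runsBytes  = (width + 1) * sizeof(int16_t);    // 22 bytes of run lengths
    const size_t slotBytes  = (width + 1 + (width + 2) / 2) * sizeof(int16_t);  // 34 bytes per slot
    const size_t alphaBytes = slotBytes - runsBytes;            // 12 bytes left for alphas
    SkASSERT(alphaBytes >= size_t(width + 1));                  // room for one alpha per run slot
}
#endif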

SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
                           bool isInverse)
        : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
{
    fRunsToBuffer = realBlitter->requestRowsPreserved();
    fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
    fCurrentRun = -1;

    this->advanceRuns();

    fOffsetX = 0;
}

void SuperBlitter::flush() {
    if (fCurrIY >= fTop) {

        SkASSERT(fCurrentRun < fRunsToBuffer);
        if (!fRuns.empty()) {
            // SkDEBUGCODE(fRuns.dump();)
            fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
            this->advanceRuns();
            fOffsetX = 0;
        }

        fCurrIY = fTop - 1;
        SkDEBUGCODE(fCurrX = -1;)
    }
}

/** coverage_to_partial_alpha() is being used by SkAlphaRuns, which
    *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)]
    to produce a final value in [0, 255] and handles clamping 256->255
    itself, with the same (alpha - (alpha >> 8)) correction as
    coverage_to_exact_alpha().
*/
static inline int coverage_to_partial_alpha(int aa) {
    aa <<= 8 - 2*SHIFT;
    return aa;
}

/** coverage_to_exact_alpha() is being used by our blitter, which wants
    a final value in [0, 255].
*/
static inline int coverage_to_exact_alpha(int aa) {
    int alpha = (256 >> SHIFT) * aa;
    // clamp 256->255
    return alpha - (alpha >> 8);
}
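
// Illustrative sketch (not part of the original source): the two coverage mappings for
// concrete inputs. The partial mapping contributes at most 256/SCALE per supersampled
// row, so SCALE fully covered rows accumulate to 256, which SkAlphaRuns clamps back to
// 255; the exact mapping goes straight to [0, 255] in one step. The "64" comments below
// assume SHIFT == 2 (SCALE == 4).
#if 0   // example only
static void example_coverage_mappings() {
    // partial: per-row contribution, accumulated by SkAlphaRuns
    SkASSERT(coverage_to_partial_alpha(0)     == 0);
    SkASSERT(coverage_to_partial_alpha(SCALE) == 256 / SCALE);    // 64 when SHIFT == 2

    // exact: final alpha for a fully resolved destination pixel
    SkASSERT(coverage_to_exact_alpha(0)         == 0);
    SkASSERT(coverage_to_exact_alpha(SCALE / 2) == 128);          // half covered
    SkASSERT(coverage_to_exact_alpha(SCALE)     == 255);          // full coverage, clamped from 256
}
#endif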

void SuperBlitter::blitH(int x, int y, int width) {
    SkASSERT(width > 0);

    int iy = y >> SHIFT;
    SkASSERT(iy >= fCurrIY);

    x -= fSuperLeft;
    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

#ifdef SK_DEBUG
    SkASSERT(y != fCurrY || x >= fCurrX);
#endif
    SkASSERT(y >= fCurrY);
    if (fCurrY != y) {
        fOffsetX = 0;
        fCurrY = y;
    }

    if (iy != fCurrIY) {  // new scanline
        this->flush();
        fCurrIY = iy;
    }

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // integer-pixel-aligned ends of blit, rounded out
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;

    if (n < 0) {
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else {
        if (fb == 0) {
            n += 1;
        } else {
            fb = SCALE - fb;
        }
    }

    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
                         n, coverage_to_partial_alpha(fe),
                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
                         fOffsetX);

#ifdef SK_DEBUG
    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
    fCurrX = x + width;
#endif
}
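
// Illustrative sketch (not part of the original source): a worked example of the
// fb/fe/n split in blitH() above, assuming SHIFT == 2 (MASK == 3, SCALE == 4). A
// supersampled span [5, 14) touches destination pixels 1..3: pixel 1 gets 3 of its 4
// supersample columns, pixel 2 is fully covered (n == 1), and pixel 3 gets 2 columns.
#if 0   // example only
static void example_span_split() {
    const int start = 5, stop = 14;                    // supersampled coordinates
    int fb = start & MASK;                             // 1: columns of pixel 1 left of the span
    int fe = stop & MASK;                              // 2: columns of pixel 3 inside the span
    int n  = (stop >> SHIFT) - (start >> SHIFT) - 1;   // 1 fully covered pixel in between

    SkASSERT(n == 1 && fb == 1 && fe == 2);
    fb = SCALE - fb;                                   // 3: columns of pixel 1 inside the span
    SkASSERT(fb + n * SCALE + fe == stop - start);     // coverage accounts for the whole span
}
#endif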

#if 0 // UNUSED
static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
                               int n, U8CPU riteA) {
    SkASSERT(leftA <= 0xFF);
    SkASSERT(riteA <= 0xFF);

    int16_t* run = runs.fRuns;
    uint8_t* aa = runs.fAlpha;

    if (ileft > 0) {
        run[0] = ileft;
        aa[0] = 0;
        run += ileft;
        aa += ileft;
    }

    SkASSERT(leftA < 0xFF);
    if (leftA > 0) {
        *run++ = 1;
        *aa++ = leftA;
    }

    if (n > 0) {
        run[0] = n;
        aa[0] = 0xFF;
        run += n;
        aa += n;
    }

    SkASSERT(riteA < 0xFF);
    if (riteA > 0) {
        *run++ = 1;
        *aa++ = riteA;
    }
    run[0] = 0;
}
#endif

void SuperBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(width > 0);
    SkASSERT(height > 0);

    // blit leading rows
    while ((y & MASK)) {
        this->blitH(x, y++, width);
        if (--height <= 0) {
            return;
        }
    }
    SkASSERT(height > 0);

    // Since this is a rect, instead of blitting supersampled rows one at a
    // time and then resolving to the destination canvas, we can blit
    // directly to the destination canvas one row per SCALE supersampled rows.
    int start_y = y >> SHIFT;
    int stop_y = (y + height) >> SHIFT;
    int count = stop_y - start_y;
    if (count > 0) {
        y += count << SHIFT;
        height -= count << SHIFT;

        // save original X for our tail blitH() loop at the bottom
        int origX = x;

        x -= fSuperLeft;
        // hack, until I figure out why my cubics (I think) go beyond the bounds
        if (x < 0) {
            width += x;
            x = 0;
        }

        // There is always a left column, a middle, and a right column.
        // ileft is the destination x of the first pixel of the entire rect.
        // xleft is (SCALE - # of covered supersampled pixels) in that
        // destination pixel.
        int ileft = x >> SHIFT;
        int xleft = x & MASK;
        // irite is the destination x of the last pixel of the OPAQUE section.
        // xrite is the number of supersampled pixels extending beyond irite;
        // xrite/SCALE should give us alpha.
        int irite = (x + width) >> SHIFT;
        int xrite = (x + width) & MASK;
        if (!xrite) {
            xrite = SCALE;
            irite--;
        }

        // Need to call flush() to clean up pending draws before we
        // even consider blitV(), since otherwise it can look nonmonotonic.
        SkASSERT(start_y > fCurrIY);
        this->flush();

        int n = irite - ileft - 1;
        if (n < 0) {
            // If n < 0, we'll only have a single partially-transparent column
            // of pixels to render.
            xleft = xrite - xleft;
            SkASSERT(xleft <= SCALE);
            SkASSERT(xleft > 0);
            fRealBlitter->blitV(ileft + fLeft, start_y, count,
                coverage_to_exact_alpha(xleft));
        } else {
            // With n = 0, we have two possibly-transparent columns of pixels
            // to render; with n > 0, we have opaque columns between them.

            xleft = SCALE - xleft;

            // Using coverage_to_exact_alpha is not consistent with blitH()
            const int coverageL = coverage_to_exact_alpha(xleft);
            const int coverageR = coverage_to_exact_alpha(xrite);

            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);

            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
                                       coverageL, coverageR);
        }

        // preamble for our next call to blitH()
        fCurrIY = stop_y - 1;
        fOffsetX = 0;
        fCurrY = y - 1;
        fRuns.reset(fWidth);
        x = origX;
    }

    // catch any remaining few rows
    SkASSERT(height <= MASK);
    while (--height >= 0) {
        this->blitH(x, y++, width);
    }
}

///////////////////////////////////////////////////////////////////////////////

/// Masked supersampling antialiased blitter.
class MaskSuperBlitter : public BaseSuperBlitter {
public:
    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect&, bool isInverse);
    ~MaskSuperBlitter() override {
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    void blitH(int x, int y, int width) override;

    static bool CanHandleRect(const SkIRect& bounds) {
#ifdef FORCE_RLE
        return false;
#endif
        int width = bounds.width();
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
               (storage <= MaskSuperBlitter::kMAX_STORAGE);
    }

private:
    enum {
#ifdef FORCE_SUPERMASK
        kMAX_WIDTH = 2048,
        kMAX_STORAGE = 1024 * 1024 * 2
#else
        kMAX_WIDTH = 32,    // so we don't try to do very wide things, where the RLE blitter would be faster
        kMAX_STORAGE = 1024
#endif
    };

    SkMask      fMask;
    SkIRect     fClipRect;
    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
    // perform a test to see if stopAlpha != 0
    uint32_t    fStorage[(kMAX_STORAGE >> 2) + 1];
};

MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                                   const SkIRect& clipBounds, bool isInverse)
    : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
{
    SkASSERT(CanHandleRect(ir));
    SkASSERT(!isInverse);

    fMask.fImage    = (uint8_t*)fStorage;
    fMask.fBounds   = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat   = SkMask::kA8_Format;

    fClipRect = ir;
    if (!fClipRect.intersect(clipBounds)) {
        SkASSERT(0);
        fClipRect.setEmpty();
    }

    // For valgrind, write 1 extra byte at the end so we don't read
    // uninitialized memory. See comment in add_aa_span and fStorage[].
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
}

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
    /*  I should be able to just add alpha[x] + startAlpha.
        However, if the trailing edge of the previous span and the leading
        edge of the current span round to the same super-sampled x value,
        I might overflow to 256 with this add, hence the funny subtract.
    */
    unsigned tmp = *alpha + startAlpha;
    SkASSERT(tmp <= 256);
    *alpha = SkToU8(tmp - (tmp >> 8));
}

static inline uint32_t quadplicate_byte(U8CPU value) {
    uint32_t pair = (value << 8) | value;
    return (pair << 16) | pair;
}
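
// Illustrative sketch (not part of the original source): what quadplicate_byte()
// produces and why it is useful. Replicating a byte into all four lanes of a uint32_t
// lets the quad loop in add_aa_span() below bump four alpha bytes with a single 32-bit
// add; that is only safe because the per-row maxValue is small enough that no lane sum
// can carry into its neighbor.
#if 0   // example only
static void example_quadplicate_byte() {
    SkASSERT(quadplicate_byte(0x00) == 0x00000000u);
    SkASSERT(quadplicate_byte(0x3F) == 0x3F3F3F3Fu);

    // One 32-bit add updates four packed alpha bytes at once, as long as
    // no per-byte sum exceeds 0xFF (so no carry crosses lane boundaries).
    uint32_t fourAlphas = 0x01020304u;
    fourAlphas += quadplicate_byte(0x10);
    SkASSERT(fourAlphas == 0x11121314u);
}
#endif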

// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
// only ever call us with at most enough to hit 256 (never larger), so it is
// enough to just subtract the high-bit. Actually clamping with a branch would
// be slower (e.g. if (tmp > 255) tmp = 255;)
//
static inline void saturated_add(uint8_t* ptr, U8CPU add) {
    unsigned tmp = *ptr + add;
    SkASSERT(tmp <= 256);
    *ptr = SkToU8(tmp - (tmp >> 8));
}

// minimum count before we want to setup an inner loop, adding 4-at-a-time
#define MIN_COUNT_FOR_QUAD_LOOP  16

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
                        U8CPU stopAlpha, U8CPU maxValue) {
    SkASSERT(middleCount >= 0);

    saturated_add(alpha, startAlpha);
    alpha += 1;

    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
        // loop until we're quad-byte aligned
        while (reinterpret_cast<intptr_t>(alpha) & 0x3) {
            alpha[0] = SkToU8(alpha[0] + maxValue);
            alpha += 1;
            middleCount -= 1;
        }

        int bigCount = middleCount >> 2;
        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
        uint32_t qval = quadplicate_byte(maxValue);
        do {
            *qptr++ += qval;
        } while (--bigCount > 0);

        middleCount &= 3;
        alpha = reinterpret_cast<uint8_t*> (qptr);
        // fall through to the following while-loop
    }

    while (--middleCount >= 0) {
        alpha[0] = SkToU8(alpha[0] + maxValue);
        alpha += 1;
    }

    // potentially this can be off the end of our "legal" alpha values, but that
    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
    // every time (slow), we just do it, and ensure that we've allocated extra space
    // (see the + 1 comment in fStorage[])
    saturated_add(alpha, stopAlpha);
}

void MaskSuperBlitter::blitH(int x, int y, int width) {
    int iy = (y >> SHIFT);

    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
    iy -= fMask.fBounds.fTop;   // make it relative to 0

    // This should never happen, but it does.  Until the true cause is
    // discovered, let's skip this span instead of crashing.
    // See http://crbug.com/17569.
    if (iy < 0) {
        return;
    }

#ifdef SK_DEBUG
    {
        int ix = x >> SHIFT;
        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
    }
#endif

    x -= SkLeftShift(fMask.fBounds.fLeft, SHIFT);

    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;


    if (n < 0) {
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
    } else {
        fb = SCALE - fb;
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row,  coverage_to_partial_alpha(fb),
                    n, coverage_to_partial_alpha(fe),
                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
    }

#ifdef SK_DEBUG
    fCurrX = x + width;
#endif
}

///////////////////////////////////////////////////////////////////////////////

static SkIRect safeRoundOut(const SkRect& src) {
    // roundOut will pin huge floats to max/min int
    SkIRect dst = src.roundOut();

    // intersect with a smaller huge rect, so the rect will not be considered empty for being
    // too large. e.g. { -SK_MaxS32 ... SK_MaxS32 } is considered empty because its width
    // exceeds signed 32bit.
    const int32_t limit = SK_MaxS32 >> SK_SUPERSAMPLE_SHIFT;
    (void)dst.intersect({ -limit, -limit, limit, limit});

    return dst;
}

constexpr int kSampleSize = 8;
#if !defined(SK_DISABLE_AAA)
    constexpr SkScalar kComplexityThreshold = 0.25;
#endif

static void compute_complexity(const SkPath& path, SkScalar& avgLength, SkScalar& complexity) {
    int n = path.countPoints();
    if (n < kSampleSize || path.getBounds().isEmpty()) {
        // set to invalid value to indicate that we failed to compute
        avgLength = complexity = -1;
        return;
    }

    SkScalar sumLength = 0;
    SkPoint lastPoint = path.getPoint(0);
    for (int i = 1; i < kSampleSize; ++i) {
        SkPoint point = path.getPoint(i);
        sumLength += SkPoint::Distance(lastPoint, point);
        lastPoint = point;
    }
    avgLength = sumLength / (kSampleSize - 1);

    auto sqr = [](SkScalar x) { return x*x; };

    SkScalar diagonalSqr = sqr(path.getBounds().width()) + sqr(path.getBounds().height());

    // If the path consists of random line segments, the number of intersections should be
    // proportional to this.
    SkScalar intersections = sk_ieee_float_divide(sqr(n) * sqr(avgLength), diagonalSqr);

    // The number of intersections per scanline should be proportional to this number.
    complexity = sk_ieee_float_divide(intersections, path.getBounds().height());

    if (sk_float_isnan(complexity)) {  // it may be possible to have 0.0 / 0.0; inf is fine for us.
        complexity = -1;
    }
}
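
// Illustrative sketch (not part of the original source): the arithmetic behind the
// complexity estimate, for hypothetical numbers. A path with 100 points whose first
// kSampleSize points are roughly 5 units apart, inside 100x100 bounds, gives
// intersections ~= 100^2 * 5^2 / (100^2 + 100^2) = 12.5 and complexity ~= 12.5 / 100
// = 0.125, which is below kComplexityThreshold (0.25), so this test alone would not
// rule out analytic AA.
#if 0   // example only
static void example_complexity_estimate() {
    const SkScalar n = 100, avgLength = 5, width = 100, height = 100;
    const SkScalar diagonalSqr   = width * width + height * height;                  // 20000
    const SkScalar intersections = (n * n) * (avgLength * avgLength) / diagonalSqr;  // 12.5
    const SkScalar complexity    = intersections / height;                           // 0.125
    SkASSERT(complexity < kComplexityThreshold);
}
#endif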

static bool ShouldUseAAA(const SkPath& path, SkScalar avgLength, SkScalar complexity) {
#if defined(SK_DISABLE_AAA)
    return false;
#else
    if (gSkForceAnalyticAA) {
        return true;
    }
    if (!gSkUseAnalyticAA) {
        return false;
    }
    if (path.isRect(nullptr)) {
        return true;
    }

    #ifdef SK_SUPPORT_LEGACY_AAA_CHOICE
        const SkRect& bounds = path.getBounds();
        // When the path has many points compared to the size of its
        // bounds/resolution, it indicates that the path is not quite smooth in
        // the current resolution: the expected number of turning points in
        // every pixel row/column is significantly greater than zero. Hence
        // Analytic AA is not likely to produce visible quality improvements,
        // and Analytic AA might be slower than supersampling.
        return path.countPoints() < SkTMax(bounds.width(), bounds.height()) / 2 - 10;
    #else
        if (path.countPoints() >= path.getBounds().height()) {
            // SAA is faster than AAA in this case even if there are no
            // intersections because AAA will have too many scan lines. See
            // skbug.com/8272
            return false;
        }
        // We will use AAA if the number of verbs < kSampleSize and therefore complexity < 0
        return complexity < kComplexityThreshold;
    #endif
#endif
}

void SkScan::SAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
                  const SkIRect& clipBounds, bool forceRLE) {
    bool containedInClip = clipBounds.contains(ir);
    bool isInverse = path.isInverseFillType();

    // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
    // if we're an inverse filltype
    if (!isInverse && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
        MaskSuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
        SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
        sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
    } else {
        SuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
        sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
    }
}

static int overflows_short_shift(int value, int shift) {
    const int s = 16 + shift;
    return (SkLeftShift(value, s) >> s) - value;
}

/**
  Would any of the coordinates of this rectangle not fit in a short,
  when left-shifted by shift?
*/
static int rect_overflows_short_shift(SkIRect rect, int shift) {
    SkASSERT(!overflows_short_shift(8191, shift));
    SkASSERT(overflows_short_shift(8192, shift));
    SkASSERT(!overflows_short_shift(32767, 0));
    SkASSERT(overflows_short_shift(32768, 0));

    // Since we expect these to succeed, we bit-or together
    // for a tiny extra bit of speed.
    return overflows_short_shift(rect.fLeft, shift) |
           overflows_short_shift(rect.fRight, shift) |
           overflows_short_shift(rect.fTop, shift) |
           overflows_short_shift(rect.fBottom, shift);
}
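
// Illustrative sketch (not part of the original source): what the round-trip shift
// test above checks, assuming shift == 2 (SHIFT). A coordinate passes iff it still
// fits in a signed 16-bit value after being left-shifted by 2, i.e. it lies in
// [-8192, 8191]; shifting left by 16 + shift and arithmetic-shifting back recovers the
// original value exactly in that range and nothing outside it.
#if 0   // example only
static void example_overflow_check() {
    SkASSERT(overflows_short_shift(8191, 2) == 0);     // 8191 << 2 == 32764 fits in int16_t
    SkASSERT(overflows_short_shift(8192, 2) != 0);     // 8192 << 2 == 32768 does not

    const SkIRect ok  = { -8192, -8192, 8191, 8191 };
    const SkIRect bad = { 0, 0, 10000, 10 };           // right edge too large for a 2-bit shift
    SkASSERT(rect_overflows_short_shift(ok, 2) == 0);
    SkASSERT(rect_overflows_short_shift(bad, 2) != 0);
}
#endif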

void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
                          SkBlitter* blitter, bool forceRLE) {
    if (origClip.isEmpty()) {
        return;
    }

    const bool isInverse = path.isInverseFillType();
    SkIRect ir = safeRoundOut(path.getBounds());
    if (ir.isEmpty()) {
        if (isInverse) {
            blitter->blitRegion(origClip);
        }
        return;
    }

    // If the intersection of the path bounds and the clip bounds
    // will overflow 32767 when << by SHIFT, we can't supersample,
    // so draw without antialiasing.
    SkIRect clippedIR;
    if (isInverse) {
       // If the path is an inverse fill, it's going to fill the entire
       // clip, and we care whether the entire clip exceeds our limits.
       clippedIR = origClip.getBounds();
    } else {
       if (!clippedIR.intersect(ir, origClip.getBounds())) {
           return;
       }
    }
    if (rect_overflows_short_shift(clippedIR, SHIFT)) {
        SkScan::FillPath(path, origClip, blitter);
        return;
    }

    // Our antialiasing can't handle a clip larger than 32767, so we restrict
    // the clip to that limit here. (the runs[] uses int16_t for its index).
    //
    // A more general solution (one that could also eliminate the need to
    // disable aa based on ir bounds (see overflows_short_shift)) would be
    // to tile the clip/target...
    SkRegion tmpClipStorage;
    const SkRegion* clipRgn = &origClip;
    {
        static const int32_t kMaxClipCoord = 32767;
        const SkIRect& bounds = origClip.getBounds();
        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
            clipRgn = &tmpClipStorage;
        }
    }
    // for here down, use clipRgn, not origClip

    SkScanClipper   clipper(blitter, clipRgn, ir);

    if (clipper.getBlitter() == nullptr) { // clipped out
        if (isInverse) {
            blitter->blitRegion(*clipRgn);
        }
        return;
    }

    SkASSERT(clipper.getClipRect() == nullptr ||
            *clipper.getClipRect() == clipRgn->getBounds());

    // now use the (possibly wrapped) blitter
    blitter = clipper.getBlitter();

    if (isInverse) {
        sk_blit_above(blitter, ir, *clipRgn);
    }

    SkScalar avgLength, complexity;
    compute_complexity(path, avgLength, complexity);

    if (ShouldUseAAA(path, avgLength, complexity)) {
        // Do not use AAA if path is too complicated:
        // there won't be any speedup or significant visual improvement.
        SkScan::AAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
    } else {
        SkScan::SAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
    }

    if (isInverse) {
        sk_blit_below(blitter, ir, *clipRgn);
    }
}
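
// Illustrative sketch (not part of the original source): how this entry point is
// typically reached. Callers don't normally invoke SkScan::AntiFillPath directly;
// drawing an anti-aliased path on a CPU-backed surface is what eventually routes
// through it (assuming the raster backend and the SkSurface factory shown here).
#if 0   // example only
#include "include/core/SkCanvas.h"
#include "include/core/SkPaint.h"
#include "include/core/SkSurface.h"

static void example_antialiased_path_fill() {
    sk_sp<SkSurface> surface = SkSurface::MakeRasterN32Premul(64, 64);

    SkPath path;
    path.moveTo(8, 8);
    path.lineTo(56, 20);
    path.lineTo(20, 56);
    path.close();

    SkPaint paint;
    paint.setAntiAlias(true);   // AA path fills on the raster backend end up in SkScan::AntiFillPath
    surface->getCanvas()->drawPath(path, paint);
}
#endif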

///////////////////////////////////////////////////////////////////////////////

#include "src/core/SkRasterClip.h"

void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
    if (clip.isEmpty() || !path.isFinite()) {
        return;
    }

    if (clip.isBW()) {
        FillPath(path, clip.bwRgn(), blitter);
    } else {
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::FillPath(path, tmp, &aaBlitter);
    }
}

void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
    if (clip.isEmpty() || !path.isFinite()) {
        return;
    }

    if (clip.isBW()) {
        AntiFillPath(path, clip.bwRgn(), blitter, false);
    } else {
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        AntiFillPath(path, tmp, &aaBlitter, true); // SkAAClipBlitter can blitMask, why forceRLE?
    }
}