1
2 /*
3 * Copyright 2006 The Android Open Source Project
4 *
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
7 */
8
9
10 #include "SkGradientShader.h"
11 #include "SkClampRange.h"
12 #include "SkColorPriv.h"
13 #include "SkMallocPixelRef.h"
14 #include "SkUnitMapper.h"
15 #include "SkUtils.h"
16 #include "SkTemplates.h"
17 #include "SkBitmapCache.h"
18
19 #ifndef SK_DISABLE_DITHER_32BIT_GRADIENT
20 #define USE_DITHER_32BIT_GRADIENT
21 #endif
22
23 static void sk_memset32_dither(uint32_t dst[], uint32_t v0, uint32_t v1,
24 int count) {
25 if (count > 0) {
26 if (v0 == v1) {
27 sk_memset32(dst, v0, count);
28 } else {
29 int pairs = count >> 1;
30 for (int i = 0; i < pairs; i++) {
31 *dst++ = v0;
32 *dst++ = v1;
33 }
34 if (count & 1) {
35 *dst = v0;
36 }
37 }
38 }
39 }
40
41 ///////////////////////////////////////////////////////////////////////////////
42 // Can't use a two-argument function with side effects like this in a
43 // constructor's initializer's argument list because the order of
44 // evaluations in that context is undefined (and backwards on linux/gcc).
45 static SkPoint unflatten_point(SkReader32& buffer) {
46 SkPoint retval;
47 retval.fX = buffer.readScalar();
48 retval.fY = buffer.readScalar();
49 return retval;
50 }
51
52 // Clamp
53
54 static SkFixed clamp_tileproc(SkFixed x) {
55 return SkClampMax(x, 0xFFFF);
56 }
57
58 // Repeat
59
60 static SkFixed repeat_tileproc(SkFixed x) {
61 return x & 0xFFFF;
62 }
63
64 static inline int repeat_bits(int x, const int bits) {
65 return x & ((1 << bits) - 1);
66 }
67
68 static inline int repeat_8bits(int x) {
69 return x & 0xFF;
70 }
71
72 // Mirror
73
74 // Visual Studio 2010 (MSC_VER=1600) optimizes bit-shift code incorrectly.
75 // See http://code.google.com/p/skia/issues/detail?id=472
76 #if defined(_MSC_VER) && (_MSC_VER >= 1600)
77 #pragma optimize("", off)
78 #endif
79
80 static inline SkFixed mirror_tileproc(SkFixed x) {
81 int s = x << 15 >> 31;
82 return (x ^ s) & 0xFFFF;
83 }
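// A rough sketch of the trick above: shifting left by 15 then arithmetically
// right by 31 smears bit 16 of x -- the bit that says which copy of the [0,1)
// interval we are in -- across the whole word, so s is 0 for even copies and
// all-ones for odd copies. XOR-ing with s leaves even copies alone and
// bit-inverts odd ones; e.g. x = 0x18000 (1.5) maps to 0x7FFF, while
// x = 0x08000 (0.5) stays 0x8000.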
84
85 static inline int mirror_bits(int x, const int bits) {
86 #ifdef SK_CPU_HAS_CONDITIONAL_INSTR
87 if (x & (1 << bits))
88 x = ~x;
89 return x & ((1 << bits) - 1);
90 #else
91 int s = x << (31 - bits) >> 31;
92 return (x ^ s) & ((1 << bits) - 1);
93 #endif
94 }
95
96 static inline int mirror_8bits(int x) {
97 #ifdef SK_CPU_HAS_CONDITIONAL_INSTR
98 if (x & 256) {
99 x = ~x;
100 }
101 return x & 255;
102 #else
103 int s = x << 23 >> 31;
104 return (x ^ s) & 0xFF;
105 #endif
106 }
107
108 #if defined(_MSC_VER) && (_MSC_VER >= 1600)
109 #pragma optimize("", on)
110 #endif
111
112 ///////////////////////////////////////////////////////////////////////////////
113
114 typedef SkFixed (*TileProc)(SkFixed);
115
116 static const TileProc gTileProcs[] = {
117 clamp_tileproc,
118 repeat_tileproc,
119 mirror_tileproc
120 };
121
122 ///////////////////////////////////////////////////////////////////////////////
123 ///////////////////////////////////////////////////////////////////////////////
124
125 class Gradient_Shader : public SkShader {
126 public:
127 Gradient_Shader(const SkColor colors[], const SkScalar pos[],
128 int colorCount, SkShader::TileMode mode, SkUnitMapper* mapper);
129 virtual ~Gradient_Shader();
130
131 // overrides
132 virtual bool setContext(const SkBitmap&, const SkPaint&, const SkMatrix&) SK_OVERRIDE;
133 virtual uint32_t getFlags() SK_OVERRIDE { return fFlags; }
134 virtual bool isOpaque() const SK_OVERRIDE;
135
136 enum {
137 /// Seems like enough for visual accuracy. TODO: if pos[] deserves
138 /// it, use a larger cache.
139 kCache16Bits = 8,
140 kGradient16Length = (1 << kCache16Bits),
141 /// Each cache gets 1 extra entry at the end so we don't have to
142 /// test for end-of-cache in lerps. This is also the value used
143 /// to stride *writes* into the dither cache; it must not be zero.
144 /// Total space for a cache is 2x kCache16Count entries: one
145 /// regular cache, one for dithering.
146 kCache16Count = kGradient16Length + 1,
147 kCache16Shift = 16 - kCache16Bits,
148 kSqrt16Shift = 8 - kCache16Bits,
149
150 /// Seems like enough for visual accuracy. TODO: if pos[] deserves
151 /// it, use a larger cache.
152 kCache32Bits = 8,
153 kGradient32Length = (1 << kCache32Bits),
154 /// Each cache gets 1 extra entry at the end so we don't have to
155 /// test for end-of-cache in lerps. This is also the value used
156 /// to stride *writes* into the dither cache; it must not be zero.
157 /// Total space for a cache is 2x kCache32Count entries: one
158 /// regular cache, one for dithering.
159 kCache32Count = kGradient32Length + 1,
160 kCache32Shift = 16 - kCache32Bits,
161 kSqrt32Shift = 8 - kCache32Bits,
162
163 /// This value is used to *read* the dither cache; it may be 0
164 /// if dithering is disabled.
165 #ifdef USE_DITHER_32BIT_GRADIENT
166 kDitherStride32 = kCache32Count,
167 #else
168 kDitherStride32 = 0,
169 #endif
170 kDitherStride16 = kCache16Count,
171 kLerpRemainderMask32 = (1 << (16 - kCache32Bits)) - 1
172 };
173
174
175 protected:
176 Gradient_Shader(SkFlattenableReadBuffer& );
177 SkUnitMapper* fMapper;
178 SkMatrix fPtsToUnit; // set by subclass
179 SkMatrix fDstToIndex;
180 SkMatrix::MapXYProc fDstToIndexProc;
181 TileMode fTileMode;
182 TileProc fTileProc;
183 int fColorCount;
184 uint8_t fDstToIndexClass;
185 uint8_t fFlags;
186
187 struct Rec {
188 SkFixed fPos; // 0...1
189 uint32_t fScale; // (1 << 24) / range
190 };
191 Rec* fRecs;
192
193 virtual void flatten(SkFlattenableWriteBuffer& );
194 const uint16_t* getCache16() const;
195 const SkPMColor* getCache32() const;
196
197 void commonAsABitmap(SkBitmap*) const;
198 void commonAsAGradient(GradientInfo*) const;
199
200 private:
201 enum {
202 kColorStorageCount = 4, // more than this many colors, and we'll use sk_malloc for the space
203
204 kStorageSize = kColorStorageCount * (sizeof(SkColor) + sizeof(Rec))
205 };
206 SkColor fStorage[(kStorageSize + 3) >> 2];
207 SkColor* fOrigColors; // original colors, before modulation by paint in setContext
208 bool fColorsAreOpaque;
209
210 mutable uint16_t* fCache16; // working ptr. If this is NULL, we need to recompute the cache values
211 mutable SkPMColor* fCache32; // working ptr. If this is NULL, we need to recompute the cache values
212
213 mutable uint16_t* fCache16Storage; // storage for fCache16, allocated on demand
214 mutable SkMallocPixelRef* fCache32PixelRef;
215 mutable unsigned fCacheAlpha; // the alpha value we used when we computed the cache. larger than 8 bits so we can store an uninitialized value
216
217 static void Build16bitCache(uint16_t[], SkColor c0, SkColor c1, int count);
218 static void Build32bitCache(SkPMColor[], SkColor c0, SkColor c1, int count,
219 U8CPU alpha);
220 void setCacheAlpha(U8CPU alpha) const;
221 void initCommon();
222
223 typedef SkShader INHERITED;
224 };
225
226 static inline unsigned scalarToU16(SkScalar x) {
227 SkASSERT(x >= 0 && x <= SK_Scalar1);
228
229 #ifdef SK_SCALAR_IS_FLOAT
230 return (unsigned)(x * 0xFFFF);
231 #else
232 return x - (x >> 16); // probably should be x - (x > 0x7FFF) but that is slower
233 #endif
234 }
235
236 Gradient_Shader::Gradient_Shader(const SkColor colors[], const SkScalar pos[],
237 int colorCount, SkShader::TileMode mode, SkUnitMapper* mapper) {
238 SkASSERT(colorCount > 1);
239
240 fCacheAlpha = 256; // init to a value that paint.getAlpha() can't return
241
242 fMapper = mapper;
243 SkSafeRef(mapper);
244
245 SkASSERT((unsigned)mode < SkShader::kTileModeCount);
246 SkASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gTileProcs));
247 fTileMode = mode;
248 fTileProc = gTileProcs[mode];
249
250 fCache16 = fCache16Storage = NULL;
251 fCache32 = NULL;
252 fCache32PixelRef = NULL;
253
254 /* Note: we let the caller skip the first and/or last position.
255 i.e. pos[0] = 0.3, pos[1] = 0.7
256 In these cases, we insert dummy entries to ensure that the final data
257 will be bracketed by [0, 1].
258 i.e. our_pos[0] = 0, our_pos[1] = 0.3, our_pos[2] = 0.7, our_pos[3] = 1
259
260 Thus colorCount (the caller's value) and fColorCount (our value) may
261 differ by up to 2. In the above example:
262 colorCount = 2
263 fColorCount = 4
264 */
265 fColorCount = colorCount;
266 // check if we need to add in dummy start and/or end position/colors
267 bool dummyFirst = false;
268 bool dummyLast = false;
269 if (pos) {
270 dummyFirst = pos[0] != 0;
271 dummyLast = pos[colorCount - 1] != SK_Scalar1;
272 fColorCount += dummyFirst + dummyLast;
273 }
274
275 if (fColorCount > kColorStorageCount) {
276 size_t size = sizeof(SkColor) + sizeof(Rec);
277 fOrigColors = reinterpret_cast<SkColor*>(
278 sk_malloc_throw(size * fColorCount));
279 }
280 else {
281 fOrigColors = fStorage;
282 }
283
284 // Now copy over the colors, adding the dummies as needed
285 {
286 SkColor* origColors = fOrigColors;
287 if (dummyFirst) {
288 *origColors++ = colors[0];
289 }
290 memcpy(origColors, colors, colorCount * sizeof(SkColor));
291 if (dummyLast) {
292 origColors += colorCount;
293 *origColors = colors[colorCount - 1];
294 }
295 }
296
297 fRecs = (Rec*)(fOrigColors + fColorCount);
298 if (fColorCount > 2) {
299 Rec* recs = fRecs;
300 recs->fPos = 0;
301 // recs->fScale = 0; // unused;
302 recs += 1;
303 if (pos) {
304 /* We need to convert the user's array of relative positions into
305 fixed-point positions and scale factors. We need these results
306 to be strictly monotonic (no two values equal or out of order).
307 Hence this complex loop that just jams a zero for the scale
308 value if it sees a segment out of order, and it assures that
309 we start at 0 and end at 1.0
310 */
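// A sketch of what this produces: with pos[] = {0, 0.25, 1} the second Rec
// gets fPos = 0x4000 and fScale = (1 << 24) / 0x4000 = 1024, and the third
// gets fPos = 0x10000 and fScale = (1 << 24) / 0xC000 = 341. Storing the
// reciprocal of each segment length this way lets later lookups turn
// "position within a segment" into a 0..1 parameter with a multiply
// instead of a divide.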
311 SkFixed prev = 0;
312 int startIndex = dummyFirst ? 0 : 1;
313 int count = colorCount + dummyLast;
314 for (int i = startIndex; i < count; i++) {
315 // force the last value to be 1.0
316 SkFixed curr;
317 if (i == colorCount) { // we're really at the dummyLast
318 curr = SK_Fixed1;
319 } else {
320 curr = SkScalarToFixed(pos[i]);
321 }
322 // pin curr within range
323 if (curr < 0) {
324 curr = 0;
325 } else if (curr > SK_Fixed1) {
326 curr = SK_Fixed1;
327 }
328 recs->fPos = curr;
329 if (curr > prev) {
330 recs->fScale = (1 << 24) / (curr - prev);
331 } else {
332 recs->fScale = 0; // ignore this segment
333 }
334 // get ready for the next value
335 prev = curr;
336 recs += 1;
337 }
338 } else { // assume even distribution
339 SkFixed dp = SK_Fixed1 / (colorCount - 1);
340 SkFixed p = dp;
341 SkFixed scale = (colorCount - 1) << 8; // (1 << 24) / dp
342 for (int i = 1; i < colorCount; i++) {
343 recs->fPos = p;
344 recs->fScale = scale;
345 recs += 1;
346 p += dp;
347 }
348 }
349 }
350 this->initCommon();
351 }
352
353 Gradient_Shader::Gradient_Shader(SkFlattenableReadBuffer& buffer) :
354 INHERITED(buffer) {
355 fCacheAlpha = 256;
356
357 fMapper = static_cast<SkUnitMapper*>(buffer.readFlattenable());
358
359 fCache16 = fCache16Storage = NULL;
360 fCache32 = NULL;
361 fCache32PixelRef = NULL;
362
363 int colorCount = fColorCount = buffer.readU32();
364 if (colorCount > kColorStorageCount) {
365 size_t size = sizeof(SkColor) + sizeof(SkPMColor) + sizeof(Rec);
366 fOrigColors = (SkColor*)sk_malloc_throw(size * colorCount);
367 } else {
368 fOrigColors = fStorage;
369 }
370 buffer.read(fOrigColors, colorCount * sizeof(SkColor));
371
372 fTileMode = (TileMode)buffer.readU8();
373 fTileProc = gTileProcs[fTileMode];
374 fRecs = (Rec*)(fOrigColors + colorCount);
375 if (colorCount > 2) {
376 Rec* recs = fRecs;
377 recs[0].fPos = 0;
378 for (int i = 1; i < colorCount; i++) {
379 recs[i].fPos = buffer.readS32();
380 recs[i].fScale = buffer.readU32();
381 }
382 }
383 SkReadMatrix(&buffer, &fPtsToUnit);
384 this->initCommon();
385 }
386
387 Gradient_Shader::~Gradient_Shader() {
388 if (fCache16Storage) {
389 sk_free(fCache16Storage);
390 }
391 SkSafeUnref(fCache32PixelRef);
392 if (fOrigColors != fStorage) {
393 sk_free(fOrigColors);
394 }
395 SkSafeUnref(fMapper);
396 }
397
398 void Gradient_Shader::initCommon() {
399 fFlags = 0;
400 unsigned colorAlpha = 0xFF;
401 for (int i = 0; i < fColorCount; i++) {
402 colorAlpha &= SkColorGetA(fOrigColors[i]);
403 }
404 fColorsAreOpaque = colorAlpha == 0xFF;
405 }
406
407 void Gradient_Shader::flatten(SkFlattenableWriteBuffer& buffer) {
408 this->INHERITED::flatten(buffer);
409 buffer.writeFlattenable(fMapper);
410 buffer.write32(fColorCount);
411 buffer.writeMul4(fOrigColors, fColorCount * sizeof(SkColor));
412 buffer.write8(fTileMode);
413 if (fColorCount > 2) {
414 Rec* recs = fRecs;
415 for (int i = 1; i < fColorCount; i++) {
416 buffer.write32(recs[i].fPos);
417 buffer.write32(recs[i].fScale);
418 }
419 }
420 SkWriteMatrix(&buffer, fPtsToUnit);
421 }
422
423 bool Gradient_Shader::isOpaque() const {
424 return fColorsAreOpaque;
425 }
426
427 bool Gradient_Shader::setContext(const SkBitmap& device,
428 const SkPaint& paint,
429 const SkMatrix& matrix) {
430 if (!this->INHERITED::setContext(device, paint, matrix)) {
431 return false;
432 }
433
434 const SkMatrix& inverse = this->getTotalInverse();
435
436 if (!fDstToIndex.setConcat(fPtsToUnit, inverse)) {
437 return false;
438 }
439
440 fDstToIndexProc = fDstToIndex.getMapXYProc();
441 fDstToIndexClass = (uint8_t)SkShader::ComputeMatrixClass(fDstToIndex);
442
443 // now convert our colors in to PMColors
444 unsigned paintAlpha = this->getPaintAlpha();
445
446 fFlags = this->INHERITED::getFlags();
447 if (fColorsAreOpaque && paintAlpha == 0xFF) {
448 fFlags |= kOpaqueAlpha_Flag;
449 }
450 // we can do span16 as long as our individual colors are opaque,
451 // regardless of the paint's alpha
452 if (fColorsAreOpaque) {
453 fFlags |= kHasSpan16_Flag;
454 }
455
456 this->setCacheAlpha(paintAlpha);
457 return true;
458 }
459
460 void Gradient_Shader::setCacheAlpha(U8CPU alpha) const {
461 // if the new alpha differs from the previous time we were called, inval our cache
462 // this will trigger the cache to be rebuilt.
463 // we don't care about the first time, since the cache ptrs will already be NULL
464 if (fCacheAlpha != alpha) {
465 fCache16 = NULL; // inval the cache
466 fCache32 = NULL; // inval the cache
467 fCacheAlpha = alpha; // record the new alpha
468 // inform our subclasses
469 if (fCache32PixelRef) {
470 fCache32PixelRef->notifyPixelsChanged();
471 }
472 }
473 }
474
475 static inline int blend8(int a, int b, int scale) {
476 SkASSERT(a == SkToU8(a));
477 SkASSERT(b == SkToU8(b));
478 SkASSERT(scale >= 0 && scale <= 256);
479 return a + ((b - a) * scale >> 8);
480 }
481
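// A rough sketch of the packed-blend trick used below: the mask 0xFF00FF
// isolates the red and blue bytes of a packed ARGB word (and, after a >> 8,
// the alpha and green bytes), leaving a zero byte between each pair of
// channels. Each channel times a 0..256 blend factor still fits in 16 bits,
// so two channels can be scaled with a single 32-bit multiply and then
// recombined, blending a whole pixel in a handful of operations instead of
// four separate per-channel blends.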
482 static inline uint32_t dot8_blend_packed32(uint32_t s0, uint32_t s1,
483 int blend) {
484 #if 0
485 int a = blend8(SkGetPackedA32(s0), SkGetPackedA32(s1), blend);
486 int r = blend8(SkGetPackedR32(s0), SkGetPackedR32(s1), blend);
487 int g = blend8(SkGetPackedG32(s0), SkGetPackedG32(s1), blend);
488 int b = blend8(SkGetPackedB32(s0), SkGetPackedB32(s1), blend);
489
490 return SkPackARGB32(a, r, g, b);
491 #else
492 int otherBlend = 256 - blend;
493
494 #if 0
495 U32 t0 = (((s0 & 0xFF00FF) * blend + (s1 & 0xFF00FF) * otherBlend) >> 8) & 0xFF00FF;
496 U32 t1 = (((s0 >> 8) & 0xFF00FF) * blend + ((s1 >> 8) & 0xFF00FF) * otherBlend) & 0xFF00FF00;
497 SkASSERT((t0 & t1) == 0);
498 return t0 | t1;
499 #else
500 return ((((s0 & 0xFF00FF) * blend + (s1 & 0xFF00FF) * otherBlend) >> 8) & 0xFF00FF) |
501 ((((s0 >> 8) & 0xFF00FF) * blend + ((s1 >> 8) & 0xFF00FF) * otherBlend) & 0xFF00FF00);
502 #endif
503
504 #endif
505 }
506
507 #define Fixed_To_Dot8(x) (((x) + 0x80) >> 8)
508
509 /** We take the original colors, not our premultiplied PMColors, since we can
510 build a 16bit table as long as the original colors are opaque, even if the
511 paint specifies a non-opaque alpha.
512 */
513 void Gradient_Shader::Build16bitCache(uint16_t cache[], SkColor c0, SkColor c1,
514 int count) {
515 SkASSERT(count > 1);
516 SkASSERT(SkColorGetA(c0) == 0xFF);
517 SkASSERT(SkColorGetA(c1) == 0xFF);
518
519 SkFixed r = SkColorGetR(c0);
520 SkFixed g = SkColorGetG(c0);
521 SkFixed b = SkColorGetB(c0);
522
523 SkFixed dr = SkIntToFixed(SkColorGetR(c1) - r) / (count - 1);
524 SkFixed dg = SkIntToFixed(SkColorGetG(c1) - g) / (count - 1);
525 SkFixed db = SkIntToFixed(SkColorGetB(c1) - b) / (count - 1);
526
527 r = SkIntToFixed(r) + 0x8000;
528 g = SkIntToFixed(g) + 0x8000;
529 b = SkIntToFixed(b) + 0x8000;
530
531 do {
532 unsigned rr = r >> 16;
533 unsigned gg = g >> 16;
534 unsigned bb = b >> 16;
535 cache[0] = SkPackRGB16(SkR32ToR16(rr), SkG32ToG16(gg), SkB32ToB16(bb));
536 cache[kCache16Count] = SkDitherPack888ToRGB16(rr, gg, bb);
537 cache += 1;
538 r += dr;
539 g += dg;
540 b += db;
541 } while (--count != 0);
542 }
543
544 /*
545 * 2x2 dither a fixed-point color component (8.16) down to 8, matching the
546 * semantics of how we 2x2 dither 32->16
547 */
548 static inline U8CPU dither_fixed_to_8(SkFixed n) {
549 n >>= 8;
550 return ((n << 1) - ((n >> 8 << 8) | (n >> 8))) >> 8;
551 }
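// Rough numeric check: Build32bitCache pre-biases each component by 0x8000,
// so a component whose true value is 1.5 arrives here as 2.0 in 8.16 fixed
// point. The plain cache entry truncates that to 2, while the routine above
// returns 1, so pixels that alternate between the two cache halves average
// back to roughly 1.5.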
552
553 /*
554 * For dithering with premultiply, we want to ceiling the alpha component,
555 * to ensure that it is always >= any color component.
556 */
557 static inline U8CPU dither_ceil_fixed_to_8(SkFixed n) {
558 n >>= 8;
559 return ((n << 1) - (n | (n >> 8))) >> 8;
560 }
561
562 void Gradient_Shader::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1,
563 int count, U8CPU paintAlpha) {
564 SkASSERT(count > 1);
565
566 // need to apply paintAlpha to our two endpoints
567 SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
568 SkFixed da;
569 {
570 int tmp = SkMulDiv255Round(SkColorGetA(c1), paintAlpha);
571 da = SkIntToFixed(tmp - a) / (count - 1);
572 }
573
574 SkFixed r = SkColorGetR(c0);
575 SkFixed g = SkColorGetG(c0);
576 SkFixed b = SkColorGetB(c0);
577 SkFixed dr = SkIntToFixed(SkColorGetR(c1) - r) / (count - 1);
578 SkFixed dg = SkIntToFixed(SkColorGetG(c1) - g) / (count - 1);
579 SkFixed db = SkIntToFixed(SkColorGetB(c1) - b) / (count - 1);
580
581 a = SkIntToFixed(a) + 0x8000;
582 r = SkIntToFixed(r) + 0x8000;
583 g = SkIntToFixed(g) + 0x8000;
584 b = SkIntToFixed(b) + 0x8000;
585
586 do {
587 cache[0] = SkPremultiplyARGBInline(a >> 16, r >> 16, g >> 16, b >> 16);
588 cache[kCache32Count] =
589 SkPremultiplyARGBInline(dither_ceil_fixed_to_8(a),
590 dither_fixed_to_8(r),
591 dither_fixed_to_8(g),
592 dither_fixed_to_8(b));
593 cache += 1;
594 a += da;
595 r += dr;
596 g += dg;
597 b += db;
598 } while (--count != 0);
599 }
600
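// Maps the closed interval [0, 0x10000] onto [0, 0xFFFF]: x >> 16 is zero for
// every legal input except SK_Fixed1 itself, so only the 1.0 endpoint is
// nudged down to 0xFFFF and all other values pass through unchanged.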
601 static inline int SkFixedToFFFF(SkFixed x) {
602 SkASSERT((unsigned)x <= SK_Fixed1);
603 return x - (x >> 16);
604 }
605
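// Widens a 6- or 8-bit index to the full 0..0xFFFF range by bit replication
// (e.g. for 8 bits, (x << 8) | x maps 0x00 to 0x0000 and 0xFF to 0xFFFF),
// which is the form SkUnitMapper::mapUnit16 expects below.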
606 static inline U16CPU bitsTo16(unsigned x, const unsigned bits) {
607 SkASSERT(x < (1U << bits));
608 if (6 == bits) {
609 return (x << 10) | (x << 4) | (x >> 2);
610 }
611 if (8 == bits) {
612 return (x << 8) | x;
613 }
614 sk_throw();
615 return 0;
616 }
617
618 /** We duplicate the last value in each half of the cache so that
619 interpolation doesn't have to special-case being at the last point.
620 */
621 static void complete_16bit_cache(uint16_t* cache, int stride) {
622 cache[stride - 1] = cache[stride - 2];
623 cache[2 * stride - 1] = cache[2 * stride - 2];
624 }
625
626 const uint16_t* Gradient_Shader::getCache16() const {
627 if (fCache16 == NULL) {
628 // double the count for dither entries
629 const int entryCount = kCache16Count * 2;
630 const size_t allocSize = sizeof(uint16_t) * entryCount;
631
632 if (fCache16Storage == NULL) { // set the storage and our working ptr
633 fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize);
634 }
635 fCache16 = fCache16Storage;
636 if (fColorCount == 2) {
637 Build16bitCache(fCache16, fOrigColors[0], fOrigColors[1],
638 kGradient16Length);
639 } else {
640 Rec* rec = fRecs;
641 int prevIndex = 0;
642 for (int i = 1; i < fColorCount; i++) {
643 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache16Shift;
644 SkASSERT(nextIndex < kCache16Count);
645
646 if (nextIndex > prevIndex)
647 Build16bitCache(fCache16 + prevIndex, fOrigColors[i-1], fOrigColors[i], nextIndex - prevIndex + 1);
648 prevIndex = nextIndex;
649 }
650 // one extra space left over at the end for complete_16bit_cache()
651 SkASSERT(prevIndex == kGradient16Length - 1);
652 }
653
654 if (fMapper) {
655 fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize);
656 uint16_t* linear = fCache16; // just computed linear data
657 uint16_t* mapped = fCache16Storage; // storage for mapped data
658 SkUnitMapper* map = fMapper;
659 for (int i = 0; i < kGradient16Length; i++) {
660 int index = map->mapUnit16(bitsTo16(i, kCache16Bits)) >> kCache16Shift;
661 mapped[i] = linear[index];
662 mapped[i + kCache16Count] = linear[index + kCache16Count];
663 }
664 sk_free(fCache16);
665 fCache16 = fCache16Storage;
666 }
667 complete_16bit_cache(fCache16, kCache16Count);
668 }
669 return fCache16;
670 }
671
672 /** We duplicate the last value in each half of the cache so that
673 interpolation doesn't have to special-case being at the last point.
674 */
675 static void complete_32bit_cache(SkPMColor* cache, int stride) {
676 cache[stride - 1] = cache[stride - 2];
677 cache[2 * stride - 1] = cache[2 * stride - 2];
678 }
679
680 const SkPMColor* Gradient_Shader::getCache32() const {
681 if (fCache32 == NULL) {
682 // double the count for dither entries
683 const int entryCount = kCache32Count * 2;
684 const size_t allocSize = sizeof(SkPMColor) * entryCount;
685
686 if (NULL == fCache32PixelRef) {
687 fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
688 (NULL, allocSize, NULL));
689 }
690 fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
691 if (fColorCount == 2) {
692 Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
693 kGradient32Length, fCacheAlpha);
694 } else {
695 Rec* rec = fRecs;
696 int prevIndex = 0;
697 for (int i = 1; i < fColorCount; i++) {
698 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache32Shift;
699 SkASSERT(nextIndex < kGradient32Length);
700
701 if (nextIndex > prevIndex)
702 Build32bitCache(fCache32 + prevIndex, fOrigColors[i-1],
703 fOrigColors[i],
704 nextIndex - prevIndex + 1, fCacheAlpha);
705 prevIndex = nextIndex;
706 }
707 SkASSERT(prevIndex == kGradient32Length - 1);
708 }
709
710 if (fMapper) {
711 SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
712 (NULL, allocSize, NULL));
713 SkPMColor* linear = fCache32; // just computed linear data
714 SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
715 SkUnitMapper* map = fMapper;
716 for (int i = 0; i < kGradient32Length; i++) {
717 int index = map->mapUnit16((i << 8) | i) >> 8;
718 mapped[i] = linear[index];
719 mapped[i + kCache32Count] = linear[index + kCache32Count];
720 }
721 fCache32PixelRef->unref();
722 fCache32PixelRef = newPR;
723 fCache32 = (SkPMColor*)newPR->getAddr();
724 }
725 complete_32bit_cache(fCache32, kCache32Count);
726 }
727 return fCache32;
728 }
729
730 /*
731 * Because our caller might rebuild the same (logically the same) gradient
732 * over and over, we'd like to return exactly the same "bitmap" if possible,
733 * allowing the client to utilize a cache of our bitmap (e.g. with a GPU).
734 * To do that, we maintain a private cache of built-bitmaps, based on our
735 * colors and positions. Note: we don't try to flatten the fMapper, so if one
736 * is present, we skip the cache for now.
737 */
738 void Gradient_Shader::commonAsABitmap(SkBitmap* bitmap) const {
739 // our caller assumes no external alpha, so we ensure that our cache is
740 // built with 0xFF
741 this->setCacheAlpha(0xFF);
742
743 // don't have a way to put the mapper into our cache-key yet
744 if (fMapper) {
745 // force our cache32pixelref to be built
746 (void)this->getCache32();
747 bitmap->setConfig(SkBitmap::kARGB_8888_Config, kGradient32Length, 1);
748 bitmap->setPixelRef(fCache32PixelRef);
749 return;
750 }
751
752 // build our key: [numColors + colors[] + {positions[]} ]
753 int count = 1 + fColorCount;
754 if (fColorCount > 2) {
755 count += fColorCount - 1; // fRecs[].fPos
756 }
757
758 SkAutoSTMalloc<16, int32_t> storage(count);
759 int32_t* buffer = storage.get();
760
761 *buffer++ = fColorCount;
762 memcpy(buffer, fOrigColors, fColorCount * sizeof(SkColor));
763 buffer += fColorCount;
764 if (fColorCount > 2) {
765 for (int i = 1; i < fColorCount; i++) {
766 *buffer++ = fRecs[i].fPos;
767 }
768 }
769 SkASSERT(buffer - storage.get() == count);
770
771 ///////////////////////////////////
772
773 SK_DECLARE_STATIC_MUTEX(gMutex);
774 static SkBitmapCache* gCache;
775 // each cache costs 1K of RAM, since each bitmap will be 1x256 at 32bpp
776 static const int MAX_NUM_CACHED_GRADIENT_BITMAPS = 32;
777 SkAutoMutexAcquire ama(gMutex);
778
779 if (NULL == gCache) {
780 gCache = new SkBitmapCache(MAX_NUM_CACHED_GRADIENT_BITMAPS);
781 }
782 size_t size = count * sizeof(int32_t);
783
784 if (!gCache->find(storage.get(), size, bitmap)) {
785 // force our cache32pixelref to be built
786 (void)this->getCache32();
787 // Only expose the linear section of the cache; don't let the caller
788 // know about the padding at the end to make interpolation faster.
789 bitmap->setConfig(SkBitmap::kARGB_8888_Config, kGradient32Length, 1);
790 bitmap->setPixelRef(fCache32PixelRef);
791
792 gCache->add(storage.get(), size, *bitmap);
793 }
794 }
795
796 void Gradient_Shader::commonAsAGradient(GradientInfo* info) const {
797 if (info) {
798 if (info->fColorCount >= fColorCount) {
799 if (info->fColors) {
800 memcpy(info->fColors, fOrigColors,
801 fColorCount * sizeof(SkColor));
802 }
803 if (info->fColorOffsets) {
804 if (fColorCount == 2) {
805 info->fColorOffsets[0] = 0;
806 info->fColorOffsets[1] = SK_Scalar1;
807 } else if (fColorCount > 2) {
808 for (int i = 0; i < fColorCount; i++)
809 info->fColorOffsets[i] = SkFixedToScalar(fRecs[i].fPos);
810 }
811 }
812 }
813 info->fColorCount = fColorCount;
814 info->fTileMode = fTileMode;
815 }
816 }
817
818 ///////////////////////////////////////////////////////////////////////////////
819
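// Roughly: build the matrix that carries the user's gradient segment onto the
// unit interval along +x -- rotate about pts[0] so pts[0]->pts[1] points along
// +x, translate pts[0] to the origin, then scale by 1/|pts[1]-pts[0]| so that
// pts[1] lands at (1, 0). A zero-length segment degenerates to a zero scale.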
820 static void pts_to_unit_matrix(const SkPoint pts[2], SkMatrix* matrix) {
821 SkVector vec = pts[1] - pts[0];
822 SkScalar mag = vec.length();
823 SkScalar inv = mag ? SkScalarInvert(mag) : 0;
824
825 vec.scale(inv);
826 matrix->setSinCos(-vec.fY, vec.fX, pts[0].fX, pts[0].fY);
827 matrix->postTranslate(-pts[0].fX, -pts[0].fY);
828 matrix->postScale(inv, inv);
829 }
830
831 ///////////////////////////////////////////////////////////////////////////////
832
833 class Linear_Gradient : public Gradient_Shader {
834 public:
835 Linear_Gradient(const SkPoint pts[2],
836 const SkColor colors[], const SkScalar pos[], int colorCount,
837 SkShader::TileMode mode, SkUnitMapper* mapper)
838 : Gradient_Shader(colors, pos, colorCount, mode, mapper),
839 fStart(pts[0]),
840 fEnd(pts[1])
841 {
842 pts_to_unit_matrix(pts, &fPtsToUnit);
843 }
844
845 virtual bool setContext(const SkBitmap&, const SkPaint&, const SkMatrix&) SK_OVERRIDE;
846 virtual void shadeSpan(int x, int y, SkPMColor dstC[], int count) SK_OVERRIDE;
847 virtual void shadeSpan16(int x, int y, uint16_t dstC[], int count) SK_OVERRIDE;
848 virtual BitmapType asABitmap(SkBitmap*, SkMatrix*, TileMode*,
849 SkScalar* twoPointRadialParams) const SK_OVERRIDE;
850 virtual GradientType asAGradient(GradientInfo* info) const SK_OVERRIDE;
851
852 static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
853 return SkNEW_ARGS(Linear_Gradient, (buffer));
854 }
855
856 virtual void flatten(SkFlattenableWriteBuffer& buffer) SK_OVERRIDE {
857 this->INHERITED::flatten(buffer);
858 buffer.writeScalar(fStart.fX);
859 buffer.writeScalar(fStart.fY);
860 buffer.writeScalar(fEnd.fX);
861 buffer.writeScalar(fEnd.fY);
862 }
863
864 SK_DECLARE_FLATTENABLE_REGISTRAR()
865
866 protected:
867 Linear_Gradient(SkFlattenableReadBuffer& buffer)
868 : Gradient_Shader(buffer),
869 fStart(unflatten_point(buffer)),
870 fEnd(unflatten_point(buffer)) {
871 }
872 virtual Factory getFactory() SK_OVERRIDE { return CreateProc; }
873
874 private:
875 typedef Gradient_Shader INHERITED;
876 const SkPoint fStart;
877 const SkPoint fEnd;
878 };
879
880 bool Linear_Gradient::setContext(const SkBitmap& device, const SkPaint& paint,
881 const SkMatrix& matrix) {
882 if (!this->INHERITED::setContext(device, paint, matrix)) {
883 return false;
884 }
885
886 unsigned mask = SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask;
887 if ((fDstToIndex.getType() & ~mask) == 0) {
888 fFlags |= SkShader::kConstInY32_Flag;
889 if ((fFlags & SkShader::kHasSpan16_Flag) && !paint.isDither()) {
890 // only claim this if we do have a 16bit mode (i.e. none of our
891 // colors have alpha), and if we are not dithering (which obviously
892 // is not const in Y).
893 fFlags |= SkShader::kConstInY16_Flag;
894 }
895 }
896 return true;
897 }
898
899 #define NO_CHECK_ITER \
900 do { \
901 unsigned fi = fx >> Gradient_Shader::kCache32Shift; \
902 SkASSERT(fi <= 0xFF); \
903 fx += dx; \
904 *dstC++ = cache[toggle + fi]; \
905 toggle ^= Gradient_Shader::kDitherStride32; \
906 } while (0)
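// Note on `toggle` in the loops below: it is either 0 or kDitherStride32, so
// XOR-ing it after every pixel makes successive pixels read alternately from
// the plain half and the dithered half of the 32-bit cache (the shadeSpan
// callers seed it from ((x ^ y) & 1) to approximate a 2x2 ordered dither).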
907
908 namespace {
909
910 typedef void (*LinearShadeProc)(TileProc proc, SkFixed dx, SkFixed fx,
911 SkPMColor* dstC, const SkPMColor* cache,
912 int toggle, int count);
913
914 // This function is deprecated, and will be replaced by
915 // shadeSpan_linear_vertical_lerp() once Chrome has been weaned off of it.
916 void shadeSpan_linear_vertical(TileProc proc, SkFixed dx, SkFixed fx,
917 SkPMColor* SK_RESTRICT dstC,
918 const SkPMColor* SK_RESTRICT cache,
919 int toggle, int count) {
920 // We're a vertical gradient, so no change in a span.
921 // If colors change sharply across the gradient, dithering is
922 // insufficient (it subsamples the color space) and we need to lerp.
923 unsigned fullIndex = proc(fx);
924 unsigned fi = fullIndex >> (16 - Gradient_Shader::kCache32Bits);
925 sk_memset32_dither(dstC,
926 cache[toggle + fi],
927 cache[(toggle ^ Gradient_Shader::kDitherStride32) + fi],
928 count);
929 }
930
931 // Linear interpolation (lerp) is unnecessary if there are no sharp
932 // discontinuities in the gradient - which must be true if there are
933 // only 2 colors - but it's cheap.
934 void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx,
935 SkPMColor* SK_RESTRICT dstC,
936 const SkPMColor* SK_RESTRICT cache,
937 int toggle, int count) {
938 // We're a vertical gradient, so no change in a span.
939 // If colors change sharply across the gradient, dithering is
940 // insufficient (it subsamples the color space) and we need to lerp.
941 unsigned fullIndex = proc(fx);
942 unsigned fi = fullIndex >> (16 - Gradient_Shader::kCache32Bits);
943 unsigned remainder = fullIndex & Gradient_Shader::kLerpRemainderMask32;
944 SkPMColor lerp =
945 SkFastFourByteInterp(
946 cache[toggle + fi + 1],
947 cache[toggle + fi], remainder);
948 SkPMColor dlerp =
949 SkFastFourByteInterp(
950 cache[(toggle ^ Gradient_Shader::kDitherStride32) + fi + 1],
951 cache[(toggle ^ Gradient_Shader::kDitherStride32) + fi], remainder);
952 sk_memset32_dither(dstC, lerp, dlerp, count);
953 }
954
955 void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
956 SkPMColor* SK_RESTRICT dstC,
957 const SkPMColor* SK_RESTRICT cache,
958 int toggle, int count) {
959 SkClampRange range;
960 range.init(fx, dx, count, 0, Gradient_Shader::kGradient32Length);
961
962 if ((count = range.fCount0) > 0) {
963 sk_memset32_dither(dstC,
964 cache[toggle + range.fV0],
965 cache[(toggle ^ Gradient_Shader::kDitherStride32) + range.fV0],
966 count);
967 dstC += count;
968 }
969 if ((count = range.fCount1) > 0) {
970 int unroll = count >> 3;
971 fx = range.fFx1;
972 for (int i = 0; i < unroll; i++) {
973 NO_CHECK_ITER; NO_CHECK_ITER;
974 NO_CHECK_ITER; NO_CHECK_ITER;
975 NO_CHECK_ITER; NO_CHECK_ITER;
976 NO_CHECK_ITER; NO_CHECK_ITER;
977 }
978 if ((count &= 7) > 0) {
979 do {
980 NO_CHECK_ITER;
981 } while (--count != 0);
982 }
983 }
984 if ((count = range.fCount2) > 0) {
985 sk_memset32_dither(dstC,
986 cache[toggle + range.fV1],
987 cache[(toggle ^ Gradient_Shader::kDitherStride32) + range.fV1],
988 count);
989 }
990 }
991
992 void shadeSpan_linear_mirror(TileProc proc, SkFixed dx, SkFixed fx,
993 SkPMColor* SK_RESTRICT dstC,
994 const SkPMColor* SK_RESTRICT cache,
995 int toggle, int count) {
996 do {
997 unsigned fi = mirror_8bits(fx >> 8);
998 SkASSERT(fi <= 0xFF);
999 fx += dx;
1000 *dstC++ = cache[toggle + fi];
1001 toggle ^= Gradient_Shader::kDitherStride32;
1002 } while (--count != 0);
1003 }
1004
1005 void shadeSpan_linear_repeat(TileProc proc, SkFixed dx, SkFixed fx,
1006 SkPMColor* SK_RESTRICT dstC,
1007 const SkPMColor* SK_RESTRICT cache,
1008 int toggle, int count) {
1009 do {
1010 unsigned fi = repeat_8bits(fx >> 8);
1011 SkASSERT(fi <= 0xFF);
1012 fx += dx;
1013 *dstC++ = cache[toggle + fi];
1014 toggle ^= Gradient_Shader::kDitherStride32;
1015 } while (--count != 0);
1016 }
1017
1018 }
1019
1020 void Linear_Gradient::shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC,
1021 int count) {
1022 SkASSERT(count > 0);
1023
1024 SkPoint srcPt;
1025 SkMatrix::MapXYProc dstProc = fDstToIndexProc;
1026 TileProc proc = fTileProc;
1027 const SkPMColor* SK_RESTRICT cache = this->getCache32();
1028 #ifdef USE_DITHER_32BIT_GRADIENT
1029 int toggle = ((x ^ y) & 1) * kDitherStride32;
1030 #else
1031 int toggle = 0;
1032 #endif
1033
1034 if (fDstToIndexClass != kPerspective_MatrixClass) {
1035 dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
1036 SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
1037 SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
1038
1039 if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
1040 SkFixed dxStorage[1];
1041 (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), dxStorage, NULL);
1042 dx = dxStorage[0];
1043 } else {
1044 SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
1045 dx = SkScalarToFixed(fDstToIndex.getScaleX());
1046 }
1047
1048 LinearShadeProc shadeProc = shadeSpan_linear_repeat;
1049 if (SkFixedNearlyZero(dx)) {
1050 #ifdef SK_SIMPLE_TWOCOLOR_VERTICAL_GRADIENTS
1051 if (fColorCount > 2) {
1052 shadeProc = shadeSpan_linear_vertical_lerp;
1053 } else {
1054 shadeProc = shadeSpan_linear_vertical;
1055 }
1056 #else
1057 shadeProc = shadeSpan_linear_vertical_lerp;
1058 #endif
1059 } else if (proc == clamp_tileproc) {
1060 shadeProc = shadeSpan_linear_clamp;
1061 } else if (proc == mirror_tileproc) {
1062 shadeProc = shadeSpan_linear_mirror;
1063 } else {
1064 SkASSERT(proc == repeat_tileproc);
1065 }
1066 (*shadeProc)(proc, dx, fx, dstC, cache, toggle, count);
1067 } else {
1068 SkScalar dstX = SkIntToScalar(x);
1069 SkScalar dstY = SkIntToScalar(y);
1070 do {
1071 dstProc(fDstToIndex, dstX, dstY, &srcPt);
1072 unsigned fi = proc(SkScalarToFixed(srcPt.fX));
1073 SkASSERT(fi <= 0xFFFF);
1074 *dstC++ = cache[toggle + (fi >> kCache32Shift)];
1075 toggle ^= Gradient_Shader::kDitherStride32;
1076 dstX += SK_Scalar1;
1077 } while (--count != 0);
1078 }
1079 }
1080
1081 SkShader::BitmapType Linear_Gradient::asABitmap(SkBitmap* bitmap,
1082 SkMatrix* matrix,
1083 TileMode xy[],
1084 SkScalar* twoPointRadialParams) const {
1085 if (bitmap) {
1086 this->commonAsABitmap(bitmap);
1087 }
1088 if (matrix) {
1089 matrix->setScale(SkIntToScalar(kGradient32Length), SK_Scalar1);
1090 matrix->preConcat(fPtsToUnit);
1091 }
1092 if (xy) {
1093 xy[0] = fTileMode;
1094 xy[1] = kClamp_TileMode;
1095 }
1096 return kDefault_BitmapType;
1097 }
1098
1099 SkShader::GradientType Linear_Gradient::asAGradient(GradientInfo* info) const {
1100 if (info) {
1101 commonAsAGradient(info);
1102 info->fPoint[0] = fStart;
1103 info->fPoint[1] = fEnd;
1104 }
1105 return kLinear_GradientType;
1106 }
1107
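// Roughly the 16-bit analogue of sk_memset32_dither above: write `value` and
// `other` to alternating 16-bit pixels, but do the bulk of the work with
// 32-bit stores -- peel off one leading pixel if dst is not 4-byte aligned
// (swapping the pair to keep the pattern), store the pair one word at a time,
// then patch up a possible trailing odd pixel.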
1108 static void dither_memset16(uint16_t dst[], uint16_t value, uint16_t other,
1109 int count) {
1110 if (reinterpret_cast<uintptr_t>(dst) & 2) {
1111 *dst++ = value;
1112 count -= 1;
1113 SkTSwap(value, other);
1114 }
1115
1116 sk_memset32((uint32_t*)dst, (value << 16) | other, count >> 1);
1117
1118 if (count & 1) {
1119 dst[count - 1] = value;
1120 }
1121 }
1122
1123 #define NO_CHECK_ITER_16 \
1124 do { \
1125 unsigned fi = fx >> Gradient_Shader::kCache16Shift; \
1126 SkASSERT(fi < Gradient_Shader::kCache16Count); \
1127 fx += dx; \
1128 *dstC++ = cache[toggle + fi]; \
1129 toggle ^= Gradient_Shader::kDitherStride16; \
1130 } while (0)
1131
1132 namespace {
1133
1134 typedef void (*LinearShade16Proc)(TileProc proc, SkFixed dx, SkFixed fx,
1135 uint16_t* dstC, const uint16_t* cache,
1136 int toggle, int count);
1137
1138 void shadeSpan16_linear_vertical(TileProc proc, SkFixed dx, SkFixed fx,
1139 uint16_t* SK_RESTRICT dstC,
1140 const uint16_t* SK_RESTRICT cache,
1141 int toggle, int count) {
1142 // we're a vertical gradient, so no change in a span
1143 unsigned fi = proc(fx) >> Gradient_Shader::kCache16Shift;
1144 SkASSERT(fi < Gradient_Shader::kCache16Count);
1145 dither_memset16(dstC, cache[toggle + fi],
1146 cache[(toggle ^ Gradient_Shader::kDitherStride16) + fi], count);
1147
1148 }
1149
1150 void shadeSpan16_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
1151 uint16_t* SK_RESTRICT dstC,
1152 const uint16_t* SK_RESTRICT cache,
1153 int toggle, int count) {
1154 SkClampRange range;
1155 range.init(fx, dx, count, 0, Gradient_Shader::kGradient16Length);
1156
1157 if ((count = range.fCount0) > 0) {
1158 dither_memset16(dstC,
1159 cache[toggle + range.fV0],
1160 cache[(toggle ^ Gradient_Shader::kDitherStride16) + range.fV0],
1161 count);
1162 dstC += count;
1163 }
1164 if ((count = range.fCount1) > 0) {
1165 int unroll = count >> 3;
1166 fx = range.fFx1;
1167 for (int i = 0; i < unroll; i++) {
1168 NO_CHECK_ITER_16; NO_CHECK_ITER_16;
1169 NO_CHECK_ITER_16; NO_CHECK_ITER_16;
1170 NO_CHECK_ITER_16; NO_CHECK_ITER_16;
1171 NO_CHECK_ITER_16; NO_CHECK_ITER_16;
1172 }
1173 if ((count &= 7) > 0) {
1174 do {
1175 NO_CHECK_ITER_16;
1176 } while (--count != 0);
1177 }
1178 }
1179 if ((count = range.fCount2) > 0) {
1180 dither_memset16(dstC,
1181 cache[toggle + range.fV1],
1182 cache[(toggle ^ Gradient_Shader::kDitherStride16) + range.fV1],
1183 count);
1184 }
1185 }
1186
1187 void shadeSpan16_linear_mirror(TileProc proc, SkFixed dx, SkFixed fx,
1188 uint16_t* SK_RESTRICT dstC,
1189 const uint16_t* SK_RESTRICT cache,
1190 int toggle, int count) {
1191 do {
1192 unsigned fi = mirror_bits(fx >> Gradient_Shader::kCache16Shift,
1193 Gradient_Shader::kCache16Bits);
1194 SkASSERT(fi < Gradient_Shader::kCache16Count);
1195 fx += dx;
1196 *dstC++ = cache[toggle + fi];
1197 toggle ^= Gradient_Shader::kDitherStride16;
1198 } while (--count != 0);
1199 }
1200
1201 void shadeSpan16_linear_repeat(TileProc proc, SkFixed dx, SkFixed fx,
1202 uint16_t* SK_RESTRICT dstC,
1203 const uint16_t* SK_RESTRICT cache,
1204 int toggle, int count) {
1205 SkASSERT(proc == repeat_tileproc);
1206 do {
1207 unsigned fi = repeat_bits(fx >> Gradient_Shader::kCache16Shift,
1208 Gradient_Shader::kCache16Bits);
1209 SkASSERT(fi < Gradient_Shader::kCache16Count);
1210 fx += dx;
1211 *dstC++ = cache[toggle + fi];
1212 toggle ^= Gradient_Shader::kDitherStride16;
1213 } while (--count != 0);
1214 }
1215 }
1216
1217 void Linear_Gradient::shadeSpan16(int x, int y,
1218 uint16_t* SK_RESTRICT dstC, int count) {
1219 SkASSERT(count > 0);
1220
1221 SkPoint srcPt;
1222 SkMatrix::MapXYProc dstProc = fDstToIndexProc;
1223 TileProc proc = fTileProc;
1224 const uint16_t* SK_RESTRICT cache = this->getCache16();
1225 int toggle = ((x ^ y) & 1) * kDitherStride16;
1226
1227 if (fDstToIndexClass != kPerspective_MatrixClass) {
1228 dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
1229 SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
1230 SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
1231
1232 if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
1233 SkFixed dxStorage[1];
1234 (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), dxStorage, NULL);
1235 dx = dxStorage[0];
1236 } else {
1237 SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
1238 dx = SkScalarToFixed(fDstToIndex.getScaleX());
1239 }
1240
1241 LinearShade16Proc shadeProc = shadeSpan16_linear_repeat;
1242 if (SkFixedNearlyZero(dx)) {
1243 shadeProc = shadeSpan16_linear_vertical;
1244 } else if (proc == clamp_tileproc) {
1245 shadeProc = shadeSpan16_linear_clamp;
1246 } else if (proc == mirror_tileproc) {
1247 shadeProc = shadeSpan16_linear_mirror;
1248 } else {
1249 SkASSERT(proc == repeat_tileproc);
1250 }
1251 (*shadeProc)(proc, dx, fx, dstC, cache, toggle, count);
1252 } else {
1253 SkScalar dstX = SkIntToScalar(x);
1254 SkScalar dstY = SkIntToScalar(y);
1255 do {
1256 dstProc(fDstToIndex, dstX, dstY, &srcPt);
1257 unsigned fi = proc(SkScalarToFixed(srcPt.fX));
1258 SkASSERT(fi <= 0xFFFF);
1259
1260 int index = fi >> kCache16Shift;
1261 *dstC++ = cache[toggle + index];
1262 toggle ^= Gradient_Shader::kDitherStride16;
1263
1264 dstX += SK_Scalar1;
1265 } while (--count != 0);
1266 }
1267 }
1268
1269 ///////////////////////////////////////////////////////////////////////////////
1270
1271 #define kSQRT_TABLE_BITS 11
1272 #define kSQRT_TABLE_SIZE (1 << kSQRT_TABLE_BITS)
1273
1274 #include "SkRadialGradient_Table.h"
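// gSqrt8Table (from the generated header above) holds, roughly,
// 255 * sqrt(i / kSQRT_TABLE_SIZE) for i in [0, kSQRT_TABLE_SIZE); the radial
// shaders index it with a scaled squared radius so no per-pixel sqrt is needed.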
1275
1276 #if defined(SK_BUILD_FOR_WIN32) && defined(SK_DEBUG)
1277
1278 #include <stdio.h>
1279
1280 void SkRadialGradient_BuildTable() {
1281 // build it 0..127 x 0..127, so we use 2^15 - 1 in the numerator for our "fixed" table
1282
1283 FILE* file = ::fopen("SkRadialGradient_Table.h", "w");
1284 SkASSERT(file);
1285 ::fprintf(file, "static const uint8_t gSqrt8Table[] = {\n");
1286
1287 for (int i = 0; i < kSQRT_TABLE_SIZE; i++) {
1288 if ((i & 15) == 0) {
1289 ::fprintf(file, "\t");
1290 }
1291
1292 uint8_t value = SkToU8(SkFixedSqrt(i * SK_Fixed1 / kSQRT_TABLE_SIZE) >> 8);
1293
1294 ::fprintf(file, "0x%02X", value);
1295 if (i < kSQRT_TABLE_SIZE-1) {
1296 ::fprintf(file, ", ");
1297 }
1298 if ((i & 15) == 15) {
1299 ::fprintf(file, "\n");
1300 }
1301 }
1302 ::fprintf(file, "};\n");
1303 ::fclose(file);
1304 }
1305
1306 #endif
1307
1308
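// Analogous to pts_to_unit_matrix above: translate the circle's center to the
// origin and scale by 1/radius, so points on the user's circle land on the
// unit circle and the distance from the origin becomes the 0..1 gradient
// parameter fed to the tile procs.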
1309 static void rad_to_unit_matrix(const SkPoint& center, SkScalar radius,
1310 SkMatrix* matrix) {
1311 SkScalar inv = SkScalarInvert(radius);
1312
1313 matrix->setTranslate(-center.fX, -center.fY);
1314 matrix->postScale(inv, inv);
1315 }
1316
1317
1318 namespace {
1319
1320 typedef void (* RadialShade16Proc)(SkScalar sfx, SkScalar sdx,
1321 SkScalar sfy, SkScalar sdy,
1322 uint16_t* dstC, const uint16_t* cache,
1323 int toggle, int count);
1324
1325 void shadeSpan16_radial_clamp(SkScalar sfx, SkScalar sdx,
1326 SkScalar sfy, SkScalar sdy,
1327 uint16_t* SK_RESTRICT dstC, const uint16_t* SK_RESTRICT cache,
1328 int toggle, int count) {
1329 const uint8_t* SK_RESTRICT sqrt_table = gSqrt8Table;
1330
1331 /* knock these down so we can pin against +- 0x7FFF, which is an
1332 immediate load, rather than 0xFFFF which is slower. This is a
1333 compromise, since it reduces our precision, but that appears
1334 to be visually OK. If we decide this is OK for all of our cases,
1335 we could (it seems) put this scale-down into fDstToIndex,
1336 to avoid having to do these extra shifts each time.
1337 */
1338 SkFixed fx = SkScalarToFixed(sfx) >> 1;
1339 SkFixed dx = SkScalarToFixed(sdx) >> 1;
1340 SkFixed fy = SkScalarToFixed(sfy) >> 1;
1341 SkFixed dy = SkScalarToFixed(sdy) >> 1;
1342 // might perform this check for the other modes,
1343 // but the win will be a smaller % of the total
1344 if (dy == 0) {
1345 fy = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
1346 fy *= fy;
1347 do {
1348 unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
1349 unsigned fi = (xx * xx + fy) >> (14 + 16 - kSQRT_TABLE_BITS);
1350 fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
1351 fx += dx;
1352 *dstC++ = cache[toggle +
1353 (sqrt_table[fi] >> Gradient_Shader::kSqrt16Shift)];
1354 toggle ^= Gradient_Shader::kDitherStride16;
1355 } while (--count != 0);
1356 } else {
1357 do {
1358 unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
1359 unsigned fi = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
1360 fi = (xx * xx + fi * fi) >> (14 + 16 - kSQRT_TABLE_BITS);
1361 fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
1362 fx += dx;
1363 fy += dy;
1364 *dstC++ = cache[toggle +
1365 (sqrt_table[fi] >> Gradient_Shader::kSqrt16Shift)];
1366 toggle ^= Gradient_Shader::kDitherStride16;
1367 } while (--count != 0);
1368 }
1369 }
1370
1371 void shadeSpan16_radial_mirror(SkScalar sfx, SkScalar sdx,
1372 SkScalar sfy, SkScalar sdy,
1373 uint16_t* SK_RESTRICT dstC, const uint16_t* SK_RESTRICT cache,
1374 int toggle, int count) {
1375 do {
1376 #ifdef SK_SCALAR_IS_FLOAT
1377 float fdist = sk_float_sqrt(sfx*sfx + sfy*sfy);
1378 SkFixed dist = SkFloatToFixed(fdist);
1379 #else
1380 SkFixed magnitudeSquared = SkFixedSquare(sfx) +
1381 SkFixedSquare(sfy);
1382 if (magnitudeSquared < 0) // Overflow.
1383 magnitudeSquared = SK_FixedMax;
1384 SkFixed dist = SkFixedSqrt(magnitudeSquared);
1385 #endif
1386 unsigned fi = mirror_tileproc(dist);
1387 SkASSERT(fi <= 0xFFFF);
1388 *dstC++ = cache[toggle + (fi >> Gradient_Shader::kCache16Shift)];
1389 toggle ^= Gradient_Shader::kDitherStride16;
1390 sfx += sdx;
1391 sfy += sdy;
1392 } while (--count != 0);
1393 }
1394
1395 void shadeSpan16_radial_repeat(SkScalar sfx, SkScalar sdx,
1396 SkScalar sfy, SkScalar sdy,
1397 uint16_t* SK_RESTRICT dstC, const uint16_t* SK_RESTRICT cache,
1398 int toggle, int count) {
1399 SkFixed fx = SkScalarToFixed(sfx);
1400 SkFixed dx = SkScalarToFixed(sdx);
1401 SkFixed fy = SkScalarToFixed(sfy);
1402 SkFixed dy = SkScalarToFixed(sdy);
1403 do {
1404 SkFixed dist = SkFixedSqrt(SkFixedSquare(fx) + SkFixedSquare(fy));
1405 unsigned fi = repeat_tileproc(dist);
1406 SkASSERT(fi <= 0xFFFF);
1407 fx += dx;
1408 fy += dy;
1409 *dstC++ = cache[toggle + (fi >> Gradient_Shader::kCache16Shift)];
1410 toggle ^= Gradient_Shader::kDitherStride16;
1411 } while (--count != 0);
1412 }
1413
1414 }
1415
1416 class Radial_Gradient : public Gradient_Shader {
1417 public:
1418 Radial_Gradient(const SkPoint& center, SkScalar radius,
1419 const SkColor colors[], const SkScalar pos[], int colorCount,
1420 SkShader::TileMode mode, SkUnitMapper* mapper)
1421 : Gradient_Shader(colors, pos, colorCount, mode, mapper),
1422 fCenter(center),
1423 fRadius(radius)
1424 {
1425 // make sure our table is in sync with our current #define for kSQRT_TABLE_SIZE
1426 SkASSERT(sizeof(gSqrt8Table) == kSQRT_TABLE_SIZE);
1427
1428 rad_to_unit_matrix(center, radius, &fPtsToUnit);
1429 }
1430
1431 virtual void shadeSpan(int x, int y, SkPMColor* dstC, int count)
1432 SK_OVERRIDE;
1433 virtual void shadeSpan16(int x, int y, uint16_t* SK_RESTRICT dstC,
1434 int count) SK_OVERRIDE {
1435 SkASSERT(count > 0);
1436
1437 SkPoint srcPt;
1438 SkMatrix::MapXYProc dstProc = fDstToIndexProc;
1439 TileProc proc = fTileProc;
1440 const uint16_t* SK_RESTRICT cache = this->getCache16();
1441 int toggle = ((x ^ y) & 1) * kDitherStride16;
1442
1443 if (fDstToIndexClass != kPerspective_MatrixClass) {
1444 dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
1445 SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
1446
1447 SkScalar sdx = fDstToIndex.getScaleX();
1448 SkScalar sdy = fDstToIndex.getSkewY();
1449
1450 if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
1451 SkFixed storage[2];
1452 (void)fDstToIndex.fixedStepInX(SkIntToScalar(y),
1453 &storage[0], &storage[1]);
1454 sdx = SkFixedToScalar(storage[0]);
1455 sdy = SkFixedToScalar(storage[1]);
1456 } else {
1457 SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
1458 }
1459
1460 RadialShade16Proc shadeProc = shadeSpan16_radial_repeat;
1461 if (proc == clamp_tileproc) {
1462 shadeProc = shadeSpan16_radial_clamp;
1463 } else if (proc == mirror_tileproc) {
1464 shadeProc = shadeSpan16_radial_mirror;
1465 } else {
1466 SkASSERT(proc == repeat_tileproc);
1467 }
1468 (*shadeProc)(srcPt.fX, sdx, srcPt.fY, sdy, dstC,
1469 cache, toggle, count);
1470 } else { // perspective case
1471 SkScalar dstX = SkIntToScalar(x);
1472 SkScalar dstY = SkIntToScalar(y);
1473 do {
1474 dstProc(fDstToIndex, dstX, dstY, &srcPt);
1475 unsigned fi = proc(SkScalarToFixed(srcPt.length()));
1476 SkASSERT(fi <= 0xFFFF);
1477
1478 int index = fi >> (16 - kCache16Bits);
1479 *dstC++ = cache[toggle + index];
1480 toggle ^= kDitherStride16;
1481
1482 dstX += SK_Scalar1;
1483 } while (--count != 0);
1484 }
1485 }
1486
1487 virtual BitmapType asABitmap(SkBitmap* bitmap,
1488 SkMatrix* matrix,
1489 TileMode* xy,
1490 SkScalar* twoPointRadialParams)
1491 const SK_OVERRIDE {
1492 if (bitmap) {
1493 this->commonAsABitmap(bitmap);
1494 }
1495 if (matrix) {
1496 matrix->setScale(SkIntToScalar(kGradient32Length),
1497 SkIntToScalar(kGradient32Length));
1498 matrix->preConcat(fPtsToUnit);
1499 }
1500 if (xy) {
1501 xy[0] = fTileMode;
1502 xy[1] = kClamp_TileMode;
1503 }
1504 return kRadial_BitmapType;
1505 }
1506 virtual GradientType asAGradient(GradientInfo* info) const SK_OVERRIDE {
1507 if (info) {
1508 commonAsAGradient(info);
1509 info->fPoint[0] = fCenter;
1510 info->fRadius[0] = fRadius;
1511 }
1512 return kRadial_GradientType;
1513 }
1514
1515 static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
1516 return SkNEW_ARGS(Radial_Gradient, (buffer));
1517 }
1518
1519 virtual void flatten(SkFlattenableWriteBuffer& buffer) SK_OVERRIDE {
1520 this->INHERITED::flatten(buffer);
1521 buffer.writeScalar(fCenter.fX);
1522 buffer.writeScalar(fCenter.fY);
1523 buffer.writeScalar(fRadius);
1524 }
1525
1526 protected:
1527 Radial_Gradient(SkFlattenableReadBuffer& buffer)
1528 : Gradient_Shader(buffer),
1529 fCenter(unflatten_point(buffer)),
1530 fRadius(buffer.readScalar()) {
1531 }
1532 virtual Factory getFactory() SK_OVERRIDE { return CreateProc; }
1533
1534 private:
1535 typedef Gradient_Shader INHERITED;
1536 const SkPoint fCenter;
1537 const SkScalar fRadius;
1538 };
1539
1540 namespace {
1541
1542 inline bool radial_completely_pinned(int fx, int dx, int fy, int dy) {
1543 // fast, overly-conservative test: checks unit square instead
1544 // of unit circle
1545 bool xClamped = (fx >= SK_FixedHalf && dx >= 0) ||
1546 (fx <= -SK_FixedHalf && dx <= 0);
1547 bool yClamped = (fy >= SK_FixedHalf && dy >= 0) ||
1548 (fy <= -SK_FixedHalf && dy <= 0);
1549
1550 return xClamped || yClamped;
1551 }
1552
1553 // Return true if (fx, fy) is always inside the unit circle
1554 // SkPin32 is expensive, but so are all the SkFixedMul in this test,
1555 // so it shouldn't be run if count is small.
1556 inline bool no_need_for_radial_pin(int fx, int dx,
1557 int fy, int dy, int count) {
1558 SkASSERT(count > 0);
1559 if (SkAbs32(fx) > 0x7FFF || SkAbs32(fy) > 0x7FFF) {
1560 return false;
1561 }
1562 if (fx*fx + fy*fy > 0x7FFF*0x7FFF) {
1563 return false;
1564 }
1565 fx += (count - 1) * dx;
1566 fy += (count - 1) * dy;
1567 if (SkAbs32(fx) > 0x7FFF || SkAbs32(fy) > 0x7FFF) {
1568 return false;
1569 }
1570 return fx*fx + fy*fy <= 0x7FFF*0x7FFF;
1571 }
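// Checking only the first and last pixel above is sufficient because fx and fy
// advance linearly, so fx*fx + fy*fy is a convex function of the pixel index
// and takes its maximum at one of the two endpoints of the span.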
1572
1573 #define UNPINNED_RADIAL_STEP \
1574 fi = (fx * fx + fy * fy) >> (14 + 16 - kSQRT_TABLE_BITS); \
1575 *dstC++ = cache[toggle + \
1576 (sqrt_table[fi] >> Gradient_Shader::kSqrt32Shift)]; \
1577 toggle ^= Gradient_Shader::kDitherStride32; \
1578 fx += dx; \
1579 fy += dy;
1580
1581 typedef void (* RadialShadeProc)(SkScalar sfx, SkScalar sdx,
1582 SkScalar sfy, SkScalar sdy,
1583 SkPMColor* dstC, const SkPMColor* cache,
1584 int count, int toggle);
1585
1586 // On Linux, this is faster with SkPMColor[] params than SkPMColor* SK_RESTRICT
1587 void shadeSpan_radial_clamp(SkScalar sfx, SkScalar sdx,
1588 SkScalar sfy, SkScalar sdy,
1589 SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
1590 int count, int toggle) {
1591 // Floating point seems to be slower than fixed point,
1592 // even when we have float hardware.
1593 const uint8_t* SK_RESTRICT sqrt_table = gSqrt8Table;
1594 SkFixed fx = SkScalarToFixed(sfx) >> 1;
1595 SkFixed dx = SkScalarToFixed(sdx) >> 1;
1596 SkFixed fy = SkScalarToFixed(sfy) >> 1;
1597 SkFixed dy = SkScalarToFixed(sdy) >> 1;
1598 if ((count > 4) && radial_completely_pinned(fx, dx, fy, dy)) {
1599 unsigned fi = Gradient_Shader::kGradient32Length;
1600 sk_memset32_dither(dstC,
1601 cache[toggle + fi],
1602 cache[(toggle ^ Gradient_Shader::kDitherStride32) + fi],
1603 count);
1604 } else if ((count > 4) &&
1605 no_need_for_radial_pin(fx, dx, fy, dy, count)) {
1606 unsigned fi;
1607 // 4x unroll appears to be no faster than 2x unroll on Linux
1608 while (count > 1) {
1609 UNPINNED_RADIAL_STEP;
1610 UNPINNED_RADIAL_STEP;
1611 count -= 2;
1612 }
1613 if (count) {
1614 UNPINNED_RADIAL_STEP;
1615 }
1616 }
1617 else {
1618 // Specializing for dy == 0 gains us 25% on Skia benchmarks
1619 if (dy == 0) {
1620 unsigned yy = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
1621 yy *= yy;
1622 do {
1623 unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
1624 unsigned fi = (xx * xx + yy) >> (14 + 16 - kSQRT_TABLE_BITS);
1625 fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
1626 *dstC++ = cache[toggle + (sqrt_table[fi] >>
1627 Gradient_Shader::kSqrt32Shift)];
1628 toggle ^= Gradient_Shader::kDitherStride32;
1629 fx += dx;
1630 } while (--count != 0);
1631 } else {
1632 do {
1633 unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
1634 unsigned fi = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
1635 fi = (xx * xx + fi * fi) >> (14 + 16 - kSQRT_TABLE_BITS);
1636 fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
1637 *dstC++ = cache[toggle + (sqrt_table[fi] >>
1638 Gradient_Shader::kSqrt32Shift)];
1639 toggle ^= Gradient_Shader::kDitherStride32;
1640 fx += dx;
1641 fy += dy;
1642 } while (--count != 0);
1643 }
1644 }
1645 }
1646
1647 // Unrolling this loop doesn't seem to help when SkScalar is float: we appear
1648 // to stall waiting on the sqrt results, and there aren't enough spare
1649 // registers to keep many in flight.
1650 void shadeSpan_radial_mirror(SkScalar sfx, SkScalar sdx,
1651 SkScalar sfy, SkScalar sdy,
1652 SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
1653 int count, int toggle) {
1654 do {
1655 #ifdef SK_SCALAR_IS_FLOAT
1656 float fdist = sk_float_sqrt(sfx*sfx + sfy*sfy);
1657 SkFixed dist = SkFloatToFixed(fdist);
1658 #else
1659 SkFixed magnitudeSquared = SkFixedSquare(sfx) +
1660 SkFixedSquare(sfy);
1661 if (magnitudeSquared < 0) // Overflow.
1662 magnitudeSquared = SK_FixedMax;
1663 SkFixed dist = SkFixedSqrt(magnitudeSquared);
1664 #endif
1665 unsigned fi = mirror_tileproc(dist);
1666 SkASSERT(fi <= 0xFFFF);
1667 *dstC++ = cache[toggle + (fi >> Gradient_Shader::kCache32Shift)];
1668 toggle ^= Gradient_Shader::kDitherStride32;
1669 sfx += sdx;
1670 sfy += sdy;
1671 } while (--count != 0);
1672 }
1673
1674 void shadeSpan_radial_repeat(SkScalar sfx, SkScalar sdx,
1675 SkScalar sfy, SkScalar sdy,
1676 SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
1677 int count, int toggle) {
1678 SkFixed fx = SkScalarToFixed(sfx);
1679 SkFixed dx = SkScalarToFixed(sdx);
1680 SkFixed fy = SkScalarToFixed(sfy);
1681 SkFixed dy = SkScalarToFixed(sdy);
1682 do {
1683 SkFixed magnitudeSquared = SkFixedSquare(fx) +
1684 SkFixedSquare(fy);
1685 if (magnitudeSquared < 0) // Overflow.
1686 magnitudeSquared = SK_FixedMax;
1687 SkFixed dist = SkFixedSqrt(magnitudeSquared);
1688 unsigned fi = repeat_tileproc(dist);
1689 SkASSERT(fi <= 0xFFFF);
1690 *dstC++ = cache[toggle + (fi >> Gradient_Shader::kCache32Shift)];
1691 toggle ^= Gradient_Shader::kDitherStride32;
1692 fx += dx;
1693 fy += dy;
1694 } while (--count != 0);
1695 }
1696 }
1697
1698 void Radial_Gradient::shadeSpan(int x, int y,
1699 SkPMColor* SK_RESTRICT dstC, int count) {
1700 SkASSERT(count > 0);
1701
1702 SkPoint srcPt;
1703 SkMatrix::MapXYProc dstProc = fDstToIndexProc;
1704 TileProc proc = fTileProc;
1705 const SkPMColor* SK_RESTRICT cache = this->getCache32();
1706 #ifdef USE_DITHER_32BIT_GRADIENT
1707 int toggle = ((x ^ y) & 1) * Gradient_Shader::kDitherStride32;
1708 #else
1709 int toggle = 0;
1710 #endif
1711
1712 if (fDstToIndexClass != kPerspective_MatrixClass) {
1713 dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
1714 SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
1715 SkScalar sdx = fDstToIndex.getScaleX();
1716 SkScalar sdy = fDstToIndex.getSkewY();
1717
1718 if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
1719 SkFixed storage[2];
1720 (void)fDstToIndex.fixedStepInX(SkIntToScalar(y),
1721 &storage[0], &storage[1]);
1722 sdx = SkFixedToScalar(storage[0]);
1723 sdy = SkFixedToScalar(storage[1]);
1724 } else {
1725 SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
1726 }
1727
1728 RadialShadeProc shadeProc = shadeSpan_radial_repeat;
1729 if (proc == clamp_tileproc) {
1730 shadeProc = shadeSpan_radial_clamp;
1731 } else if (proc == mirror_tileproc) {
1732 shadeProc = shadeSpan_radial_mirror;
1733 } else {
1734 SkASSERT(proc == repeat_tileproc);
1735 }
1736 (*shadeProc)(srcPt.fX, sdx, srcPt.fY, sdy, dstC, cache, count, toggle);
1737 } else { // perspective case
1738 SkScalar dstX = SkIntToScalar(x);
1739 SkScalar dstY = SkIntToScalar(y);
1740 do {
1741 dstProc(fDstToIndex, dstX, dstY, &srcPt);
1742 unsigned fi = proc(SkScalarToFixed(srcPt.length()));
1743 SkASSERT(fi <= 0xFFFF);
1744 *dstC++ = cache[fi >> Gradient_Shader::kCache32Shift];
1745 dstX += SK_Scalar1;
1746 } while (--count != 0);
1747 }
1748 }
1749
1750 /* Two-point radial gradients are specified by two circles, each with a center
1751 point and radius. The gradient can be considered to be a series of
1752 concentric circles, with the color interpolated from the start circle
1753 (at t=0) to the end circle (at t=1).
1754
1755 For each point (x, y) in the span, we want to find the
1756 interpolated circle that intersects that point. The center
1757 of the desired circle (Cx, Cy) falls at some distance t
1758 along the line segment between the start point (Sx, Sy) and
1759 end point (Ex, Ey):
1760
1761 Cx = (1 - t) * Sx + t * Ex (0 <= t <= 1)
1762 Cy = (1 - t) * Sy + t * Ey
1763
1764 The radius of the desired circle (r) is likewise a linear interpolation in t
1765 between the start and end radii (Sr and Er):
1766
1767 r = (1 - t) * Sr + t * Er
1768
1769 But
1770
1771 (x - Cx)^2 + (y - Cy)^2 = r^2
1772
1773 so
1774
1775 (x - ((1 - t) * Sx + t * Ex))^2
1776 + (y - ((1 - t) * Sy + t * Ey))^2
1777 = ((1 - t) * Sr + t * Er)^2
1778
1779 Solving for t yields
1780
1781 [(Sx - Ex)^2 + (Sy - Ey)^2 - (Er - Sr)^2] * t^2
1782 + [2 * (Sx - Ex)(x - Sx) + 2 * (Sy - Ey)(y - Sy) - 2 * (Er - Sr) * Sr] * t
1783 + [(x - Sx)^2 + (y - Sy)^2 - Sr^2] = 0
1784
1785 To simplify, let Dx = Sx - Ex, Dy = Sy - Ey, Dr = Er - Sr, dx = x - Sx, dy = y - Sy
1786
1787 [Dx^2 + Dy^2 - Dr^2] * t^2
1788 + 2 * [Dx * dx + Dy * dy - Dr * Sr] * t
1789 + [dx^2 + dy^2 - Sr^2] = 0
1790
1791 A quadratic in t. The two roots of the quadratic reflect the two
1792 possible circles on which the point may fall. Solving for t yields
1793 the gradient value to use.
1794
1795 If a<0, the start circle is entirely contained in the
1796 end circle, and one of the roots will be <0 or >1 (off the line
1797 segment). If a>0, the start circle falls at least partially
1798 outside the end circle (or vice versa), and the gradient
1799 defines a "tube" where a point may be on one circle (on the
1800 inside of the tube) or the other (outside of the tube). We choose
1801 one arbitrarily.
1802
1803 In order to keep the math to within the limits of fixed point,
1804 we divide the entire quadratic by Dr^2, and replace
1805 (x - Sx)/Dr with x' and (y - Sy)/Dr with y', giving
1806
1807 [Dx^2 / Dr^2 + Dy^2 / Dr^2 - 1] * t^2
1808 + 2 * [x' * Dx / Dr + y' * Dy / Dr - Sr / Dr] * t
1809 + [x'^2 + y'^2 - Sr^2/Dr^2] = 0
1810
1811 (x' and y' are computed by appending the subtract and scale to the
1812 fDstToIndex matrix in the constructor).
1813
1814 Since the 'A' component of the quadratic is independent of x' and y', it
1815 is precomputed in the constructor. Since the 'B' component is linear in
1816 x' and y', if x and y are linear in the span, 'B' can be computed
1817 incrementally with a simple delta (db below). If it is not (e.g.,
1818 a perspective projection), it must be computed in the loop.
1819
1820 */
1821
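// Illustrative sketch (not from the original source): a direct, per-point
// evaluation of the scaled quadratic derived above, written with the same
// SkScalar helpers this file already uses. The function name and its
// parameters are hypothetical; the shipping code precomputes fA and fSr2D2 in
// init() and steps the b term incrementally inside the shade procs below.
static inline SkScalar two_point_radial_reference(SkScalar xp, SkScalar yp,
                                                  SkScalar dxOverDr,
                                                  SkScalar dyOverDr,
                                                  SkScalar srOverDr,
                                                  bool posRoot) {
    // a, b, c of the quadratic in t, already divided through by Dr^2
    SkScalar a = SkScalarSquare(dxOverDr) + SkScalarSquare(dyOverDr) - SK_Scalar1;
    SkScalar b = (SkScalarMul(dxOverDr, xp) +
                  SkScalarMul(dyOverDr, yp) - srOverDr) * 2;
    SkScalar c = SkScalarSquare(xp) + SkScalarSquare(yp) - SkScalarSquare(srOverDr);
    if (0 == a) {
        // Degenerate case: the quadratic collapses to a linear equation in t.
        return SkScalarDiv(-c, b);
    }
    SkScalar discrim = SkScalarSquare(b) - SkScalarMul(a, c) * 4;
    if (discrim < 0) {  // clamp rounding error, as two_point_radial() does
        discrim = -discrim;
    }
    SkScalar rootDiscrim = SkScalarSqrt(discrim);
    // posRoot mirrors the "fDiffRadius < 0" choice made in shadeSpan().
    return posRoot ? SkScalarDiv(-b + rootDiscrim, a * 2)
                   : SkScalarDiv(-b - rootDiscrim, a * 2);
}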
1822 namespace {
1823
1824 inline SkFixed two_point_radial(SkScalar b, SkScalar fx, SkScalar fy,
1825 SkScalar sr2d2, SkScalar foura,
1826 SkScalar oneOverTwoA, bool posRoot) {
1827 SkScalar c = SkScalarSquare(fx) + SkScalarSquare(fy) - sr2d2;
1828 if (0 == foura) {
1829 return SkScalarToFixed(SkScalarDiv(-c, b));
1830 }
1831
1832 SkScalar discrim = SkScalarSquare(b) - SkScalarMul(foura, c);
1833 if (discrim < 0) {
1834 discrim = -discrim;
1835 }
1836 SkScalar rootDiscrim = SkScalarSqrt(discrim);
1837 SkScalar result;
1838 if (posRoot) {
1839 result = SkScalarMul(-b + rootDiscrim, oneOverTwoA);
1840 } else {
1841 result = SkScalarMul(-b - rootDiscrim, oneOverTwoA);
1842 }
1843 return SkScalarToFixed(result);
1844 }
1845
1846 typedef void (* TwoPointRadialShadeProc)(SkScalar fx, SkScalar dx,
1847 SkScalar fy, SkScalar dy,
1848 SkScalar b, SkScalar db,
1849 SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
1850 SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
1851 int count);
1852
1853 void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx,
1854 SkScalar fy, SkScalar dy,
1855 SkScalar b, SkScalar db,
1856 SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
1857 SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
1858 int count) {
1859 for (; count > 0; --count) {
1860 SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
1861 fOneOverTwoA, posRoot);
1862 SkFixed index = SkClampMax(t, 0xFFFF);
1863 SkASSERT(index <= 0xFFFF);
1864 *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
1865 fx += dx;
1866 fy += dy;
1867 b += db;
1868 }
1869 }
1870 void shadeSpan_twopoint_mirror(SkScalar fx, SkScalar dx,
1871 SkScalar fy, SkScalar dy,
1872 SkScalar b, SkScalar db,
1873 SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
1874 SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
1875 int count) {
1876 for (; count > 0; --count) {
1877 SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
1878 fOneOverTwoA, posRoot);
1879 SkFixed index = mirror_tileproc(t);
1880 SkASSERT(index <= 0xFFFF);
1881 *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
1882 fx += dx;
1883 fy += dy;
1884 b += db;
1885 }
1886 }
1887
1888 void shadeSpan_twopoint_repeat(SkScalar fx, SkScalar dx,
1889 SkScalar fy, SkScalar dy,
1890 SkScalar b, SkScalar db,
1891 SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
1892 SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
1893 int count) {
1894 for (; count > 0; --count) {
1895 SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
1896 fOneOverTwoA, posRoot);
1897 SkFixed index = repeat_tileproc(t);
1898 SkASSERT(index <= 0xFFFF);
1899 *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
1900 fx += dx;
1901 fy += dy;
1902 b += db;
1903 }
1904 }
1905
1906
1907
1908 }
1909
1910 class Two_Point_Radial_Gradient : public Gradient_Shader {
1911 public:
1912 Two_Point_Radial_Gradient(const SkPoint& start, SkScalar startRadius,
1913 const SkPoint& end, SkScalar endRadius,
1914 const SkColor colors[], const SkScalar pos[],
1915 int colorCount, SkShader::TileMode mode,
1916 SkUnitMapper* mapper)
1917 : Gradient_Shader(colors, pos, colorCount, mode, mapper),
1918 fCenter1(start),
1919 fCenter2(end),
1920 fRadius1(startRadius),
1921 fRadius2(endRadius) {
1922 init();
1923 }
1924
1925 virtual BitmapType asABitmap(SkBitmap* bitmap,
1926 SkMatrix* matrix,
1927 TileMode* xy,
1928 SkScalar* twoPointRadialParams) const {
1929 if (bitmap) {
1930 this->commonAsABitmap(bitmap);
1931 }
1932 SkScalar diffL = 0; // just to avoid gcc warning
1933 if (matrix || twoPointRadialParams) {
1934 diffL = SkScalarSqrt(SkScalarSquare(fDiff.fX) +
1935 SkScalarSquare(fDiff.fY));
1936 }
1937 if (matrix) {
1938 if (diffL) {
1939 SkScalar invDiffL = SkScalarInvert(diffL);
1940 matrix->setSinCos(-SkScalarMul(invDiffL, fDiff.fY),
1941 SkScalarMul(invDiffL, fDiff.fX));
1942 } else {
1943 matrix->reset();
1944 }
1945 matrix->preConcat(fPtsToUnit);
1946 }
1947 if (xy) {
1948 xy[0] = fTileMode;
1949 xy[1] = kClamp_TileMode;
1950 }
1951 if (NULL != twoPointRadialParams) {
1952 twoPointRadialParams[0] = diffL;
1953 twoPointRadialParams[1] = fStartRadius;
1954 twoPointRadialParams[2] = fDiffRadius;
1955 }
1956 return kTwoPointRadial_BitmapType;
1957 }
1958
1959 virtual GradientType asAGradient(GradientInfo* info) const SK_OVERRIDE {
1960 if (info) {
1961 commonAsAGradient(info);
1962 info->fPoint[0] = fCenter1;
1963 info->fPoint[1] = fCenter2;
1964 info->fRadius[0] = fRadius1;
1965 info->fRadius[1] = fRadius2;
1966 }
1967 return kRadial2_GradientType;
1968 }
1969
1970 virtual void shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC,
1971 int count) SK_OVERRIDE {
1972 SkASSERT(count > 0);
1973
1974 // Zero difference between radii: fill with transparent black.
1975 if (fDiffRadius == 0) {
1976 sk_bzero(dstC, count * sizeof(*dstC));
1977 return;
1978 }
1979 SkMatrix::MapXYProc dstProc = fDstToIndexProc;
1980 TileProc proc = fTileProc;
1981 const SkPMColor* SK_RESTRICT cache = this->getCache32();
1982
1983 SkScalar foura = fA * 4;
1984 bool posRoot = fDiffRadius < 0;
1985 if (fDstToIndexClass != kPerspective_MatrixClass) {
1986 SkPoint srcPt;
1987 dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
1988 SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
1989 SkScalar dx, fx = srcPt.fX;
1990 SkScalar dy, fy = srcPt.fY;
1991
1992 if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
1993 SkFixed fixedX, fixedY;
1994 (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), &fixedX, &fixedY);
1995 dx = SkFixedToScalar(fixedX);
1996 dy = SkFixedToScalar(fixedY);
1997 } else {
1998 SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
1999 dx = fDstToIndex.getScaleX();
2000 dy = fDstToIndex.getSkewY();
2001 }
2002 SkScalar b = (SkScalarMul(fDiff.fX, fx) +
2003 SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
2004 SkScalar db = (SkScalarMul(fDiff.fX, dx) +
2005 SkScalarMul(fDiff.fY, dy)) * 2;
2006
2007 TwoPointRadialShadeProc shadeProc = shadeSpan_twopoint_repeat;
2008 if (proc == clamp_tileproc) {
2009 shadeProc = shadeSpan_twopoint_clamp;
2010 } else if (proc == mirror_tileproc) {
2011 shadeProc = shadeSpan_twopoint_mirror;
2012 } else {
2013 SkASSERT(proc == repeat_tileproc);
2014 }
2015 (*shadeProc)(fx, dx, fy, dy, b, db,
2016 fSr2D2, foura, fOneOverTwoA, posRoot,
2017 dstC, cache, count);
2018 } else { // perspective case
2019 SkScalar dstX = SkIntToScalar(x);
2020 SkScalar dstY = SkIntToScalar(y);
2021 for (; count > 0; --count) {
2022 SkPoint srcPt;
2023 dstProc(fDstToIndex, dstX, dstY, &srcPt);
2024 SkScalar fx = srcPt.fX;
2025 SkScalar fy = srcPt.fY;
2026 SkScalar b = (SkScalarMul(fDiff.fX, fx) +
2027 SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
2028 SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
2029 fOneOverTwoA, posRoot);
2030 SkFixed index = proc(t);
2031 SkASSERT(index <= 0xFFFF);
2032 *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
2033 dstX += SK_Scalar1;
2034 }
2035 }
2036 }
2037
2038 virtual bool setContext(const SkBitmap& device,
2039 const SkPaint& paint,
2040 const SkMatrix& matrix) SK_OVERRIDE {
2041 if (!this->INHERITED::setContext(device, paint, matrix)) {
2042 return false;
2043 }
2044
2045 // we don't have a span16 proc
2046 fFlags &= ~kHasSpan16_Flag;
2047 return true;
2048 }
2049
2050 static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
2051 return SkNEW_ARGS(Two_Point_Radial_Gradient, (buffer));
2052 }
2053
2054 virtual void flatten(SkFlattenableWriteBuffer& buffer) SK_OVERRIDE {
2055 this->INHERITED::flatten(buffer);
2056 buffer.writeScalar(fCenter1.fX);
2057 buffer.writeScalar(fCenter1.fY);
2058 buffer.writeScalar(fCenter2.fX);
2059 buffer.writeScalar(fCenter2.fY);
2060 buffer.writeScalar(fRadius1);
2061 buffer.writeScalar(fRadius2);
2062 }
2063
2064 protected:
2065 Two_Point_Radial_Gradient(SkFlattenableReadBuffer& buffer)
2066 : Gradient_Shader(buffer),
2067 fCenter1(unflatten_point(buffer)),
2068 fCenter2(unflatten_point(buffer)),
2069 fRadius1(buffer.readScalar()),
2070 fRadius2(buffer.readScalar()) {
2071 init();
2072 }
2073 virtual Factory getFactory() SK_OVERRIDE { return CreateProc; }
2074
2075 private:
2076 typedef Gradient_Shader INHERITED;
2077 const SkPoint fCenter1;
2078 const SkPoint fCenter2;
2079 const SkScalar fRadius1;
2080 const SkScalar fRadius2;
2081 SkPoint fDiff;
2082 SkScalar fStartRadius, fDiffRadius, fSr2D2, fA, fOneOverTwoA;
2083
2084 void init() {
2085 fDiff = fCenter1 - fCenter2;
2086 fDiffRadius = fRadius2 - fRadius1;
2087 SkScalar inv = SkScalarInvert(fDiffRadius);
2088 fDiff.fX = SkScalarMul(fDiff.fX, inv);
2089 fDiff.fY = SkScalarMul(fDiff.fY, inv);
2090 fStartRadius = SkScalarMul(fRadius1, inv);
2091 fSr2D2 = SkScalarSquare(fStartRadius);
2092 fA = SkScalarSquare(fDiff.fX) + SkScalarSquare(fDiff.fY) - SK_Scalar1;
2093 fOneOverTwoA = fA ? SkScalarInvert(fA * 2) : 0;
2094
2095 fPtsToUnit.setTranslate(-fCenter1.fX, -fCenter1.fY);
2096 fPtsToUnit.postScale(inv, inv);
2097 }
2098 };
2099
2100 ///////////////////////////////////////////////////////////////////////////////
2101
2102 class Sweep_Gradient : public Gradient_Shader {
2103 public:
2104 Sweep_Gradient(SkScalar cx, SkScalar cy, const SkColor colors[],
2105 const SkScalar pos[], int count, SkUnitMapper* mapper)
2106 : Gradient_Shader(colors, pos, count, SkShader::kClamp_TileMode, mapper),
2107 fCenter(SkPoint::Make(cx, cy))
2108 {
2109 fPtsToUnit.setTranslate(-cx, -cy);
2110 }
2111 virtual void shadeSpan(int x, int y, SkPMColor dstC[], int count) SK_OVERRIDE;
2112 virtual void shadeSpan16(int x, int y, uint16_t dstC[], int count) SK_OVERRIDE;
2113
2114 virtual BitmapType asABitmap(SkBitmap* bitmap,
2115 SkMatrix* matrix,
2116 TileMode* xy,
2117 SkScalar* twoPointRadialParams) const SK_OVERRIDE {
2118 if (bitmap) {
2119 this->commonAsABitmap(bitmap);
2120 }
2121 if (matrix) {
2122 *matrix = fPtsToUnit;
2123 }
2124 if (xy) {
2125 xy[0] = fTileMode;
2126 xy[1] = kClamp_TileMode;
2127 }
2128 return kSweep_BitmapType;
2129 }
2130
2131 virtual GradientType asAGradient(GradientInfo* info) const SK_OVERRIDE {
2132 if (info) {
2133 commonAsAGradient(info);
2134 info->fPoint[0] = fCenter;
2135 }
2136 return kSweep_GradientType;
2137 }
2138
2139 static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
2140 return SkNEW_ARGS(Sweep_Gradient, (buffer));
2141 }
2142
2143 virtual void flatten(SkFlattenableWriteBuffer& buffer) SK_OVERRIDE {
2144 this->INHERITED::flatten(buffer);
2145 buffer.writeScalar(fCenter.fX);
2146 buffer.writeScalar(fCenter.fY);
2147 }
2148
2149 protected:
2150 Sweep_Gradient(SkFlattenableReadBuffer& buffer)
2151 : Gradient_Shader(buffer),
2152 fCenter(unflatten_point(buffer)) {
2153 }
2154
2155 virtual Factory getFactory() SK_OVERRIDE { return CreateProc; }
2156
2157 private:
2158 typedef Gradient_Shader INHERITED;
2159 const SkPoint fCenter;
2160 };
2161
2162 #ifdef COMPUTE_SWEEP_TABLE
2163 #define PI 3.14159265
2164 static bool gSweepTableReady;
2165 static uint8_t gSweepTable[65];
2166
2167 /* Our table stores precomputed values for atan: [0...1] -> [0..PI/4]
2168 We scale the results to [0..32]
2169 */
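// Worked example (illustrative): for i = 32, arg = 32/64 = 0.5 and
// atan(0.5) ~= 0.46365 rad; scaling by DENOM * 2 / PI (= 64 * 2 / PI) gives
// ~18.9, which rounds to 19 -- matching gSweepTable[32] == 19 in the
// precomputed table in the #else branch below.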
2170 static const uint8_t* build_sweep_table() {
2171 if (!gSweepTableReady) {
2172 const int N = 65;
2173 const double DENOM = N - 1;
2174
2175 for (int i = 0; i < N; i++)
2176 {
2177 double arg = i / DENOM;
2178 double v = atan(arg);
2179 int iv = (int)round(v * DENOM * 2 / PI);
2180 // printf("[%d] atan(%g) = %g %d\n", i, arg, v, iv);
2181 printf("%d, ", iv);
2182 gSweepTable[i] = iv;
2183 }
2184 gSweepTableReady = true;
2185 }
2186 return gSweepTable;
2187 }
2188 #else
2189 static const uint8_t gSweepTable[] = {
2190 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9,
2191 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16, 17, 17, 18, 18,
2192 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 25, 26,
2193 26, 27, 27, 27, 28, 28, 29, 29, 29, 30, 30, 30, 31, 31, 31, 32,
2194 32
2195 };
2196 static const uint8_t* build_sweep_table() { return gSweepTable; }
2197 #endif
2198
2199 // Divide numer/denom with a bias of 6 bits. Assumes numer <= denom
2200 // and denom != 0. Since our table is 6 bits wide (65 entries), this is a nice fit.
2201 // Same as (but faster than) SkFixedDiv(numer, denom) >> 10
2202
2203 //unsigned div_64(int numer, int denom);
2204 static unsigned div_64(int numer, int denom) {
2205 SkASSERT(numer <= denom);
2206 SkASSERT(numer > 0);
2207 SkASSERT(denom > 0);
2208
2209 int nbits = SkCLZ(numer);
2210 int dbits = SkCLZ(denom);
2211 int bits = 6 - nbits + dbits;
2212 SkASSERT(bits <= 6);
2213
2214 if (bits < 0) { // detect underflow
2215 return 0;
2216 }
2217
2218 denom <<= dbits - 1;
2219 numer <<= nbits - 1;
2220
2221 unsigned result = 0;
2222
2223 // do the first one
2224 if ((numer -= denom) >= 0) {
2225 result = 1;
2226 } else {
2227 numer += denom;
2228 }
2229
2230 // Now fall into our switch statement if there are more bits to compute
2231 if (bits > 0) {
2232 // make room for the rest of the answer bits
2233 result <<= bits;
2234 switch (bits) {
2235 case 6:
2236 if ((numer = (numer << 1) - denom) >= 0)
2237 result |= 32;
2238 else
2239 numer += denom;
2240 case 5:
2241 if ((numer = (numer << 1) - denom) >= 0)
2242 result |= 16;
2243 else
2244 numer += denom;
2245 case 4:
2246 if ((numer = (numer << 1) - denom) >= 0)
2247 result |= 8;
2248 else
2249 numer += denom;
2250 case 3:
2251 if ((numer = (numer << 1) - denom) >= 0)
2252 result |= 4;
2253 else
2254 numer += denom;
2255 case 2:
2256 if ((numer = (numer << 1) - denom) >= 0)
2257 result |= 2;
2258 else
2259 numer += denom;
2260 case 1:
2261 default: // not strictly needed, but helps GCC generate better ARM code
2262 if ((numer = (numer << 1) - denom) >= 0)
2263 result |= 1;
2264 else
2265 numer += denom;
2266 }
2267 }
2268 return result;
2269 }
2270
2271 // Given x,y in the first quadrant, return 0..63 for the angle [0..90]
2272 static unsigned atan_0_90(SkFixed y, SkFixed x) {
2273 #ifdef SK_DEBUG
2274 {
2275 static bool gOnce;
2276 if (!gOnce) {
2277 gOnce = true;
2278 SkASSERT(div_64(55, 55) == 64);
2279 SkASSERT(div_64(128, 256) == 32);
2280 SkASSERT(div_64(2326528, 4685824) == 31);
2281 SkASSERT(div_64(753664, 5210112) == 9);
2282 SkASSERT(div_64(229376, 4882432) == 3);
2283 SkASSERT(div_64(2, 64) == 2);
2284 SkASSERT(div_64(1, 64) == 1);
2285 // test that we handle underflow correctly
2286 SkASSERT(div_64(12345, 0x54321234) == 0);
2287 }
2288 }
2289 #endif
2290
2291 SkASSERT(y > 0 && x > 0);
2292 const uint8_t* table = build_sweep_table();
2293
2294 unsigned result;
2295 bool swap = (x < y);
2296 if (swap) {
2297 // first part of the atan(v) = PI/2 - atan(1/v) identity
2298 // since our div_64 and table want v <= 1, where v = y/x
2299 SkTSwap<SkFixed>(x, y);
2300 }
2301
2302 result = div_64(y, x);
2303
2304 #ifdef SK_DEBUG
2305 {
2306 unsigned result2 = SkDivBits(y, x, 6);
2307 SkASSERT(result2 == result ||
2308 (result == 1 && result2 == 0));
2309 }
2310 #endif
2311
2312 SkASSERT(result < SK_ARRAY_COUNT(gSweepTable));
2313 result = table[result];
2314
2315 if (swap) {
2316 // complete the atan(v) = PI/2 - atan(1/v) identity
2317 result = 64 - result;
2318 // pin to 63
2319 result -= result >> 6;
2320 }
2321
2322 SkASSERT(result <= 63);
2323 return result;
2324 }
2325
2326 // returns angle in a circle [0..2PI) -> [0..255]
2327 #ifdef SK_SCALAR_IS_FLOAT
2328 static unsigned SkATan2_255(float y, float x) {
2329 // static const float g255Over2PI = 255 / (2 * SK_ScalarPI);
2330 static const float g255Over2PI = 40.584510488433314f;
2331
2332 float result = sk_float_atan2(y, x);
2333 if (result < 0) {
2334 result += 2 * SK_ScalarPI;
2335 }
2336 SkASSERT(result >= 0);
2337 // since our value is always >= 0, we can cast to int, which is faster than
2338 // calling floorf()
2339 int ir = (int)(result * g255Over2PI);
2340 SkASSERT(ir >= 0 && ir <= 255);
2341 return ir;
2342 }
2343 #else
2344 static unsigned SkATan2_255(SkFixed y, SkFixed x) {
2345 if (x == 0) {
2346 if (y == 0) {
2347 return 0;
2348 }
2349 return y < 0 ? 192 : 64;
2350 }
2351 if (y == 0) {
2352 return x < 0 ? 128 : 0;
2353 }
2354
2355 /* Find the right quadrant for x,y
2356 Since atan_0_90 only handles the first quadrant, we rotate x,y
2357 appropriately before calling it, and then add the right amount
2358 to account for the real quadrant.
2359 quadrant 0 : add 0 | x > 0 && y > 0
2360 quadrant 1 : add 64 (90 degrees) | x < 0 && y > 0
2361 quadrant 2 : add 128 (180 degrees) | x < 0 && y < 0
2362 quadrant 3 : add 192 (270 degrees) | x > 0 && y < 0
2363
2364 map x<0 to (1 << 6)
2365 map y<0 to (3 << 6)
2366 add = map_x ^ map_y
2367 */
2368 int xsign = x >> 31;
2369 int ysign = y >> 31;
2370 int add = ((-xsign) ^ (ysign & 3)) << 6;
2371
2372 #ifdef SK_DEBUG
2373 if (0 == add)
2374 SkASSERT(x > 0 && y > 0);
2375 else if (64 == add)
2376 SkASSERT(x < 0 && y > 0);
2377 else if (128 == add)
2378 SkASSERT(x < 0 && y < 0);
2379 else if (192 == add)
2380 SkASSERT(x > 0 && y < 0);
2381 else
2382 SkDEBUGFAIL("bad value for add");
2383 #endif
2384
2385 /* This ^ trick makes x, y positive, and the swap<> handles quadrants
2386 where we need to rotate x,y by 90 or -90
2387 */
2388 x = (x ^ xsign) - xsign;
2389 y = (y ^ ysign) - ysign;
2390 if (add & 64) { // quads 1 or 3 need to swap x,y
2391 SkTSwap<SkFixed>(x, y);
2392 }
2393
2394 unsigned result = add + atan_0_90(y, x);
2395 SkASSERT(result < 256);
2396 return result;
2397 }
2398 #endif
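// Worked example (illustrative): for the 45-degree direction (x == y > 0) the
// fixed-point path computes add = 0 and atan_0_90(y, x) =
// gSweepTable[div_64(y, x)] = gSweepTable[64] = 32, i.e. 32/256 of a full
// turn. The float path truncates PI/4 * 255/(2*PI) to 31; the one-unit
// difference merely selects an adjacent cache entry.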
2399
2400 void Sweep_Gradient::shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC,
2401 int count) {
2402 SkMatrix::MapXYProc proc = fDstToIndexProc;
2403 const SkMatrix& matrix = fDstToIndex;
2404 const SkPMColor* SK_RESTRICT cache = this->getCache32();
2405 SkPoint srcPt;
2406
2407 if (fDstToIndexClass != kPerspective_MatrixClass) {
2408 proc(matrix, SkIntToScalar(x) + SK_ScalarHalf,
2409 SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
2410 SkScalar dx, fx = srcPt.fX;
2411 SkScalar dy, fy = srcPt.fY;
2412
2413 if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
2414 SkFixed storage[2];
2415 (void)matrix.fixedStepInX(SkIntToScalar(y) + SK_ScalarHalf,
2416 &storage[0], &storage[1]);
2417 dx = SkFixedToScalar(storage[0]);
2418 dy = SkFixedToScalar(storage[1]);
2419 } else {
2420 SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
2421 dx = matrix.getScaleX();
2422 dy = matrix.getSkewY();
2423 }
2424
2425 for (; count > 0; --count) {
2426 *dstC++ = cache[SkATan2_255(fy, fx)];
2427 fx += dx;
2428 fy += dy;
2429 }
2430 } else { // perspective case
2431 for (int stop = x + count; x < stop; x++) {
2432 proc(matrix, SkIntToScalar(x) + SK_ScalarHalf,
2433 SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
2434 *dstC++ = cache[SkATan2_255(srcPt.fY, srcPt.fX)];
2435 }
2436 }
2437 }
2438
2439 void Sweep_Gradient::shadeSpan16(int x, int y, uint16_t* SK_RESTRICT dstC,
2440 int count) {
2441 SkMatrix::MapXYProc proc = fDstToIndexProc;
2442 const SkMatrix& matrix = fDstToIndex;
2443 const uint16_t* SK_RESTRICT cache = this->getCache16();
2444 int toggle = ((x ^ y) & 1) * kDitherStride16;
2445 SkPoint srcPt;
2446
2447 if (fDstToIndexClass != kPerspective_MatrixClass) {
2448 proc(matrix, SkIntToScalar(x) + SK_ScalarHalf,
2449 SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
2450 SkScalar dx, fx = srcPt.fX;
2451 SkScalar dy, fy = srcPt.fY;
2452
2453 if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
2454 SkFixed storage[2];
2455 (void)matrix.fixedStepInX(SkIntToScalar(y) + SK_ScalarHalf,
2456 &storage[0], &storage[1]);
2457 dx = SkFixedToScalar(storage[0]);
2458 dy = SkFixedToScalar(storage[1]);
2459 } else {
2460 SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
2461 dx = matrix.getScaleX();
2462 dy = matrix.getSkewY();
2463 }
2464
2465 for (; count > 0; --count) {
2466 int index = SkATan2_255(fy, fx) >> (8 - kCache16Bits);
2467 *dstC++ = cache[toggle + index];
2468 toggle ^= kDitherStride16;
2469 fx += dx;
2470 fy += dy;
2471 }
2472 } else { // perspective case
2473 for (int stop = x + count; x < stop; x++) {
2474 proc(matrix, SkIntToScalar(x) + SK_ScalarHalf,
2475 SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
2476
2477 int index = SkATan2_255(srcPt.fY, srcPt.fX);
2478 index >>= (8 - kCache16Bits);
2479 *dstC++ = cache[toggle + index];
2480 toggle ^= kDitherStride16;
2481 }
2482 }
2483 }
2484
2485 ///////////////////////////////////////////////////////////////////////////////
2486 ///////////////////////////////////////////////////////////////////////////////
2487
2488 // assumes colors is SkColor* and pos is SkScalar*
2489 #define EXPAND_1_COLOR(count) \
2490 SkColor tmp[2]; \
2491 do { \
2492 if (1 == count) { \
2493 tmp[0] = tmp[1] = colors[0]; \
2494 colors = tmp; \
2495 pos = NULL; \
2496 count = 2; \
2497 } \
2498 } while (0)
2499
2500 SkShader* SkGradientShader::CreateLinear(const SkPoint pts[2],
2501 const SkColor colors[],
2502 const SkScalar pos[], int colorCount,
2503 SkShader::TileMode mode,
2504 SkUnitMapper* mapper) {
2505 if (NULL == pts || NULL == colors || colorCount < 1) {
2506 return NULL;
2507 }
2508 EXPAND_1_COLOR(colorCount);
2509
2510 return SkNEW_ARGS(Linear_Gradient,
2511 (pts, colors, pos, colorCount, mode, mapper));
2512 }
2513
2514 SkShader* SkGradientShader::CreateRadial(const SkPoint& center, SkScalar radius,
2515 const SkColor colors[],
2516 const SkScalar pos[], int colorCount,
2517 SkShader::TileMode mode,
2518 SkUnitMapper* mapper) {
2519 if (radius <= 0 || NULL == colors || colorCount < 1) {
2520 return NULL;
2521 }
2522 EXPAND_1_COLOR(colorCount);
2523
2524 return SkNEW_ARGS(Radial_Gradient,
2525 (center, radius, colors, pos, colorCount, mode, mapper));
2526 }
2527
2528 SkShader* SkGradientShader::CreateTwoPointRadial(const SkPoint& start,
2529 SkScalar startRadius,
2530 const SkPoint& end,
2531 SkScalar endRadius,
2532 const SkColor colors[],
2533 const SkScalar pos[],
2534 int colorCount,
2535 SkShader::TileMode mode,
2536 SkUnitMapper* mapper) {
2537 if (startRadius < 0 || endRadius < 0 || NULL == colors || colorCount < 1) {
2538 return NULL;
2539 }
2540 EXPAND_1_COLOR(colorCount);
2541
2542 return SkNEW_ARGS(Two_Point_Radial_Gradient,
2543 (start, startRadius, end, endRadius, colors, pos,
2544 colorCount, mode, mapper));
2545 }
2546
2547 SkShader* SkGradientShader::CreateSweep(SkScalar cx, SkScalar cy,
2548 const SkColor colors[],
2549 const SkScalar pos[],
2550 int count, SkUnitMapper* mapper) {
2551 if (NULL == colors || count < 1) {
2552 return NULL;
2553 }
2554 EXPAND_1_COLOR(count);
2555
2556 return SkNEW_ARGS(Sweep_Gradient, (cx, cy, colors, pos, count, mapper));
2557 }
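// Usage sketch (illustrative, not part of this file): building a two-color
// radial shader with the factory above and installing it on a paint. The
// canvas is assumed to exist in the caller.
//
//     SkColor  colors[] = { SK_ColorBLUE, SK_ColorRED };
//     SkScalar pos[]    = { 0, SK_Scalar1 };
//     SkShader* shader = SkGradientShader::CreateRadial(
//             SkPoint::Make(SkIntToScalar(64), SkIntToScalar(64)),
//             SkIntToScalar(48), colors, pos, 2,
//             SkShader::kClamp_TileMode, NULL);
//     SkPaint paint;
//     paint.setShader(shader);
//     shader->unref();               // the paint now holds its own reference
//     canvas->drawPaint(paint);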
2558
2559 SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkGradientShader)
2560 SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(Linear_Gradient)
2561 SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(Radial_Gradient)
2562
2563 SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(Sweep_Gradient)
2564
2565 SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(Two_Point_Radial_Gradient)
2566 SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
2567