1 /*
2 * Copyright 2008 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 // The copyright below was added in 2009, but I see no record of moto contributions...?
9
10 /* NEON optimized code (C) COPYRIGHT 2009 Motorola
11 *
12 * Use of this source code is governed by a BSD-style license that can be
13 * found in the LICENSE file.
14 */
15
16 #include "SkBitmapProcState.h"
17 #include "SkShader.h"
18 #include "SkTo.h"
19 #include "SkUtils.h"
20
21 /*
22 * The decal_ functions require that
23 * 1. dx > 0
24 * 2. [fx, fx+dx, fx+2dx, fx+3dx, ... fx+(count-1)dx] are all <= maxX
25 *
26 * In addition, we use SkFractionalInt to keep more fractional precision than
27 * just SkFixed, so we will abort the decal_ call if dx is very small, since
28 * the decal_ function just operates on SkFixed. If that were changed, we could
29 * skip the very_small test here.
30 */
can_truncate_to_fixed_for_decal(SkFixed fx,SkFixed dx,int count,unsigned max)31 static inline bool can_truncate_to_fixed_for_decal(SkFixed fx,
32 SkFixed dx,
33 int count, unsigned max) {
34 SkASSERT(count > 0);
35
36 // if decal_ kept SkFractionalInt precision, this would just be dx <= 0
37 // I just made up the 1/256. Just don't want to perceive accumulated error
38 // if we truncate frDx and lose its low bits.
39 if (dx <= SK_Fixed1 / 256) {
40 return false;
41 }
42
43 // Note: it seems the test should be (fx <= max && lastFx <= max); but
44 // historically it's been a strict inequality check, and changing produces
45 // unexpected diffs. Further investigation is needed.
46
47 // We cast to unsigned so we don't have to check for negative values, which
48 // will now appear as very large positive values, and thus fail our test!
49 if ((unsigned)SkFixedFloorToInt(fx) >= max) {
50 return false;
51 }
52
53 // Promote to 64bit (48.16) to avoid overflow.
54 const uint64_t lastFx = fx + sk_64_mul(dx, count - 1);
55
56 return SkTFitsIn<int32_t>(lastFx) && (unsigned)SkFixedFloorToInt(SkTo<int32_t>(lastFx)) < max;
57 }
58
59
60 // When not filtering, we store 32-bit y, 16-bit x, 16-bit x, 16-bit x, ...
61 // When filtering we write out 32-bit encodings, pairing 14.4 x0 with 14-bit x1.
62
63 // The clamp routines may try to fall into one of these unclamped decal fast-paths.
64 // (Only clamp works in the right coordinate space to check for decal.)
decal_nofilter_scale(uint32_t dst[],SkFixed fx,SkFixed dx,int count)65 static void decal_nofilter_scale(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
66 for (; count >= 2; count -= 2) {
67 *dst++ = pack_two_shorts( (fx + 0) >> 16,
68 (fx + dx) >> 16);
69 fx += dx+dx;
70 }
71
72 auto xx = (uint16_t*)dst;
73 while (count --> 0) {
74 *xx++ = SkToU16(fx >> 16);
75 fx += dx;
76 }
77 }
78
79 // A generic implementation for unfiltered scale+translate, templated on tiling method.
// Generic unfiltered scale+translate, templated on the tiling function and
// on whether the unclamped decal fast path may be attempted.
template <unsigned (*tile)(SkFixed, int), bool tryDecal>
static void nofilter_scale(const SkBitmapProcState& s,
                           uint32_t xy[], int count, int x, int y) {
    // Only translate and/or scale may be set; affine/perspective matrices
    // use different procs entirely.
    SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
                             SkMatrix::kScale_Mask)) == 0);

    // Write out our 32-bit y, and get our initial fx.
    SkFractionalInt fx;
    {
        const SkBitmapProcStateAutoMapper mapper(s, x, y);
        *xy++ = tile(mapper.fixedY(), s.fPixmap.height() - 1);
        fx = mapper.fractionalIntX();
    }

    const unsigned maxX = s.fPixmap.width() - 1;
    if (0 == maxX) {
        // If width == 1, all the x-values must refer to that pixel, and must be zero.
        memset(xy, 0, count * sizeof(uint16_t));
        return;
    }

    const SkFractionalInt dx = s.fInvSxFractionalInt;

    if (tryDecal) {
        // If the whole span stays in-bounds, skip the tile proc entirely.
        const SkFixed fixedFx = SkFractionalIntToFixed(fx);
        const SkFixed fixedDx = SkFractionalIntToFixed(dx);

        if (can_truncate_to_fixed_for_decal(fixedFx, fixedDx, count, maxX)) {
            decal_nofilter_scale(xy, fixedFx, fixedDx, count);
            return;
        }
    }

    // Remember, each x-coordinate is 16-bit; two get packed per uint32_t.
    for (; count >= 2; count -= 2) {
        *xy++ = pack_two_shorts(tile(SkFractionalIntToFixed(fx     ), maxX),
                                tile(SkFractionalIntToFixed(fx + dx), maxX));
        fx += dx+dx;
    }

    // Any odd trailing coordinate is written as a bare 16-bit value.
    auto xx = (uint16_t*)xy;
    while (count --> 0) {
        *xx++ = tile(SkFractionalIntToFixed(fx), maxX);
        fx += dx;
    }
}
126
127 // Extract the high four fractional bits from fx, the lerp parameter when filtering.
extract_low_bits_clamp(SkFixed fx,int)128 static unsigned extract_low_bits_clamp(SkFixed fx, int /*max*/) {
129 // If we're already scaled up to by max like clamp/decal,
130 // just grab the high four fractional bits.
131 return (fx >> 12) & 0xf;
132 }
extract_low_bits_repeat_mirror(SkFixed fx,int max)133 static unsigned extract_low_bits_repeat_mirror(SkFixed fx, int max) {
134 // In repeat or mirror fx is in [0,1], so scale up by max first.
135 // TODO: remove the +1 here and the -1 at the call sites...
136 return extract_low_bits_clamp((fx & 0xffff) * (max+1), max);
137 }
138
// Generic filtered scale+translate. Each output is a 32-bit encoding:
// 14-bit lo coordinate | 4 lerp bits | 14-bit hi coordinate.
template <unsigned (*tile)(SkFixed, int), unsigned (*extract_low_bits)(SkFixed, int), bool tryDecal>
static void filter_scale(const SkBitmapProcState& s,
                         uint32_t xy[], int count, int x, int y) {
    SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
                             SkMatrix::kScale_Mask)) == 0);
    SkASSERT(s.fInvKy == 0);

    // Pack (x0, lerp weight, x1 = tile(f+one)) into one 32-bit value.
    auto pack = [](SkFixed f, unsigned max, SkFixed one) {
        unsigned i = tile(f, max);
        i = (i << 4) | extract_low_bits(f, max);
        return (i << 14) | (tile((f + one), max));
    };

    const unsigned maxX = s.fPixmap.width() - 1;
    const SkFractionalInt dx = s.fInvSxFractionalInt;
    SkFractionalInt fx;
    {
        const SkBitmapProcStateAutoMapper mapper(s, x, y);
        const SkFixed fy = mapper.fixedY();
        const unsigned maxY = s.fPixmap.height() - 1;
        // compute our two Y values up front
        *xy++ = pack(fy, maxY, s.fFilterOneY);
        // now initialize fx
        fx = mapper.fractionalIntX();
    }

    // For historical reasons we check both ends are < maxX rather than <= maxX.
    // TODO: try changing this? See also can_truncate_to_fixed_for_decal().
    if (tryDecal &&
        (unsigned)SkFractionalIntToInt(fx               ) < maxX &&
        (unsigned)SkFractionalIntToInt(fx + dx*(count-1)) < maxX) {
        while (count --> 0) {
            SkFixed fixedFx = SkFractionalIntToFixed(fx);
            SkASSERT((fixedFx >> (16 + 14)) == 0);
            // Decal needs no tiling: x0 and the lerp bits come straight from
            // fixedFx, and x1 is just x0 + 1.
            *xy++ = (fixedFx >> 12 << 14) | ((fixedFx >> 16) + 1);
            fx += dx;
        }
        return;
    }

    while (count --> 0) {
        SkFixed fixedFx = SkFractionalIntToFixed(fx);
        *xy++ = pack(fixedFx, maxX, s.fFilterOneX);
        fx += dx;
    }
}
185
// Shift an unsigned value down 16 bits. Because the operand is unsigned
// there is no sign-extension, so callers never need to mask the top bits.
static inline unsigned SK_USHIFT16(unsigned v) {
    return v >> 16;
}
191
clamp(SkFixed fx,int max)192 static unsigned clamp(SkFixed fx, int max) {
193 return SkClampMax(fx >> 16, max);
194 }
repeat(SkFixed fx,int max)195 static unsigned repeat(SkFixed fx, int max) {
196 SkASSERT(max < 65535);
197 return SK_USHIFT16((unsigned)(fx & 0xFFFF) * (max + 1));
198 }
mirror(SkFixed fx,int max)199 static unsigned mirror(SkFixed fx, int max) {
200 SkASSERT(max < 65535);
201 // s is 0xFFFFFFFF if we're on an odd interval, or 0 if an even interval
202 SkFixed s = SkLeftShift(fx, 15) >> 31;
203
204 // This should be exactly the same as repeat(fx ^ s, max) from here on.
205 return SK_USHIFT16( ((fx ^ s) & 0xFFFF) * (max + 1) );
206 }
207
208 // Mirror/Mirror's always just portable code.
// Indexed [nofilter, filter], matching chooseMatrixProc()'s lookup.
static const SkBitmapProcState::MatrixProc MirrorX_MirrorY_Procs[] = {
    nofilter_scale<mirror, false>,
    filter_scale<mirror, extract_low_bits_repeat_mirror, false>,
};
213
214 // Clamp/Clamp and Repeat/Repeat have NEON or portable implementations.
215 #if defined(SK_ARM_HAS_NEON)
216 #include <arm_neon.h>
217
218 // TODO: this is a fine drop-in for decal_nofilter_scale() generally.
// NEON decal fast path for unfiltered scale: writes the integer part of
// fx, fx+dx, ... as packed 16-bit values, eight lanes per iteration.
// TODO: this is a fine drop-in for decal_nofilter_scale() generally.
static void decal_nofilter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
    if (count >= 8) {
        // SkFixed is 16.16 fixed point
        SkFixed dx8 = dx * 8;
        int32x4_t vdx8 = vdupq_n_s32(dx8);

        // setup lbase and hbase: lanes hold fx, fx+dx, fx+2dx, fx+3dx and
        // the next four steps respectively.
        int32x4_t lbase, hbase;
        lbase = vdupq_n_s32(fx);
        lbase = vsetq_lane_s32(fx + dx, lbase, 1);
        lbase = vsetq_lane_s32(fx + dx + dx, lbase, 2);
        lbase = vsetq_lane_s32(fx + dx + dx + dx, lbase, 3);
        hbase = lbase + vdupq_n_s32(4 * dx);

        do {
            // store the upper 16 bits of each 32-bit lane; vuzp's .val[1]
            // gathers the high halves of all eight lanes.
            vst1q_u32(dst, vreinterpretq_u32_s16(
                vuzpq_s16(vreinterpretq_s16_s32(lbase), vreinterpretq_s16_s32(hbase)).val[1]
            ));

            // on to the next group of 8
            lbase += vdx8;
            hbase += vdx8;
            dst += 4; // we did 8 elements but the result is twice smaller
            count -= 8;
            fx += dx8;  // keep the scalar fx in sync for the tail loop
        } while (count >= 8);
    }

    // Scalar tail: up to 7 remaining 16-bit coordinates.
    uint16_t* xx = (uint16_t*)dst;
    for (int i = count; i > 0; --i) {
        *xx++ = SkToU16(fx >> 16); fx += dx;
    }
}
253
// NEON decal fast path for filtered scale: emits one 32-bit encoding per
// coordinate, (fx >> 12 << 14) | ((fx >> 16) + 1), eight at a time.
static void decal_filter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
    if (count >= 8) {
        SkFixed dx8 = dx * 8;
        int32x4_t vdx8 = vdupq_n_s32(dx8);

        // Two vectors of four consecutive sample positions each.
        int32x4_t wide_fx, wide_fx2;
        wide_fx = vdupq_n_s32(fx);
        wide_fx = vsetq_lane_s32(fx + dx, wide_fx, 1);
        wide_fx = vsetq_lane_s32(fx + dx + dx, wide_fx, 2);
        wide_fx = vsetq_lane_s32(fx + dx + dx + dx, wide_fx, 3);

        wide_fx2 = vaddq_s32(wide_fx, vdupq_n_s32(4 * dx));

        while (count >= 8) {
            int32x4_t wide_out;
            int32x4_t wide_out2;

            // Same packing as the scalar tail below: x0+lerp bits in the
            // high 18 bits, x1 = x0 + 1 in the low 14.
            wide_out = vshlq_n_s32(vshrq_n_s32(wide_fx, 12), 14);
            wide_out = wide_out | (vshrq_n_s32(wide_fx,16) + vdupq_n_s32(1));

            wide_out2 = vshlq_n_s32(vshrq_n_s32(wide_fx2, 12), 14);
            wide_out2 = wide_out2 | (vshrq_n_s32(wide_fx2,16) + vdupq_n_s32(1));

            vst1q_u32(dst, vreinterpretq_u32_s32(wide_out));
            vst1q_u32(dst+4, vreinterpretq_u32_s32(wide_out2));

            dst += 8;
            fx += dx8;  // keep the scalar fx in sync for the tail
            wide_fx += vdx8;
            wide_fx2 += vdx8;
            count -= 8;
        }
    }

    // Scalar tail: handle an odd element first, then pairs.
    if (count & 1)
    {
        SkASSERT((fx >> (16 + 14)) == 0);
        *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
        fx += dx;
    }
    while ((count -= 2) >= 0)
    {
        SkASSERT((fx >> (16 + 14)) == 0);
        *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
        fx += dx;

        *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
        fx += dx;
    }
}
304
// Vector clamp for eight coordinates: take the integer (high 16-bit) part
// of each 32-bit fixed-point lane and pin it into [0, max].
static inline int16x8_t clamp8(int32x4_t low, int32x4_t high, unsigned max) {
    int16x8_t res;

    // get the hi 16s of all those 32s
    res = vuzpq_s16(vreinterpretq_s16_s32(low), vreinterpretq_s16_s32(high)).val[1];

    // clamp
    res = vmaxq_s16(res, vdupq_n_s16(0));
    res = vminq_s16(res, vdupq_n_s16(max));

    return res;
}
317
// Vector clamp for four coordinates — the NEON analogue of clamp().
static inline int32x4_t clamp4(int32x4_t f, unsigned max) {
    int32x4_t res;

    // get the hi 16s of all those 32s (arithmetic shift keeps the sign for clamping)
    res = vshrq_n_s32(f, 16);

    // clamp
    res = vmaxq_s32(res, vdupq_n_s32(0));
    res = vminq_s32(res, vdupq_n_s32(max));

    return res;
}
330
// Vector analogue of extract_low_bits_clamp(): lerp bits for four lanes.
static inline int32x4_t extract_low_bits_clamp4(int32x4_t fx, unsigned) {
    int32x4_t ret;

    ret = vshrq_n_s32(fx, 12);

    /* We don't need the mask below because the caller will
     * overwrite the non-masked bits
     */
    //ret = vandq_s32(ret, vdupq_n_s32(0xF));

    return ret;
}
343
// Vector repeat for eight coordinates: scale each lane's 16-bit fraction
// up to [0, max] — the NEON analogue of repeat().
static inline int16x8_t repeat8(int32x4_t low, int32x4_t high, unsigned max) {
    uint16x8_t res;
    uint32x4_t tmpl, tmph;

    // get the lower 16 bits
    res = vuzpq_u16(vreinterpretq_u16_s32(low), vreinterpretq_u16_s32(high)).val[0];

    // bare multiplication, not SkFixedMul
    tmpl = vmull_u16(vget_low_u16(res), vdup_n_u16(max+1));
    tmph = vmull_u16(vget_high_u16(res), vdup_n_u16(max+1));

    // extraction of the 16 upper bits
    res = vuzpq_u16(vreinterpretq_u16_u32(tmpl), vreinterpretq_u16_u32(tmph)).val[1];

    return vreinterpretq_s16_u16(res);
}
360
// Vector repeat for four coordinates — the NEON analogue of repeat().
static inline int32x4_t repeat4(int32x4_t f, unsigned max) {
    uint16x4_t res;
    uint32x4_t tmp;

    // get the lower 16 bits
    res = vmovn_u32(vreinterpretq_u32_s32(f));

    // bare multiplication, not SkFixedMul
    tmp = vmull_u16(res, vdup_n_u16(max+1));

    // extraction of the 16 upper bits
    tmp = vshrq_n_u32(tmp, 16);

    return vreinterpretq_s32_u32(tmp);
}
376
// Vector analogue of extract_low_bits_repeat_mirror(): scale each lane's
// fraction by max+1, then shift down to expose the four lerp bits.
static inline int32x4_t extract_low_bits_repeat_mirror4(int32x4_t fx, unsigned max) {
    uint16x4_t res;
    uint32x4_t tmp;
    int32x4_t ret;

    // get the lower 16 bits
    res = vmovn_u32(vreinterpretq_u32_s32(fx));

    // bare multiplication, not SkFixedMul
    tmp = vmull_u16(res, vdup_n_u16(max + 1));

    // shift and mask
    ret = vshrq_n_s32(vreinterpretq_s32_u32(tmp), 12);

    /* We don't need the mask below because the caller will
     * overwrite the non-masked bits
     */
    //ret = vandq_s32(ret, vdupq_n_s32(0xF));

    return ret;
}
398
// NEON unfiltered scale+translate, templated on the scalar tile function
// (for the tail), its 8-wide vector counterpart, and decal eligibility.
template <unsigned (*tile)(SkFixed, int),
          int16x8_t (*tile8)(int32x4_t, int32x4_t, unsigned),
          bool tryDecal>
static void nofilter_scale_neon(const SkBitmapProcState& s,
                                uint32_t xy[], int count, int x, int y) {
    SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
                             SkMatrix::kScale_Mask)) == 0);

    // we store y, x, x, x, x, x
    const unsigned maxX = s.fPixmap.width() - 1;
    SkFractionalInt fx;
    {
        const SkBitmapProcStateAutoMapper mapper(s, x, y);
        const unsigned maxY = s.fPixmap.height() - 1;
        *xy++ = tile(mapper.fixedY(), maxY);
        fx = mapper.fractionalIntX();
    }

    if (0 == maxX) {
        // all of the following X values must be 0
        memset(xy, 0, count * sizeof(uint16_t));
        return;
    }

    const SkFractionalInt dx = s.fInvSxFractionalInt;

    // test if we don't need to apply the tile proc
    const SkFixed fixedFx = SkFractionalIntToFixed(fx);
    const SkFixed fixedDx = SkFractionalIntToFixed(dx);
    if (tryDecal && can_truncate_to_fixed_for_decal(fixedFx, fixedDx, count, maxX)) {
        decal_nofilter_scale_neon(xy, fixedFx, fixedDx, count);
        return;
    }

    if (count >= 8) {
        SkFractionalInt dx2 = dx+dx;
        SkFractionalInt dx4 = dx2+dx2;
        SkFractionalInt dx8 = dx4+dx4;

        // now build fx/fx+dx/fx+2dx/fx+3dx
        SkFractionalInt fx1, fx2, fx3;
        int32x4_t lbase, hbase;
        int16_t *dst16 = (int16_t *)xy;

        fx1 = fx+dx;
        fx2 = fx1+dx;
        fx3 = fx2+dx;

        lbase = vdupq_n_s32(SkFractionalIntToFixed(fx));
        lbase = vsetq_lane_s32(SkFractionalIntToFixed(fx1), lbase, 1);
        lbase = vsetq_lane_s32(SkFractionalIntToFixed(fx2), lbase, 2);
        lbase = vsetq_lane_s32(SkFractionalIntToFixed(fx3), lbase, 3);
        hbase = vaddq_s32(lbase, vdupq_n_s32(SkFractionalIntToFixed(dx4)));

        // store & bump: tile eight coordinates per iteration
        while (count >= 8) {

            int16x8_t fx8;

            fx8 = tile8(lbase, hbase, maxX);

            vst1q_s16(dst16, fx8);

            // but preserving base & on to the next
            lbase = vaddq_s32 (lbase, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
            hbase = vaddq_s32 (hbase, vdupq_n_s32(SkFractionalIntToFixed(dx8)));
            dst16 += 8;
            count -= 8;
            fx += dx8;  // keep the full-precision scalar fx in sync for the tail
        };
        xy = (uint32_t *) dst16;
    }

    // Scalar tail: tile the remaining (< 8) coordinates one at a time.
    uint16_t* xx = (uint16_t*)xy;
    for (int i = count; i > 0; --i) {
        *xx++ = tile(SkFractionalIntToFixed(fx), maxX);
        fx += dx;
    }
}
478
// NEON filtered scale+translate, templated on scalar and 4-wide vector
// versions of the tile and lerp-bit-extraction functions.
template <unsigned  (*tile )(SkFixed, int),
          int32x4_t (*tile4)(int32x4_t, unsigned),
          unsigned  (*extract_low_bits )(SkFixed, int),
          int32x4_t (*extract_low_bits4)(int32x4_t, unsigned),
          bool tryDecal>
static void filter_scale_neon(const SkBitmapProcState& s,
                              uint32_t xy[], int count, int x, int y) {
    SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask |
                             SkMatrix::kScale_Mask)) == 0);
    SkASSERT(s.fInvKy == 0);

    // Scalar packing: (x0, lerp bits, x1) in one 32-bit value, as in filter_scale().
    auto pack = [&](SkFixed f, unsigned max, SkFixed one) {
        unsigned i = tile(f, max);
        i = (i << 4) | extract_low_bits(f, max);
        return (i << 14) | (tile((f + one), max));
    };

    // Same packing, four coordinates at a time. vsli merges the tiled
    // coordinate over the lerp bits' low nibble.
    auto pack4 = [&](int32x4_t f, unsigned max, SkFixed one) {
        int32x4_t ret, res;

        res = tile4(f, max);

        ret = extract_low_bits4(f, max);
        ret = vsliq_n_s32(ret, res, 4);

        res = tile4(f + vdupq_n_s32(one), max);
        ret = vorrq_s32(vshlq_n_s32(ret, 14), res);

        return ret;
    };

    const unsigned maxX = s.fPixmap.width() - 1;
    const SkFixed one = s.fFilterOneX;
    const SkFractionalInt dx = s.fInvSxFractionalInt;
    SkFractionalInt fx;

    {
        const SkBitmapProcStateAutoMapper mapper(s, x, y);
        const SkFixed fy = mapper.fixedY();
        const unsigned maxY = s.fPixmap.height() - 1;
        // compute our two Y values up front
        *xy++ = pack(fy, maxY, s.fFilterOneY);
        // now initialize fx
        fx = mapper.fractionalIntX();
    }

    // test if we don't need to apply the tile proc
    const SkFixed fixedFx = SkFractionalIntToFixed(fx);
    const SkFixed fixedDx = SkFractionalIntToFixed(dx);
    if (tryDecal && can_truncate_to_fixed_for_decal(fixedFx, fixedDx, count, maxX)) {
        decal_filter_scale_neon(xy, fixedFx, fixedDx, count);
        return;
    }

    if (count >= 4) {
        int32x4_t wide_fx;

        // Four consecutive sample positions, one per lane.
        wide_fx = vdupq_n_s32(SkFractionalIntToFixed(fx));
        wide_fx = vsetq_lane_s32(SkFractionalIntToFixed(fx+dx), wide_fx, 1);
        wide_fx = vsetq_lane_s32(SkFractionalIntToFixed(fx+dx+dx), wide_fx, 2);
        wide_fx = vsetq_lane_s32(SkFractionalIntToFixed(fx+dx+dx+dx), wide_fx, 3);

        while (count >= 4) {
            int32x4_t res;

            res = pack4(wide_fx, maxX, one);

            vst1q_u32(xy, vreinterpretq_u32_s32(res));

            wide_fx += vdupq_n_s32(SkFractionalIntToFixed(dx+dx+dx+dx));
            fx += dx+dx+dx+dx;  // keep the full-precision scalar fx in sync
            xy += 4;
            count -= 4;
        }
    }

    // Scalar tail: pack the remaining (< 4) coordinates one at a time.
    while (--count >= 0) {
        *xy++ = pack(SkFractionalIntToFixed(fx), maxX, one);
        fx += dx;
    }
}
560
// NEON proc tables, indexed [nofilter, filter] by chooseMatrixProc().
// Only clamp may attempt the decal fast path (tryDecal == true).
static const SkBitmapProcState::MatrixProc ClampX_ClampY_Procs[] = {
    nofilter_scale_neon<clamp, clamp8, true>,
    filter_scale_neon<clamp,
                      clamp4,
                      extract_low_bits_clamp,
                      extract_low_bits_clamp4,
                      true>,
};

static const SkBitmapProcState::MatrixProc RepeatX_RepeatY_Procs[] = {
    nofilter_scale_neon<repeat, repeat8, false>,
    filter_scale_neon<repeat,
                      repeat4,
                      extract_low_bits_repeat_mirror,
                      extract_low_bits_repeat_mirror4,
                      false>,
};
578
579 #else
// Portable proc tables, indexed [nofilter, filter] by chooseMatrixProc().
// Only clamp may attempt the decal fast path (tryDecal == true).
static const SkBitmapProcState::MatrixProc ClampX_ClampY_Procs[] = {
    nofilter_scale<clamp, true>,
    filter_scale<clamp, extract_low_bits_clamp, true>,
};

static const SkBitmapProcState::MatrixProc RepeatX_RepeatY_Procs[] = {
    nofilter_scale<repeat, false>,
    filter_scale<repeat, extract_low_bits_repeat_mirror, false>,
};
589 #endif
590
591
592 ///////////////////////////////////////////////////////////////////////////////
593 // This next chunk has some specializations for unfiltered translate-only matrices.
594
int_clamp(int x,int n)595 static inline U16CPU int_clamp(int x, int n) {
596 if (x < 0) { x = 0; }
597 if (x >= n) { x = n - 1; }
598 return x;
599 }
600
601 /* returns 0...(n-1) given any x (positive or negative).
602
603 As an example, if n (which is always positive) is 5...
604
605 x: -8 -7 -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8
606 returns: 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3
607 */
sk_int_mod(int x,int n)608 static inline int sk_int_mod(int x, int n) {
609 SkASSERT(n > 0);
610 if ((unsigned)x >= (unsigned)n) {
611 if (x < 0) {
612 x = n + ~(~x % n);
613 } else {
614 x = x % n;
615 }
616 }
617 return x;
618 }
619
// Tile x into [0, n) by wrapping — repeat tiling in integer space.
static inline U16CPU int_repeat(int x, int n) {
    return sk_int_mod(x, n);
}
623
int_mirror(int x,int n)624 static inline U16CPU int_mirror(int x, int n) {
625 x = sk_int_mod(x, 2 * n);
626 if (x >= n) {
627 x = n + ~(x - n);
628 }
629 return x;
630 }
631
// Write count ascending x-indices (pos, pos+1, ...) into xptr.
static void fill_sequential(uint16_t xptr[], int pos, int count) {
    for (int i = 0; i < count; ++i) {
        xptr[i] = (uint16_t)(pos + i);
    }
}
637
fill_backwards(uint16_t xptr[],int pos,int count)638 static void fill_backwards(uint16_t xptr[], int pos, int count) {
639 while (count --> 0) {
640 SkASSERT(pos >= 0);
641 *xptr++ = pos--;
642 }
643 }
644
// Unfiltered translate-only with clamp tiling: one clamped 32-bit y, then
// count 16-bit x indices — zeros before the bitmap, sequential across it,
// width-1 after it.
static void clampx_nofilter_trans(const SkBitmapProcState& s,
                                  uint32_t xy[], int count, int x, int y) {
    SkASSERT((s.fInvType & ~SkMatrix::kTranslate_Mask) == 0);

    const SkBitmapProcStateAutoMapper mapper(s, x, y);
    *xy++ = int_clamp(mapper.intY(), s.fPixmap.height());
    int xpos = mapper.intX();

    const int width = s.fPixmap.width();
    if (1 == width) {
        // all of the following X values must be 0
        memset(xy, 0, count * sizeof(uint16_t));
        return;
    }

    uint16_t* xptr = reinterpret_cast<uint16_t*>(xy);
    int n;

    // fill before 0 as needed
    if (xpos < 0) {
        n = -xpos;
        if (n > count) {
            n = count;
        }
        memset(xptr, 0, n * sizeof(uint16_t));
        count -= n;
        if (0 == count) {
            return;
        }
        xptr += n;
        xpos = 0;
    }

    // fill in 0..width-1 if needed
    if (xpos < width) {
        n = width - xpos;
        if (n > count) {
            n = count;
        }
        fill_sequential(xptr, xpos, n);
        count -= n;
        if (0 == count) {
            return;
        }
        xptr += n;
    }

    // fill the remaining with the max value
    sk_memset16(xptr, width - 1, count);
}
695
// Unfiltered translate-only with repeat tiling: one wrapped 32-bit y, then
// a partial first period followed by full 0..width-1 runs.
static void repeatx_nofilter_trans(const SkBitmapProcState& s,
                                   uint32_t xy[], int count, int x, int y) {
    SkASSERT((s.fInvType & ~SkMatrix::kTranslate_Mask) == 0);

    const SkBitmapProcStateAutoMapper mapper(s, x, y);
    *xy++ = int_repeat(mapper.intY(), s.fPixmap.height());
    int xpos = mapper.intX();

    const int width = s.fPixmap.width();
    if (1 == width) {
        // all of the following X values must be 0
        memset(xy, 0, count * sizeof(uint16_t));
        return;
    }

    uint16_t* xptr = reinterpret_cast<uint16_t*>(xy);
    // Finish the (possibly partial) period we start in.
    int start = sk_int_mod(xpos, width);
    int n = width - start;
    if (n > count) {
        n = count;
    }
    fill_sequential(xptr, start, n);
    xptr += n;
    count -= n;

    // Whole periods.
    while (count >= width) {
        fill_sequential(xptr, 0, width);
        xptr += width;
        count -= width;
    }

    // Trailing partial period.
    if (count > 0) {
        fill_sequential(xptr, 0, count);
    }
}
731
// Unfiltered translate-only with mirror tiling: one mirrored 32-bit y, then
// alternating forward (0..width-1) and backward (width-1..0) runs of x.
static void mirrorx_nofilter_trans(const SkBitmapProcState& s,
                                   uint32_t xy[], int count, int x, int y) {
    SkASSERT((s.fInvType & ~SkMatrix::kTranslate_Mask) == 0);

    const SkBitmapProcStateAutoMapper mapper(s, x, y);
    *xy++ = int_mirror(mapper.intY(), s.fPixmap.height());
    int xpos = mapper.intX();

    const int width = s.fPixmap.width();
    if (1 == width) {
        // all of the following X values must be 0
        memset(xy, 0, count * sizeof(uint16_t));
        return;
    }

    uint16_t* xptr = reinterpret_cast<uint16_t*>(xy);
    // need to know our start, and our initial phase (forward or backward)
    bool forward;
    int n;
    int start = sk_int_mod(xpos, 2 * width);
    if (start >= width) {
        // Reflected half of the period: walk backwards toward 0.
        // width + ~(start - width) == 2*width - 1 - start.
        start = width + ~(start - width);
        forward = false;
        n = start + 1;          // [start .. 0]
    } else {
        forward = true;
        n = width - start;      // [start .. width)
    }
    if (n > count) {
        n = count;
    }
    if (forward) {
        fill_sequential(xptr, start, n);
    } else {
        fill_backwards(xptr, start, n);
    }
    forward = !forward;
    xptr += n;
    count -= n;

    // Full periods, alternating direction each time.
    while (count >= width) {
        if (forward) {
            fill_sequential(xptr, 0, width);
        } else {
            fill_backwards(xptr, width - 1, width);
        }
        forward = !forward;
        xptr += width;
        count -= width;
    }

    // Trailing partial period.
    if (count > 0) {
        if (forward) {
            fill_sequential(xptr, 0, count);
        } else {
            fill_backwards(xptr, width - 1, count);
        }
    }
}
791
792 ///////////////////////////////////////////////////////////////////////////////
793 // The main entry point to the file, choosing between everything above.
794
// Select the matrix proc for the current tile mode / filter quality, and set
// fFilterOneX/Y (the filter's "one texel" step) as a side effect.
SkBitmapProcState::MatrixProc SkBitmapProcState::chooseMatrixProc(bool translate_only_matrix) {
    SkASSERT(fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask));
    SkASSERT(fTileModeX == fTileModeY);
    SkASSERT(fTileModeX != SkShader::kDecal_TileMode);

    // Check for our special case translate methods when there is no scale/affine/perspective.
    if (translate_only_matrix && kNone_SkFilterQuality == fFilterQuality) {
        switch (fTileModeX) {
            default: SkASSERT(false);
            case SkShader::kClamp_TileMode:  return  clampx_nofilter_trans;
            case SkShader::kRepeat_TileMode: return repeatx_nofilter_trans;
            case SkShader::kMirror_TileMode: return mirrorx_nofilter_trans;
        }
    }

    // The arrays are all [ nofilter, filter ].
    int index = fFilterQuality > kNone_SkFilterQuality ? 1 : 0;

    if (fTileModeX == SkShader::kClamp_TileMode) {
        // clamp gets special version of filterOne, working in non-normalized space (allowing decal)
        fFilterOneX = SK_Fixed1;
        fFilterOneY = SK_Fixed1;
        return ClampX_ClampY_Procs[index];
    }

    // all remaining procs use this form for filterOne, putting them into normalized space.
    fFilterOneX = SK_Fixed1 / fPixmap.width();
    fFilterOneY = SK_Fixed1 / fPixmap.height();

    if (fTileModeX == SkShader::kRepeat_TileMode) {
        return RepeatX_RepeatY_Procs[index];
    }

    return MirrorX_MirrorY_Procs[index];
}
830