/*
 * Copyright 2012 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBlitRow.h"
#include "SkColorPriv.h"
#include "SkDither.h"
#include "SkMathPriv.h"
#include "SkUtils.h"
#include "SkUtilsArm.h"

// Define USE_NEON_CODE to indicate that we need to build NEON routines
#define USE_NEON_CODE  (!SK_ARM_NEON_IS_NONE)

// Define USE_ARM_CODE to indicate that we need to build ARM routines
#define USE_ARM_CODE   (!SK_ARM_NEON_IS_ALWAYS)

#if USE_NEON_CODE
  #include "SkBlitRow_opts_arm_neon.h"
#endif

#if USE_ARM_CODE

static void S32A_D565_Opaque(uint16_t* SK_RESTRICT dst,
                             const SkPMColor* SK_RESTRICT src, int count,
                             U8CPU alpha, int /*x*/, int /*y*/) {
    SkASSERT(255 == alpha);

    asm volatile (
                  "1:                                   \n\t" /* <loop over pixels> */
                  "ldr     r3, [%[src]], #4             \n\t" /* load the src pixel into r3, advance src */
                  "cmp     r3, #0xff000000              \n\t" /* test the src alpha */
                  "blo     2f                           \n\t" /* alpha < 255 -> blend path */
                  "and     r4, r3, #0x0000f8            \n\t" /* extract the top 5 bits of red */
                  "and     r5, r3, #0x00fc00            \n\t" /* extract the top 6 bits of green */
                  "and     r6, r3, #0xf80000            \n\t" /* extract the top 5 bits of blue */
                  "pld     [r1, #32]                    \n\t" /* prefetch the next src pixels */
                  "lsl     r3, r4, #8                   \n\t" /* move red to bits 11-15 */
                  "orr     r3, r3, r5, lsr #5           \n\t" /* merge green into bits 5-10 */
                  "orr     r3, r3, r6, lsr #19          \n\t" /* merge blue into bits 0-4 */
                  "subs    %[count], %[count], #1       \n\t" /* decrement count */
                  "strh    r3, [%[dst]], #2             \n\t" /* store the 565 pixel, advance dst */
                  "bne     1b                           \n\t" /* more pixels -> loop */
                  "b       4f                           \n\t" /* done -> exit */
                  "2:                                   \n\t" /* <blend path: alpha < 255> */
                  "lsrs    r7, r3, #24                  \n\t" /* r7 = src alpha */
                  "beq     3f                           \n\t" /* alpha == 0 -> skip this pixel */
                  "ldrh    r4, [%[dst]]                 \n\t" /* load the dst 565 pixel */
                  "rsb     r7, r7, #255                 \n\t" /* r7 = scale = 255 - alpha */
                  "and     r6, r4, #0x001f              \n\t" /* r6 = dst blue (5 bits) */
#if SK_ARM_ARCH == 6
                  "lsl     r5, r4, #21                  \n\t" /* r5 = dst green (6 bits), */
                  "lsr     r5, r5, #26                  \n\t" /* extracted with two shifts */
#else
                  "ubfx    r5, r4, #5, #6               \n\t" /* r5 = dst green (6 bits) */
#endif
                  "pld     [r0, #16]                    \n\t" /* prefetch the next dst pixels */
                  "lsr     r4, r4, #11                  \n\t" /* r4 = dst red (5 bits) */
#ifdef SK_ARM_HAS_EDSP
                  "smulbb  r6, r6, r7                   \n\t" /* blue *= scale */
                  "smulbb  r5, r5, r7                   \n\t" /* green *= scale */
                  "smulbb  r4, r4, r7                   \n\t" /* red *= scale */
#else
                  "mul     r6, r6, r7                   \n\t" /* blue *= scale */
                  "mul     r5, r5, r7                   \n\t" /* green *= scale */
                  "mul     r4, r4, r7                   \n\t" /* red *= scale */
#endif
                  "uxtb    r7, r3, ROR #16              \n\t" /* r7 = src blue */
                  "uxtb    ip, r3, ROR #8               \n\t" /* ip = src green */
                  "and     r3, r3, #0xff                \n\t" /* r3 = src red */
                  "add     r6, r6, #16                  \n\t" /* round blue */
                  "add     r5, r5, #32                  \n\t" /* round green */
                  "add     r4, r4, #16                  \n\t" /* round red */
                  "add     r6, r6, r6, lsr #5           \n\t" /* these adds, with the shifts   */
                  "add     r5, r5, r5, lsr #6           \n\t" /* below, widen the scaled dst   */
                  "add     r4, r4, r4, lsr #5           \n\t" /* channels back to 8 bits       */
                  "add     r6, r7, r6, lsr #5           \n\t" /* blue = src blue + scaled dst blue */
                  "add     r5, ip, r5, lsr #6           \n\t" /* green = src green + scaled dst green */
                  "add     r4, r3, r4, lsr #5           \n\t" /* red = src red + scaled dst red */
                  "lsr     r6, r6, #3                   \n\t" /* blue back to 5 bits */
                  "and     r5, r5, #0xfc                \n\t" /* keep the top 6 bits of green */
                  "and     r4, r4, #0xf8                \n\t" /* keep the top 5 bits of red */
                  "orr     r6, r6, r5, lsl #3           \n\t" /* merge green into bits 5-10 */
                  "orr     r4, r6, r4, lsl #8           \n\t" /* merge red into bits 11-15 */
                  "strh    r4, [%[dst]], #2             \n\t" /* store the 565 pixel, advance dst */
                  "pld     [r1, #32]                    \n\t" /* prefetch the next src pixels */
                  "subs    %[count], %[count], #1       \n\t" /* decrement count */
                  "bne     1b                           \n\t" /* more pixels -> loop */
                  "b       4f                           \n\t" /* done -> exit */
                  "3:                                   \n\t" /* <alpha == 0: leave dst as is> */
                  "subs    %[count], %[count], #1       \n\t" /* decrement count */
                  "add     %[dst], %[dst], #2           \n\t" /* advance dst past the pixel */
                  "bne     1b                           \n\t" /* more pixels -> loop */
                  "4:                                   \n\t" /* <exit> */
                  : [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count)
                  :
                  : "memory", "cc", "r3", "r4", "r5", "r6", "r7", "ip"
                  );
}

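/*
 * Illustrative sketch, not part of the original source: a plain-C model of the
 * per-pixel math performed by the assembly above. It uses exact arithmetic
 * where the asm approximates x/255 with shifts, so results may differ by +/-1
 * per channel. The byte order (red in the low byte of the SkPMColor) is
 * inferred from the masks in the asm; the helper name is hypothetical.
 */
static inline uint16_t S32A_D565_Opaque_ref(uint32_t src, uint16_t dst) {
    unsigned a = src >> 24;
    if (a == 255) {   // opaque fast path: straight 8888 -> 565 pack
        return (uint16_t)(((src & 0x0000F8) << 8) |
                          ((src & 0x00FC00) >> 5) |
                          ((src & 0xF80000) >> 19));
    }
    if (a == 0) {     // fully transparent: dst is left untouched
        return dst;
    }
    unsigned scale = 255 - a;
    // widen each dst 565 channel to 8 bits, then scale it by (255 - a)/255
    unsigned dr = (((dst >> 11) & 0x1F) * 255 / 31) * scale / 255;
    unsigned dg = (((dst >>  5) & 0x3F) * 255 / 63) * scale / 255;
    unsigned db = (( dst       ) & 0x1F) * 255 / 31 * scale / 255;
    // add the premultiplied src channels and repack to 565
    unsigned r = ( src        & 0xFF) + dr;
    unsigned g = ((src >>  8) & 0xFF) + dg;
    unsigned b = ((src >> 16) & 0xFF) + db;
    return (uint16_t)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}
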
static void S32A_Opaque_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
                                  const SkPMColor* SK_RESTRICT src,
                                  int count, U8CPU alpha) {

    SkASSERT(255 == alpha);

    asm volatile (
                  "cmp    %[count], #0               \n\t" /* comparing count with 0 */
                  "beq    3f                         \n\t" /* if zero, exit */

                  "mov    ip, #0xff                  \n\t" /* load the 0xff mask in ip */
                  "orr    ip, ip, ip, lsl #16        \n\t" /* convert it to 0xff00ff in ip */

                  "cmp    %[count], #2               \n\t" /* compare count with 2 */
                  "blt    2f                         \n\t" /* if less than 2 -> single loop */

                  /* Double Loop */
                  "1:                                \n\t" /* <double loop> */
                  "ldm    %[src]!, {r5,r6}           \n\t" /* load two src pixels into r5-r6 */
                  "ldm    %[dst], {r7,r8}            \n\t" /* load two dst pixels into r7-r8 */
                  "lsr    r4, r5, #24                \n\t" /* extract the alpha of src1 into r4 */

                  /* ----------- */
                  "and    r9, ip, r7                 \n\t" /* r9 = br masked by ip */
                  "rsb    r4, r4, #256               \n\t" /* subtract the alpha from 256 -> r4=scale */
                  "and    r10, ip, r7, lsr #8        \n\t" /* r10 = ag masked by ip */

                  "mul    r9, r9, r4                 \n\t" /* br = br * scale */
                  "mul    r10, r10, r4               \n\t" /* ag = ag * scale */
                  "and    r9, ip, r9, lsr #8         \n\t" /* lsr br by 8 and mask it */

                  "and    r10, r10, ip, lsl #8       \n\t" /* mask ag with the reverse mask */
                  "lsr    r4, r6, #24                \n\t" /* extract the alpha of src2 into r4 */
                  "orr    r7, r9, r10                \n\t" /* br | ag */

                  "add    r7, r5, r7                 \n\t" /* r7 = src1 + scaled dst1 */
                  "rsb    r4, r4, #256               \n\t" /* subtract the alpha from 256 -> r4=scale */

                  /* ----------- */
                  "and    r9, ip, r8                 \n\t" /* r9 = br masked by ip */

                  "and    r10, ip, r8, lsr #8        \n\t" /* r10 = ag masked by ip */
                  "mul    r9, r9, r4                 \n\t" /* br = br * scale */
                  "sub    %[count], %[count], #2     \n\t"
                  "mul    r10, r10, r4               \n\t" /* ag = ag * scale */

                  "and    r9, ip, r9, lsr #8         \n\t" /* lsr br by 8 and mask it */
                  "and    r10, r10, ip, lsl #8       \n\t" /* mask ag with the reverse mask */
                  "cmp    %[count], #1               \n\t" /* comparing count with 1 */
                  "orr    r8, r9, r10                \n\t" /* br | ag */

                  "add    r8, r6, r8                 \n\t" /* r8 = src2 + scaled dst2 */

                  /* ----------------- */
                  "stm    %[dst]!, {r7,r8}           \n\t" /* store r7-r8 to dst, advance dst by two pixels */
                  /* ----------------- */

                  "bgt    1b                         \n\t" /* if greater than 1 -> reloop */
                  "blt    3f                         \n\t" /* if less than 1 -> exit */

                  /* Single Loop */
                  "2:                                \n\t" /* <single loop> */
                  "ldr    r5, [%[src]], #4           \n\t" /* load one src pixel into r5 */
                  "ldr    r7, [%[dst]]               \n\t" /* load one dst pixel into r7 */
                  "lsr    r4, r5, #24                \n\t" /* extract the alpha of src into r4 */

                  /* ----------- */
                  "and    r9, ip, r7                 \n\t" /* r9 = br masked by ip */
                  "rsb    r4, r4, #256               \n\t" /* subtract the alpha from 256 -> r4=scale */

                  "and    r10, ip, r7, lsr #8        \n\t" /* r10 = ag masked by ip */
                  "mul    r9, r9, r4                 \n\t" /* br = br * scale */
                  "mul    r10, r10, r4               \n\t" /* ag = ag * scale */
                  "and    r9, ip, r9, lsr #8         \n\t" /* lsr br by 8 and mask it */

                  "and    r10, r10, ip, lsl #8       \n\t" /* mask ag */
                  "orr    r7, r9, r10                \n\t" /* br | ag */

                  "add    r7, r5, r7                 \n\t" /* r7 = src + scaled dst */

                  /* ----------------- */
                  "str    r7, [%[dst]], #4           \n\t" /* store r7 to dst, advance dst by one pixel */
                  /* ----------------- */

                  "3:                                \n\t" /* <exit> */
                  : [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count)
                  :
                  : "cc", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "ip", "memory"
                  );
}

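/*
 * Illustrative sketch, not part of the original source: the per-pixel step
 * that both loops above implement. A 32-bit premultiplied pixel is split into
 * its two byte pairs with the 0x00ff00ff mask (built in ip), each pair is
 * scaled by (256 - srcA) in a single multiply, and the scaled dst is added to
 * src. The helper name is hypothetical.
 */
static inline uint32_t S32A_Opaque_ref(uint32_t src, uint32_t dst) {
    uint32_t scale = 256 - (src >> 24);                              // 256-based scale, as in the asm
    uint32_t br = (((dst      ) & 0x00FF00FF) * scale) >> 8 & 0x00FF00FF; // blue and red together
    uint32_t ag = (((dst >> 8) & 0x00FF00FF) * scale)       & 0xFF00FF00; // alpha and green together
    return src + (br | ag);
}
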
/*
 * ARM asm version of S32A_Blend_BlitRow32
 */
void S32A_Blend_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
                              const SkPMColor* SK_RESTRICT src,
                              int count, U8CPU alpha) {
    asm volatile (
                  "cmp    %[count], #0               \n\t" /* comparing count with 0 */
                  "beq    3f                         \n\t" /* if zero, exit */

                  "mov    r12, #0xff                 \n\t" /* load the 0xff mask in r12 */
                  "orr    r12, r12, r12, lsl #16     \n\t" /* convert it to 0xff00ff in r12 */

                  /* src1,2_scale */
                  "add    %[alpha], %[alpha], #1     \n\t" /* loading %[alpha]=src_scale=alpha+1 */

                  "cmp    %[count], #2               \n\t" /* comparing count with 2 */
                  "blt    2f                         \n\t" /* if less than 2 -> single loop */

                  /* Double Loop */
                  "1:                                \n\t" /* <double loop> */
                  "ldm    %[src]!, {r5, r6}          \n\t" /* load two src pixels into r5 and r6 */
                  "ldm    %[dst], {r7, r8}           \n\t" /* load two dst pixels into r7 and r8 */

                  /* dst1_scale and dst2_scale*/
                  "lsr    r9, r5, #24                \n\t" /* src >> 24 */
                  "lsr    r10, r6, #24               \n\t" /* src >> 24 */
#ifdef SK_ARM_HAS_EDSP
                  "smulbb r9, r9, %[alpha]           \n\t" /* r9 = SkMulS16 r9 with src_scale */
                  "smulbb r10, r10, %[alpha]         \n\t" /* r10 = SkMulS16 r10 with src_scale */
#else
                  "mul    r9, r9, %[alpha]           \n\t" /* r9 = SkMulS16 r9 with src_scale */
                  "mul    r10, r10, %[alpha]         \n\t" /* r10 = SkMulS16 r10 with src_scale */
#endif
                  "lsr    r9, r9, #8                 \n\t" /* r9 >> 8 */
                  "lsr    r10, r10, #8               \n\t" /* r10 >> 8 */
                  "rsb    r9, r9, #256               \n\t" /* dst1_scale = r9 = 255 - r9 + 1 */
                  "rsb    r10, r10, #256             \n\t" /* dst2_scale = r10 = 255 - r10 + 1 */

                  /* ---------------------- */

                  /* src1, src1_scale */
                  "and    r11, r12, r5, lsr #8       \n\t" /* ag = r11 = r5 masked by r12 lsr by #8 */
                  "and    r4, r12, r5                \n\t" /* rb = r4 = r5 masked by r12 */
                  "mul    r11, r11, %[alpha]         \n\t" /* ag = r11 times src_scale */
                  "mul    r4, r4, %[alpha]           \n\t" /* rb = r4 times src_scale */
                  "and    r11, r11, r12, lsl #8      \n\t" /* ag masked by reverse mask (r12) */
                  "and    r4, r12, r4, lsr #8        \n\t" /* rb masked by mask (r12) */
                  "orr    r5, r11, r4                \n\t" /* r5 = (src1, src_scale) */

                  /* dst1, dst1_scale */
                  "and    r11, r12, r7, lsr #8       \n\t" /* ag = r11 = r7 masked by r12 lsr by #8 */
                  "and    r4, r12, r7                \n\t" /* rb = r4 = r7 masked by r12 */
                  "mul    r11, r11, r9               \n\t" /* ag = r11 times dst_scale (r9) */
                  "mul    r4, r4, r9                 \n\t" /* rb = r4 times dst_scale (r9) */
                  "and    r11, r11, r12, lsl #8      \n\t" /* ag masked by reverse mask (r12) */
                  "and    r4, r12, r4, lsr #8        \n\t" /* rb masked by mask (r12) */
                  "orr    r9, r11, r4                \n\t" /* r9 = (dst1, dst_scale) */

                  /* ---------------------- */
                  "add    r9, r5, r9                 \n\t" /* *dst = src plus dst, both scaled */
                  /* ---------------------- */

                  /* ====================== */

                  /* src2, src2_scale */
                  "and    r11, r12, r6, lsr #8       \n\t" /* ag = r11 = r6 masked by r12 lsr by #8 */
                  "and    r4, r12, r6                \n\t" /* rb = r4 = r6 masked by r12 */
                  "mul    r11, r11, %[alpha]         \n\t" /* ag = r11 times src_scale */
                  "mul    r4, r4, %[alpha]           \n\t" /* rb = r4 times src_scale */
                  "and    r11, r11, r12, lsl #8      \n\t" /* ag masked by reverse mask (r12) */
                  "and    r4, r12, r4, lsr #8        \n\t" /* rb masked by mask (r12) */
                  "orr    r6, r11, r4                \n\t" /* r6 = (src2, src_scale) */

                  /* dst2, dst2_scale */
                  "and    r11, r12, r8, lsr #8       \n\t" /* ag = r11 = r8 masked by r12 lsr by #8 */
                  "and    r4, r12, r8                \n\t" /* rb = r4 = r8 masked by r12 */
                  "mul    r11, r11, r10              \n\t" /* ag = r11 times dst_scale (r10) */
                  "mul    r4, r4, r10                \n\t" /* rb = r4 times dst_scale (r10) */
                  "and    r11, r11, r12, lsl #8      \n\t" /* ag masked by reverse mask (r12) */
                  "and    r4, r12, r4, lsr #8        \n\t" /* rb masked by mask (r12) */
                  "orr    r10, r11, r4               \n\t" /* r10 = (dst2, dst_scale) */

                  "sub    %[count], %[count], #2     \n\t" /* decrease count by 2 */
                  /* ---------------------- */
                  "add    r10, r6, r10               \n\t" /* *dst = src plus dst, both scaled */
                  /* ---------------------- */
                  "cmp    %[count], #1               \n\t" /* compare count with 1 */
                  /* ----------------- */
                  "stm    %[dst]!, {r9, r10}         \n\t" /* store r9 and r10 to dst, advance dst by two pixels */
                  /* ----------------- */

                  "bgt    1b                         \n\t" /* if %[count] greater than 1 reloop */
                  "blt    3f                         \n\t" /* if %[count] less than 1 exit */
                                                           /* else fall into the single loop */
                  /* Single Loop */
                  "2:                                \n\t" /* <single loop> */
                  "ldr    r5, [%[src]], #4           \n\t" /* load one src pixel into r5 */
                  "ldr    r7, [%[dst]]               \n\t" /* load one dst pixel into r7 */

                  "lsr    r6, r5, #24                \n\t" /* src >> 24 */
                  "and    r8, r12, r5, lsr #8        \n\t" /* ag = r8 = r5 masked by r12 lsr by #8 */
#ifdef SK_ARM_HAS_EDSP
                  "smulbb r6, r6, %[alpha]           \n\t" /* r6 = SkMulS16 with src_scale */
#else
                  "mul    r6, r6, %[alpha]           \n\t" /* r6 = SkMulS16 with src_scale */
#endif
                  "and    r9, r12, r5                \n\t" /* rb = r9 = r5 masked by r12 */
                  "lsr    r6, r6, #8                 \n\t" /* r6 >> 8 */
                  "mul    r8, r8, %[alpha]           \n\t" /* ag = r8 times scale */
                  "rsb    r6, r6, #256               \n\t" /* r6 = 255 - r6 + 1 */

                  /* src, src_scale */
                  "mul    r9, r9, %[alpha]           \n\t" /* rb = r9 times scale */
                  "and    r8, r8, r12, lsl #8        \n\t" /* ag masked by reverse mask (r12) */
                  "and    r9, r12, r9, lsr #8        \n\t" /* rb masked by mask (r12) */
                  "orr    r10, r8, r9                \n\t" /* r10 = (src, src_scale) */

                  /* dst, dst_scale */
                  "and    r8, r12, r7, lsr #8        \n\t" /* ag = r8 = r7 masked by r12 lsr by #8 */
                  "and    r9, r12, r7                \n\t" /* rb = r9 = r7 masked by r12 */
                  "mul    r8, r8, r6                 \n\t" /* ag = r8 times scale (r6) */
                  "mul    r9, r9, r6                 \n\t" /* rb = r9 times scale (r6) */
                  "and    r8, r8, r12, lsl #8        \n\t" /* ag masked by reverse mask (r12) */
                  "and    r9, r12, r9, lsr #8        \n\t" /* rb masked by mask (r12) */
                  "orr    r7, r8, r9                 \n\t" /* r7 = (dst, dst_scale) */

                  "add    r10, r7, r10               \n\t" /* *dst = src plus dst, both scaled */

                  /* ----------------- */
                  "str    r10, [%[dst]], #4          \n\t" /* store r10 to dst, advance dst by one pixel */
                  /* ----------------- */

                  "3:                                \n\t" /* <exit> */
                  : [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count), [alpha] "+r" (alpha)
                  :
                  : "cc", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "memory"
                  );

}

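/*
 * Illustrative sketch, not part of the original source: the per-pixel step of
 * the blend loop above. src is scaled by src_scale = alpha + 1 and dst by
 * dst_scale = 256 - (srcA * src_scale >> 8); both scales are in 1..256 so the
 * multiply-and-shift-by-8 keeps full precision. The helper names are
 * hypothetical.
 */
static inline uint32_t S32A_Blend_scale_ref(uint32_t c, uint32_t s /* 1..256 */) {
    uint32_t br = (((c      ) & 0x00FF00FF) * s) >> 8 & 0x00FF00FF; // blue and red together
    uint32_t ag = (((c >> 8) & 0x00FF00FF) * s)       & 0xFF00FF00; // alpha and green together
    return br | ag;
}

static inline uint32_t S32A_Blend_ref(uint32_t src, uint32_t dst, unsigned alpha) {
    uint32_t src_scale = alpha + 1;                              // 1..256
    uint32_t dst_scale = 256 - (((src >> 24) * src_scale) >> 8); // 1..256
    return S32A_Blend_scale_ref(src, src_scale) + S32A_Blend_scale_ref(dst, dst_scale);
}
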
///////////////////////////////////////////////////////////////////////////////

static const SkBlitRow::Proc sk_blitrow_platform_565_procs_arm[] = {
    // no dither
    // NOTE: For the functions below, we don't have a special version
    //       that assumes that each source pixel is opaque. But our S32A is
    //       still faster than the default, so use it.
    S32A_D565_Opaque,   // S32_D565_Opaque
    NULL,               // S32_D565_Blend
    S32A_D565_Opaque,   // S32A_D565_Opaque
    NULL,               // S32A_D565_Blend

    // dither
    NULL,   // S32_D565_Opaque_Dither
    NULL,   // S32_D565_Blend_Dither
    NULL,   // S32A_D565_Opaque_Dither
    NULL,   // S32A_D565_Blend_Dither
};

static const SkBlitRow::Proc32 sk_blitrow_platform_32_procs_arm[] = {
    NULL,   // S32_Opaque,
    NULL,   // S32_Blend,
    S32A_Opaque_BlitRow32_arm,   // S32A_Opaque,
    S32A_Blend_BlitRow32_arm     // S32A_Blend
};

#endif // USE_ARM_CODE

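// Note (editorial addition): SK_ARM_NEON_WRAP is defined in SkUtilsArm.h. It
// resolves its argument to either the plain-ARM symbol or its _neon-suffixed
// counterpart -- at compile time when NEON availability is fixed, or via a
// runtime CPU-feature check when NEON support is optional. See SkUtilsArm.h
// for the exact definition.
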
SkBlitRow::Proc SkBlitRow::PlatformProcs565(unsigned flags) {
    return SK_ARM_NEON_WRAP(sk_blitrow_platform_565_procs_arm)[flags];
}

SkBlitRow::Proc32 SkBlitRow::PlatformProcs32(unsigned flags) {
    return SK_ARM_NEON_WRAP(sk_blitrow_platform_32_procs_arm)[flags];
}

///////////////////////////////////////////////////////////////////////////////
#define Color32_arm  NULL
SkBlitRow::ColorProc SkBlitRow::PlatformColorProc() {
    return SK_ARM_NEON_WRAP(Color32_arm);
}

SkBlitRow::ColorRectProc PlatformColorRectProcFactory() {
    return NULL;
}
381