/*
 * Copyright 2012 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBlitRow.h"
#include "SkColorPriv.h"
#include "SkDither.h"
#include "SkMathPriv.h"
#include "SkUtils.h"
#include "SkUtilsArm.h"

#include "SkCachePreload_arm.h"

// Define USE_NEON_CODE to indicate that we need to build NEON routines
#define USE_NEON_CODE (!SK_ARM_NEON_IS_NONE)

// Define USE_ARM_CODE to indicate that we need to build ARM routines
#define USE_ARM_CODE (!SK_ARM_NEON_IS_ALWAYS)

#if USE_NEON_CODE
#include "SkBlitRow_opts_arm_neon.h"
#endif

#if USE_ARM_CODE

static void S32A_D565_Opaque(uint16_t* SK_RESTRICT dst,
                             const SkPMColor* SK_RESTRICT src, int count,
                             U8CPU alpha, int /*x*/, int /*y*/) {
    SkASSERT(255 == alpha);

    asm volatile (
        "1:                                   \n\t" /* loop over src pixels */
        "ldr     r3, [%[src]], #4             \n\t" /* r3 = *src++ */
        "cmp     r3, #0xff000000              \n\t" /* fully opaque? */
        "blo     2f                           \n\t" /* no: blend below */
        "and     r4, r3, #0x0000f8            \n\t" /* keep the 5/6/5 high bits of each src channel */
        "and     r5, r3, #0x00fc00            \n\t"
        "and     r6, r3, #0xf80000            \n\t"
        "pld     [r1, #32]                    \n\t"
        "lsl     r3, r4, #8                   \n\t" /* repack them as RGB565 */
        "orr     r3, r3, r5, lsr #5           \n\t"
        "orr     r3, r3, r6, lsr #19          \n\t"
        "subs    %[count], %[count], #1       \n\t"
        "strh    r3, [%[dst]], #2             \n\t" /* *dst++ = packed pixel */
        "bne     1b                           \n\t"
        "b       4f                           \n\t"

        "2:                                   \n\t" /* translucent: blend src over dst */
        "lsrs    r7, r3, #24                  \n\t" /* r7 = src alpha */
        "beq     3f                           \n\t" /* alpha == 0: skip this pixel */
        "ldrh    r4, [%[dst]]                 \n\t"
        "rsb     r7, r7, #255                 \n\t" /* r7 = dst scale = 255 - alpha */
        "and     r6, r4, #0x001f              \n\t" /* split dst into its 5/6/5 channels */
#if SK_ARM_ARCH == 6
        "lsl     r5, r4, #21                  \n\t"
        "lsr     r5, r5, #26                  \n\t"
#else
        "ubfx    r5, r4, #5, #6               \n\t"
#endif
        "pld     [r0, #16]                    \n\t"
        "lsr     r4, r4, #11                  \n\t"
#ifdef SK_ARM_HAS_EDSP
        "smulbb  r6, r6, r7                   \n\t" /* scale each dst channel by (255 - alpha) */
        "smulbb  r5, r5, r7                   \n\t"
        "smulbb  r4, r4, r7                   \n\t"
#else
        "mul     r6, r6, r7                   \n\t" /* scale each dst channel by (255 - alpha) */
        "mul     r5, r5, r7                   \n\t"
        "mul     r4, r4, r7                   \n\t"
#endif
        "uxtb    r7, r3, ROR #16              \n\t" /* extract the src channels */
        "uxtb    ip, r3, ROR #8               \n\t"
        "and     r3, r3, #0xff                \n\t"
        "add     r6, r6, #16                  \n\t" /* round, narrow, and add the src channels */
        "add     r5, r5, #32                  \n\t"
        "add     r4, r4, #16                  \n\t"
        "add     r6, r6, r6, lsr #5           \n\t"
        "add     r5, r5, r5, lsr #6           \n\t"
        "add     r4, r4, r4, lsr #5           \n\t"
        "add     r6, r7, r6, lsr #5           \n\t"
        "add     r5, ip, r5, lsr #6           \n\t"
        "add     r4, r3, r4, lsr #5           \n\t"
        "lsr     r6, r6, #3                   \n\t" /* repack the result as RGB565 */
        "and     r5, r5, #0xfc                \n\t"
        "and     r4, r4, #0xf8                \n\t"
        "orr     r6, r6, r5, lsl #3           \n\t"
        "orr     r4, r6, r4, lsl #8           \n\t"
        "strh    r4, [%[dst]], #2             \n\t"
        "pld     [r1, #32]                    \n\t"
        "subs    %[count], %[count], #1       \n\t"
        "bne     1b                           \n\t"
        "b       4f                           \n\t"

        "3:                                   \n\t" /* transparent: just advance dst */
        "subs    %[count], %[count], #1       \n\t"
        "add     %[dst], %[dst], #2           \n\t"
        "bne     1b                           \n\t"

        "4:                                   \n\t" /* done */
        : [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count)
        :
        : "memory", "cc", "r3", "r4", "r5", "r6", "r7", "ip"
    );
}
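
/*
 * Rough, uncompiled C sketch of the per-pixel math above, kept here for
 * reference only. It leans on the SkColorPriv.h packing helpers
 * (SkGetPackedA32/R32/G32/B32, SkGetPackedR16/G16/B16, SkPixel32ToPixel16,
 * SkPackRGB16); the rounding below mirrors the assembly but has not been
 * verified bit-for-bit against it.
 *
 *   while (count-- > 0) {
 *       SkPMColor c = *src++;
 *       unsigned a = SkGetPackedA32(c);
 *       if (255 == a) {
 *           *dst = SkPixel32ToPixel16(c);    // opaque: just repack to 565
 *       } else if (a != 0) {
 *           unsigned scale = 255 - a;        // scale applied to the dst channels
 *           unsigned r = SkGetPackedR16(*dst) * scale + 16;
 *           unsigned g = SkGetPackedG16(*dst) * scale + 32;
 *           unsigned b = SkGetPackedB16(*dst) * scale + 16;
 *           // fold each product back to ~8 bits, add the premultiplied src
 *           // channel (so the sum stays within 8 bits), then repack to 565
 *           r = (SkGetPackedR32(c) + ((r + (r >> 5)) >> 5)) >> 3;
 *           g = (SkGetPackedG32(c) + ((g + (g >> 6)) >> 6)) >> 2;
 *           b = (SkGetPackedB32(c) + ((b + (b >> 5)) >> 5)) >> 3;
 *           *dst = SkPackRGB16(r, g, b);
 *       }
 *       dst += 1;
 *   }
 */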

static void S32A_Opaque_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
                                      const SkPMColor* SK_RESTRICT src,
                                      int count, U8CPU alpha) {

    SkASSERT(255 == alpha);

    asm volatile (
        "cmp     %[count], #0                 \n\t" /* comparing count with 0 */
        "beq     3f                           \n\t" /* if zero exit */

        "mov     ip, #0xff                    \n\t" /* load the 0xff mask in ip */
        "orr     ip, ip, ip, lsl #16          \n\t" /* convert it to 0xff00ff in ip */

        "cmp     %[count], #2                 \n\t" /* compare count with 2 */
        "blt     2f                           \n\t" /* if less than 2 -> single loop */

        /* Double Loop */
        "1:                                   \n\t" /* <double loop> */
        "ldm     %[src]!, {r5,r6}             \n\t" /* load two src pixels into r5-r6 */
        "ldm     %[dst], {r7,r8}              \n\t" /* load two dst pixels into r7-r8 */
        "lsr     r4, r5, #24                  \n\t" /* extract the src alpha into r4 */

        /* ----------- */
        "and     r9, ip, r7                   \n\t" /* r9 = br masked by ip */
        "rsb     r4, r4, #256                 \n\t" /* r4 = scale = 256 - alpha */
        "and     r10, ip, r7, lsr #8          \n\t" /* r10 = ag masked by ip */

        "mul     r9, r9, r4                   \n\t" /* br = br * scale */
        "mul     r10, r10, r4                 \n\t" /* ag = ag * scale */
        "and     r9, ip, r9, lsr #8           \n\t" /* lsr br by 8 and mask it */

        "and     r10, r10, ip, lsl #8         \n\t" /* mask ag with reverse mask */
        "lsr     r4, r6, #24                  \n\t" /* extract the src alpha into r4 */
        "orr     r7, r9, r10                  \n\t" /* br | ag */

        "add     r7, r5, r7                   \n\t" /* r7 = src + scaled dst */
        "rsb     r4, r4, #256                 \n\t" /* r4 = scale = 256 - alpha */

        /* ----------- */
        "and     r9, ip, r8                   \n\t" /* r9 = br masked by ip */

        "and     r10, ip, r8, lsr #8          \n\t" /* r10 = ag masked by ip */
        "mul     r9, r9, r4                   \n\t" /* br = br * scale */
        "sub     %[count], %[count], #2       \n\t"
        "mul     r10, r10, r4                 \n\t" /* ag = ag * scale */

        "and     r9, ip, r9, lsr #8           \n\t" /* lsr br by 8 and mask it */
        "and     r10, r10, ip, lsl #8         \n\t" /* mask ag with reverse mask */
        "cmp     %[count], #1                 \n\t" /* comparing count with 1 */
        "orr     r8, r9, r10                  \n\t" /* br | ag */

        "add     r8, r6, r8                   \n\t" /* r8 = src + scaled dst */

        /* ----------------- */
        "stm     %[dst]!, {r7,r8}             \n\t" /* store r7,r8; advance dst by two pixels */
        /* ----------------- */

        "bgt     1b                           \n\t" /* if greater than 1 -> reloop */
        "blt     3f                           \n\t" /* if less than 1 -> exit */

        /* Single Loop */
        "2:                                   \n\t" /* <single loop> */
        "ldr     r5, [%[src]], #4             \n\t" /* load the src pixel into r5 */
        "ldr     r7, [%[dst]]                 \n\t" /* load the dst pixel into r7 */
        "lsr     r4, r5, #24                  \n\t" /* extract the src alpha into r4 */

        /* ----------- */
        "and     r9, ip, r7                   \n\t" /* r9 = br masked by ip */
        "rsb     r4, r4, #256                 \n\t" /* r4 = scale = 256 - alpha */

        "and     r10, ip, r7, lsr #8          \n\t" /* r10 = ag masked by ip */
        "mul     r9, r9, r4                   \n\t" /* br = br * scale */
        "mul     r10, r10, r4                 \n\t" /* ag = ag * scale */
        "and     r9, ip, r9, lsr #8           \n\t" /* lsr br by 8 and mask it */

        "and     r10, r10, ip, lsl #8         \n\t" /* mask ag */
        "orr     r7, r9, r10                  \n\t" /* br | ag */

        "add     r7, r5, r7                   \n\t" /* *dst = src + scaled dst */

        /* ----------------- */
        "str     r7, [%[dst]], #4             \n\t" /* *dst = r7, advance dst by one pixel */
        /* ----------------- */

        "3:                                   \n\t" /* <exit> */
        : [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count)
        :
        : "cc", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "ip", "memory"
    );
}
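
/*
 * Rough, uncompiled C sketch of the loop above, for reference. It assumes
 * SkGetPackedA32() and SkAlphaMulQ() from SkColorPriv.h; the assembly is the
 * same per-pixel SrcOver math, unrolled by two.
 *
 *   while (count-- > 0) {
 *       SkPMColor s = *src++;
 *       *dst = s + SkAlphaMulQ(*dst, 256 - SkGetPackedA32(s));
 *       dst += 1;
 *   }
 *
 * SkAlphaMulQ() scales the red/blue and alpha/green byte pairs with two
 * masked 32-bit multiplies (the 0x00ff00ff trick), which is the same sequence
 * the assembly performs with the mask held in ip.
 */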

/*
 * ARM asm version of S32A_Blend_BlitRow32
 */
void S32A_Blend_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
                              const SkPMColor* SK_RESTRICT src,
                              int count, U8CPU alpha) {
    asm volatile (
        "cmp     %[count], #0                 \n\t" /* comparing count with 0 */
        "beq     3f                           \n\t" /* if zero exit */

        "mov     r12, #0xff                   \n\t" /* load the 0xff mask in r12 */
        "orr     r12, r12, r12, lsl #16       \n\t" /* convert it to 0xff00ff in r12 */

        /* src1,2_scale */
        "add     %[alpha], %[alpha], #1       \n\t" /* %[alpha] = src_scale = alpha + 1 */

        "cmp     %[count], #2                 \n\t" /* comparing count with 2 */
        "blt     2f                           \n\t" /* if less than 2 -> single loop */

        /* Double Loop */
        "1:                                   \n\t" /* <double loop> */
        "ldm     %[src]!, {r5, r6}            \n\t" /* load two src pixels into r5 and r6 */
        "ldm     %[dst], {r7, r8}             \n\t" /* load two dst pixels into r7 and r8 */

        /* dst1_scale and dst2_scale */
        "lsr     r9, r5, #24                  \n\t" /* src1 >> 24 */
        "lsr     r10, r6, #24                 \n\t" /* src2 >> 24 */
#ifdef SK_ARM_HAS_EDSP
        "smulbb  r9, r9, %[alpha]             \n\t" /* r9 = SkMulS16 r9 with src_scale */
        "smulbb  r10, r10, %[alpha]           \n\t" /* r10 = SkMulS16 r10 with src_scale */
#else
        "mul     r9, r9, %[alpha]             \n\t" /* r9 = SkMulS16 r9 with src_scale */
        "mul     r10, r10, %[alpha]           \n\t" /* r10 = SkMulS16 r10 with src_scale */
#endif
        "lsr     r9, r9, #8                   \n\t" /* r9 >> 8 */
        "lsr     r10, r10, #8                 \n\t" /* r10 >> 8 */
        "rsb     r9, r9, #256                 \n\t" /* dst1_scale = r9 = 256 - r9 */
        "rsb     r10, r10, #256               \n\t" /* dst2_scale = r10 = 256 - r10 */

        /* ---------------------- */

        /* src1, src1_scale */
        "and     r11, r12, r5, lsr #8         \n\t" /* ag = r11 = r5 masked by r12, lsr #8 */
        "and     r4, r12, r5                  \n\t" /* rb = r4 = r5 masked by r12 */
        "mul     r11, r11, %[alpha]           \n\t" /* ag = r11 times src_scale */
        "mul     r4, r4, %[alpha]             \n\t" /* rb = r4 times src_scale */
        "and     r11, r11, r12, lsl #8        \n\t" /* ag masked by reverse mask (r12) */
        "and     r4, r12, r4, lsr #8          \n\t" /* rb masked by mask (r12) */
        "orr     r5, r11, r4                  \n\t" /* r5 = src1 scaled by src_scale */

        /* dst1, dst1_scale */
        "and     r11, r12, r7, lsr #8         \n\t" /* ag = r11 = r7 masked by r12, lsr #8 */
        "and     r4, r12, r7                  \n\t" /* rb = r4 = r7 masked by r12 */
        "mul     r11, r11, r9                 \n\t" /* ag = r11 times dst_scale (r9) */
        "mul     r4, r4, r9                   \n\t" /* rb = r4 times dst_scale (r9) */
        "and     r11, r11, r12, lsl #8        \n\t" /* ag masked by reverse mask (r12) */
        "and     r4, r12, r4, lsr #8          \n\t" /* rb masked by mask (r12) */
        "orr     r9, r11, r4                  \n\t" /* r9 = dst1 scaled by dst_scale */

        /* ---------------------- */
        "add     r9, r5, r9                   \n\t" /* *dst = src plus dst, both scaled */
        /* ---------------------- */

        /* ====================== */

        /* src2, src2_scale */
        "and     r11, r12, r6, lsr #8         \n\t" /* ag = r11 = r6 masked by r12, lsr #8 */
        "and     r4, r12, r6                  \n\t" /* rb = r4 = r6 masked by r12 */
        "mul     r11, r11, %[alpha]           \n\t" /* ag = r11 times src_scale */
        "mul     r4, r4, %[alpha]             \n\t" /* rb = r4 times src_scale */
        "and     r11, r11, r12, lsl #8        \n\t" /* ag masked by reverse mask (r12) */
        "and     r4, r12, r4, lsr #8          \n\t" /* rb masked by mask (r12) */
        "orr     r6, r11, r4                  \n\t" /* r6 = src2 scaled by src_scale */

        /* dst2, dst2_scale */
        "and     r11, r12, r8, lsr #8         \n\t" /* ag = r11 = r8 masked by r12, lsr #8 */
        "and     r4, r12, r8                  \n\t" /* rb = r4 = r8 masked by r12 */
        "mul     r11, r11, r10                \n\t" /* ag = r11 times dst_scale (r10) */
        "mul     r4, r4, r10                  \n\t" /* rb = r4 times dst_scale (r10) */
        "and     r11, r11, r12, lsl #8        \n\t" /* ag masked by reverse mask (r12) */
        "and     r4, r12, r4, lsr #8          \n\t" /* rb masked by mask (r12) */
        "orr     r10, r11, r4                 \n\t" /* r10 = dst2 scaled by dst_scale */

        "sub     %[count], %[count], #2       \n\t" /* decrease count by 2 */
        /* ---------------------- */
        "add     r10, r6, r10                 \n\t" /* *dst = src plus dst, both scaled */
        /* ---------------------- */
        "cmp     %[count], #1                 \n\t" /* compare count with 1 */
        /* ----------------- */
        "stm     %[dst]!, {r9, r10}           \n\t" /* store r9,r10; advance dst by two pixels */
        /* ----------------- */

        "bgt     1b                           \n\t" /* if %[count] greater than 1 reloop */
        "blt     3f                           \n\t" /* if %[count] less than 1 exit */
                                                    /* else fall through into the single loop */
        /* Single Loop */
        "2:                                   \n\t" /* <single loop> */
        "ldr     r5, [%[src]], #4             \n\t" /* load the src pixel into r5 */
        "ldr     r7, [%[dst]]                 \n\t" /* load the dst pixel into r7 */

        "lsr     r6, r5, #24                  \n\t" /* src >> 24 */
        "and     r8, r12, r5, lsr #8          \n\t" /* ag = r8 = r5 masked by r12, lsr #8 */
#ifdef SK_ARM_HAS_EDSP
        "smulbb  r6, r6, %[alpha]             \n\t" /* r6 = SkMulS16 with src_scale */
#else
        "mul     r6, r6, %[alpha]             \n\t" /* r6 = SkMulS16 with src_scale */
#endif
        "and     r9, r12, r5                  \n\t" /* rb = r9 = r5 masked by r12 */
        "lsr     r6, r6, #8                   \n\t" /* r6 >> 8 */
        "mul     r8, r8, %[alpha]             \n\t" /* ag = r8 times scale */
        "rsb     r6, r6, #256                 \n\t" /* dst_scale = r6 = 256 - r6 */

        /* src, src_scale */
        "mul     r9, r9, %[alpha]             \n\t" /* rb = r9 times scale */
        "and     r8, r8, r12, lsl #8          \n\t" /* ag masked by reverse mask (r12) */
        "and     r9, r12, r9, lsr #8          \n\t" /* rb masked by mask (r12) */
        "orr     r10, r8, r9                  \n\t" /* r10 = src scaled by src_scale */

        /* dst, dst_scale */
        "and     r8, r12, r7, lsr #8          \n\t" /* ag = r8 = r7 masked by r12, lsr #8 */
        "and     r9, r12, r7                  \n\t" /* rb = r9 = r7 masked by r12 */
        "mul     r8, r8, r6                   \n\t" /* ag = r8 times scale (r6) */
        "mul     r9, r9, r6                   \n\t" /* rb = r9 times scale (r6) */
        "and     r8, r8, r12, lsl #8          \n\t" /* ag masked by reverse mask (r12) */
        "and     r9, r12, r9, lsr #8          \n\t" /* rb masked by mask (r12) */
        "orr     r7, r8, r9                   \n\t" /* r7 = dst scaled by dst_scale */

        "add     r10, r7, r10                 \n\t" /* *dst = src plus dst, both scaled */

        /* ----------------- */
        "str     r10, [%[dst]], #4            \n\t" /* *dst = r10, advance dst by one pixel */
        /* ----------------- */

        "3:                                   \n\t" /* <exit> */
        : [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count), [alpha] "+r" (alpha)
        :
        : "cc", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "memory"
    );
}
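
/*
 * Rough, uncompiled C sketch of the loop above, for reference. It assumes
 * SkAlpha255To256(), SkGetPackedA32() and SkAlphaMulQ() from SkColorPriv.h;
 * the assembly unrolls this by two.
 *
 *   unsigned src_scale = SkAlpha255To256(alpha);               // alpha + 1
 *   while (count-- > 0) {
 *       SkPMColor s = *src++;
 *       unsigned dst_scale = 256 - ((SkGetPackedA32(s) * src_scale) >> 8);
 *       *dst = SkAlphaMulQ(s, src_scale) + SkAlphaMulQ(*dst, dst_scale);
 *       dst += 1;
 *   }
 */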

///////////////////////////////////////////////////////////////////////////////

static const SkBlitRow::Proc sk_blitrow_platform_565_procs_arm[] = {
    // no dither
    // NOTE: For the functions below, we don't have a special version
    //       that assumes that each source pixel is opaque. But our S32A is
    //       still faster than the default, so use it.
    S32A_D565_Opaque,   // S32_D565_Opaque
    NULL,               // S32_D565_Blend
    S32A_D565_Opaque,   // S32A_D565_Opaque
    NULL,               // S32A_D565_Blend

    // dither
    NULL,   // S32_D565_Opaque_Dither
    NULL,   // S32_D565_Blend_Dither
    NULL,   // S32A_D565_Opaque_Dither
    NULL,   // S32A_D565_Blend_Dither
};

static const SkBlitRow::Proc32 sk_blitrow_platform_32_procs_arm[] = {
    NULL,                       // S32_Opaque,
    NULL,                       // S32_Blend,
    S32A_Opaque_BlitRow32_arm,  // S32A_Opaque,
    S32A_Blend_BlitRow32_arm    // S32A_Blend
};

#endif // USE_ARM_CODE

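// SK_ARM_NEON_WRAP comes from SkUtilsArm.h; it is expected to select the
// plain tables above when NEON is unavailable, the _neon variants when NEON
// is always present, and a runtime-dispatched choice between the two for
// dynamic-NEON builds.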
SkBlitRow::Proc SkBlitRow::PlatformProcs565(unsigned flags) {
    return SK_ARM_NEON_WRAP(sk_blitrow_platform_565_procs_arm)[flags];
}

SkBlitRow::Proc32 SkBlitRow::PlatformProcs32(unsigned flags) {
    return SK_ARM_NEON_WRAP(sk_blitrow_platform_32_procs_arm)[flags];
}

///////////////////////////////////////////////////////////////////////////////

#define Color32_arm NULL
SkBlitRow::ColorProc SkBlitRow::PlatformColorProc() {
    return SK_ARM_NEON_WRAP(Color32_arm);
}