/*
 * AltiVec optimizations for libjpeg-turbo
 *
 * Copyright (C) 2015, D. R. Commander. All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty. In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* This file is included by jdmerge-altivec.c */
24

void jsimd_h2v1_merged_upsample_altivec(JDIMENSION output_width,
                                        JSAMPIMAGE input_buf,
                                        JDIMENSION in_row_group_ctr,
                                        JSAMPARRAY output_buf)
{
  JSAMPROW outptr, inptr0, inptr1, inptr2;
  int pitch = output_width * RGB_PIXELSIZE, num_cols, yloop;
#if __BIG_ENDIAN__
  int offset;
#endif
  unsigned char __attribute__((aligned(16))) tmpbuf[RGB_PIXELSIZE * 16];

  __vector unsigned char rgb0, rgb1, rgb2, rgbx0, rgbx1, rgbx2, rgbx3,
    y, cb, cr;
#if __BIG_ENDIAN__
  __vector unsigned char edgel, edgeh, edges, out0, out1, out2, out3;
#if RGB_PIXELSIZE == 4
  __vector unsigned char out4;
#endif
#endif
#if RGB_PIXELSIZE == 4
  __vector unsigned char rgb3;
#endif
  __vector short rg0, rg1, rg2, rg3, bx0, bx1, bx2, bx3, ye, yo, cbl, cbh,
    crl, crh, r_yl, r_yh, g_yl, g_yh, b_yl, b_yh, g_y0w, g_y1w, g_y2w, g_y3w,
    rl, rh, gl, gh, bl, bh, re, ro, ge, go, be, bo;
  __vector int g_y0, g_y1, g_y2, g_y3;

  /* Constants
   * NOTE: The >> 1 is to compensate for the fact that vec_madds() returns 17
   * high-order bits, not 16.
   */
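  /* In other words, to compute x * F / 65536 (each F_* constant is a
   * coefficient scaled by 2^16; see the macros in the including file,
   * jdmerge-altivec.c), the code below doubles x, multiplies by F >> 1 with
   * vec_madds() (which returns (x * y) >> 15), and then shifts the product
   * right by one more bit. Adding pw_one before that final shift rounds the
   * result.
   */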
  __vector short pw_f0402 = { __8X(F_0_402 >> 1) },
    pw_mf0228 = { __8X(-F_0_228 >> 1) },
    pw_mf0344_f0285 = { __4X2(-F_0_344, F_0_285) },
    pw_one = { __8X(1) }, pw_255 = { __8X(255) },
    pw_cj = { __8X(CENTERJSAMPLE) };
  __vector int pd_onehalf = { __4X(ONE_HALF) };
  __vector unsigned char pb_zero = { __16X(0) },
#if __BIG_ENDIAN__
    shift_pack_index =
      { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 },
    even_index =
      { 0, 16, 0, 18, 0, 20, 0, 22, 0, 24, 0, 26, 0, 28, 0, 30 },
    odd_index =
      { 0, 17, 0, 19, 0, 21, 0, 23, 0, 25, 0, 27, 0, 29, 0, 31 };
#else
    shift_pack_index =
      { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 },
    even_index =
      { 16, 0, 18, 0, 20, 0, 22, 0, 24, 0, 26, 0, 28, 0, 30, 0 },
    odd_index =
      { 17, 0, 19, 0, 21, 0, 23, 0, 25, 0, 27, 0, 29, 0, 31, 0 };
#endif
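
  /* In vec_perm(), selector values 0-15 pick bytes from the first argument
   * and 16-31 from the second. shift_pack_index extracts the 16 high-order
   * bits of each 32-bit element (used to descale the vec_msums() results
   * below), while even_index and odd_index zero-extend the even- and
   * odd-numbered bytes of a Y vector to eight 16-bit values each.
   */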

  inptr0 = input_buf[0][in_row_group_ctr];
  inptr1 = input_buf[1][in_row_group_ctr];
  inptr2 = input_buf[2][in_row_group_ctr];
  outptr = output_buf[0];

  for (num_cols = pitch; num_cols > 0; inptr1 += 16, inptr2 += 16) {

    cb = vec_ld(0, inptr1);
    /* NOTE: We have to use vec_merge*() here because vec_unpack*() doesn't
     * support unsigned vectors.
     */
    cbl = (__vector signed short)VEC_UNPACKHU(cb);
    cbh = (__vector signed short)VEC_UNPACKLU(cb);
    cbl = vec_sub(cbl, pw_cj);
    cbh = vec_sub(cbh, pw_cj);

    cr = vec_ld(0, inptr2);
    crl = (__vector signed short)VEC_UNPACKHU(cr);
    crh = (__vector signed short)VEC_UNPACKLU(cr);
    crl = vec_sub(crl, pw_cj);
    crh = vec_sub(crh, pw_cj);

    /* (Original)
     * R = Y + 1.40200 * Cr
     * G = Y - 0.34414 * Cb - 0.71414 * Cr
     * B = Y + 1.77200 * Cb
     *
     * (This implementation)
     * R = Y + 0.40200 * Cr + Cr
     * G = Y - 0.34414 * Cb + 0.28586 * Cr - Cr
     * B = Y - 0.22800 * Cb + Cb + Cb
     */
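    /* The coefficients are rewritten so that their magnitudes are always
     * less than 1 and thus fit in signed 16-bit fixed point for use with
     * vec_madds()/vec_msums(); the whole multiples of Cb and Cr are then
     * restored with plain vector adds. (1.40200 = 0.40200 + 1,
     * -0.71414 = 0.28586 - 1, 1.77200 = -0.22800 + 2)
     */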
    b_yl = vec_add(cbl, cbl);
    b_yh = vec_add(cbh, cbh);
    b_yl = vec_madds(b_yl, pw_mf0228, pw_one);
    b_yh = vec_madds(b_yh, pw_mf0228, pw_one);
    b_yl = vec_sra(b_yl, (__vector unsigned short)pw_one);
    b_yh = vec_sra(b_yh, (__vector unsigned short)pw_one);
    b_yl = vec_add(b_yl, cbl);
    b_yh = vec_add(b_yh, cbh);
    b_yl = vec_add(b_yl, cbl);
    b_yh = vec_add(b_yh, cbh);

    r_yl = vec_add(crl, crl);
    r_yh = vec_add(crh, crh);
    r_yl = vec_madds(r_yl, pw_f0402, pw_one);
    r_yh = vec_madds(r_yh, pw_f0402, pw_one);
    r_yl = vec_sra(r_yl, (__vector unsigned short)pw_one);
    r_yh = vec_sra(r_yh, (__vector unsigned short)pw_one);
    r_yl = vec_add(r_yl, crl);
    r_yh = vec_add(r_yh, crh);

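    /* G needs both a Cb and a Cr term, so the two sample vectors are
     * interleaved and both products are formed at once with vec_msums(),
     * which sums adjacent 16-bit products into each 32-bit element.
     * pd_onehalf rounds the subsequent descale to 16 bits.
     */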
    g_y0w = vec_mergeh(cbl, crl);
    g_y1w = vec_mergel(cbl, crl);
    g_y0 = vec_msums(g_y0w, pw_mf0344_f0285, pd_onehalf);
    g_y1 = vec_msums(g_y1w, pw_mf0344_f0285, pd_onehalf);
    g_y2w = vec_mergeh(cbh, crh);
    g_y3w = vec_mergel(cbh, crh);
    g_y2 = vec_msums(g_y2w, pw_mf0344_f0285, pd_onehalf);
    g_y3 = vec_msums(g_y3w, pw_mf0344_f0285, pd_onehalf);
    /* Clever way to avoid 4 shifts + 2 packs. This packs the high word from
     * each dword into a new 16-bit vector, which is the equivalent of
     * descaling the 32-bit results (right-shifting by 16 bits) and then
     * packing them.
     */
    g_yl = vec_perm((__vector short)g_y0, (__vector short)g_y1,
                    shift_pack_index);
    g_yh = vec_perm((__vector short)g_y2, (__vector short)g_y3,
                    shift_pack_index);
    g_yl = vec_sub(g_yl, crl);
    g_yh = vec_sub(g_yh, crh);

    for (yloop = 0; yloop < 2 && num_cols > 0; yloop++,
         num_cols -= RGB_PIXELSIZE * 16,
         outptr += RGB_PIXELSIZE * 16, inptr0 += 16) {

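      /* Split this 16-sample Y chunk into its even- and odd-numbered
       * samples, zero-extended to 16 bits. Each Cb/Cr term computed above
       * is shared by one such even/odd pair, i.e. by two horizontally
       * adjacent pixels (2:1 horizontal chroma subsampling).
       */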
      y = vec_ld(0, inptr0);
      ye = (__vector signed short)vec_perm(pb_zero, y, even_index);
      yo = (__vector signed short)vec_perm(pb_zero, y, odd_index);

      if (yloop == 0) {
        be = vec_add(b_yl, ye);
        bo = vec_add(b_yl, yo);
        re = vec_add(r_yl, ye);
        ro = vec_add(r_yl, yo);
        ge = vec_add(g_yl, ye);
        go = vec_add(g_yl, yo);
      } else {
        be = vec_add(b_yh, ye);
        bo = vec_add(b_yh, yo);
        re = vec_add(r_yh, ye);
        ro = vec_add(r_yh, yo);
        ge = vec_add(g_yh, ye);
        go = vec_add(g_yh, yo);
      }

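      /* Re-interleave the even/odd results back into pixel order, then
       * pair R with G and B with an all-255 filler word to set up the
       * R G / B X byte layout produced by the pack step below.
       */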
      rl = vec_mergeh(re, ro);
      rh = vec_mergel(re, ro);
      gl = vec_mergeh(ge, go);
      gh = vec_mergel(ge, go);
      bl = vec_mergeh(be, bo);
      bh = vec_mergel(be, bo);

      rg0 = vec_mergeh(rl, gl);
      bx0 = vec_mergeh(bl, pw_255);
      rg1 = vec_mergel(rl, gl);
      bx1 = vec_mergel(bl, pw_255);
      rg2 = vec_mergeh(rh, gh);
      bx2 = vec_mergeh(bh, pw_255);
      rg3 = vec_mergel(rh, gh);
      bx3 = vec_mergel(bh, pw_255);

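      /* vec_packsu() doubles as the range-limiting step: the 16-bit sums
       * are clamped to 0..255 with unsigned saturation as they are packed
       * down to bytes.
       */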
      rgbx0 = vec_packsu(rg0, bx0);
      rgbx1 = vec_packsu(rg1, bx1);
      rgbx2 = vec_packsu(rg2, bx2);
      rgbx3 = vec_packsu(rg3, bx3);

#if RGB_PIXELSIZE == 3
      /* rgbx0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 X0 B1 X1 B2 X2 B3 X3
       * rgbx1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 X4 B5 X5 B6 X6 B7 X7
       * rgbx2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 X8 B9 X9 Ba Xa Bb Xb
       * rgbx3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Xc Bd Xd Be Xe Bf Xf
       *
       * rgb0 = R0 G0 B0 R1 G1 B1 R2 G2 B2 R3 G3 B3 R4 G4 B4 R5
       * rgb1 = G5 B5 R6 G6 B6 R7 G7 B7 R8 G8 B8 R9 G9 B9 Ra Ga
       * rgb2 = Ba Rb Gb Bb Rc Gc Bc Rd Gd Bd Re Ge Be Rf Gf Bf
       */
      rgb0 = vec_perm(rgbx0, rgbx1, (__vector unsigned char)RGB_INDEX0);
      rgb1 = vec_perm(rgbx1, rgbx2, (__vector unsigned char)RGB_INDEX1);
      rgb2 = vec_perm(rgbx2, rgbx3, (__vector unsigned char)RGB_INDEX2);
#else
      /* rgbx0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 X0 B1 X1 B2 X2 B3 X3
       * rgbx1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 X4 B5 X5 B6 X6 B7 X7
       * rgbx2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 X8 B9 X9 Ba Xa Bb Xb
       * rgbx3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Xc Bd Xd Be Xe Bf Xf
       *
       * rgb0 = R0 G0 B0 X0 R1 G1 B1 X1 R2 G2 B2 X2 R3 G3 B3 X3
       * rgb1 = R4 G4 B4 X4 R5 G5 B5 X5 R6 G6 B6 X6 R7 G7 B7 X7
       * rgb2 = R8 G8 B8 X8 R9 G9 B9 X9 Ra Ga Ba Xa Rb Gb Bb Xb
       * rgb3 = Rc Gc Bc Xc Rd Gd Bd Xd Re Ge Be Xe Rf Gf Bf Xf
       */
      rgb0 = vec_perm(rgbx0, rgbx0, (__vector unsigned char)RGB_INDEX);
      rgb1 = vec_perm(rgbx1, rgbx1, (__vector unsigned char)RGB_INDEX);
      rgb2 = vec_perm(rgbx2, rgbx2, (__vector unsigned char)RGB_INDEX);
      rgb3 = vec_perm(rgbx3, rgbx3, (__vector unsigned char)RGB_INDEX);
#endif

#if __BIG_ENDIAN__
      offset = (size_t)outptr & 15;
      if (offset) {
        __vector unsigned char unaligned_shift_index;
        int bytes = num_cols + offset;

        if (bytes < (RGB_PIXELSIZE + 1) * 16 && (bytes & 15)) {
          /* Slow path to prevent buffer overwrite. Since there is no way to
           * write a partial AltiVec register, overwrite would occur on the
           * last chunk of the last image row if the right edge is not on a
           * 16-byte boundary. It could also occur on other rows if the bytes
           * per row is low enough. Since we can't determine whether we're on
           * the last image row, we have to assume every row is the last.
           */
          vec_st(rgb0, 0, tmpbuf);
          vec_st(rgb1, 16, tmpbuf);
          vec_st(rgb2, 32, tmpbuf);
#if RGB_PIXELSIZE == 4
          vec_st(rgb3, 48, tmpbuf);
#endif
          memcpy(outptr, tmpbuf, min(num_cols, RGB_PIXELSIZE * 16));
        } else {
          /* Fast path */
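          /* All stores must be 16-byte-aligned, so the output is assembled
           * via read-modify-write: load the vectors at the edges of the
           * destination span, then rotate the RGB data into place with
           * vec_lvsl()/vec_lvsr() permutes while preserving the existing
           * bytes that fall outside the span.
           */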
          unaligned_shift_index = vec_lvsl(0, outptr);
          edgel = vec_ld(0, outptr);
          edgeh = vec_ld(min(num_cols - 1, RGB_PIXELSIZE * 16), outptr);
          edges = vec_perm(edgeh, edgel, unaligned_shift_index);
          unaligned_shift_index = vec_lvsr(0, outptr);
          out0 = vec_perm(edges, rgb0, unaligned_shift_index);
          out1 = vec_perm(rgb0, rgb1, unaligned_shift_index);
          out2 = vec_perm(rgb1, rgb2, unaligned_shift_index);
#if RGB_PIXELSIZE == 4
          out3 = vec_perm(rgb2, rgb3, unaligned_shift_index);
          out4 = vec_perm(rgb3, edges, unaligned_shift_index);
#else
          out3 = vec_perm(rgb2, edges, unaligned_shift_index);
#endif
          vec_st(out0, 0, outptr);
          if (bytes > 16)
            vec_st(out1, 16, outptr);
          if (bytes > 32)
            vec_st(out2, 32, outptr);
          if (bytes > 48)
            vec_st(out3, 48, outptr);
#if RGB_PIXELSIZE == 4
          if (bytes > 64)
            vec_st(out4, 64, outptr);
#endif
        }
      } else {
#endif /* __BIG_ENDIAN__ */

        if (num_cols < RGB_PIXELSIZE * 16 && (num_cols & 15)) {
          /* Slow path */
          VEC_ST(rgb0, 0, tmpbuf);
          VEC_ST(rgb1, 16, tmpbuf);
          VEC_ST(rgb2, 32, tmpbuf);
#if RGB_PIXELSIZE == 4
          VEC_ST(rgb3, 48, tmpbuf);
#endif
          memcpy(outptr, tmpbuf, min(num_cols, RGB_PIXELSIZE * 16));
        } else {
          /* Fast path */
          VEC_ST(rgb0, 0, outptr);
          if (num_cols > 16)
            VEC_ST(rgb1, 16, outptr);
          if (num_cols > 32)
            VEC_ST(rgb2, 32, outptr);
#if RGB_PIXELSIZE == 4
          if (num_cols > 48)
            VEC_ST(rgb3, 48, outptr);
#endif
        }
#if __BIG_ENDIAN__
      }
#endif
    }
  }
}

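/* H2V2 merged upsampling produces two output rows per row group, and both
 * rows share the same chroma rows, so it can be expressed as two h2v1
 * passes. The pointer juggling below temporarily substitutes each of the
 * two luma rows (and the second output row) before the corresponding pass,
 * then restores the original pointers.
 */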
void jsimd_h2v2_merged_upsample_altivec(JDIMENSION output_width,
                                        JSAMPIMAGE input_buf,
                                        JDIMENSION in_row_group_ctr,
                                        JSAMPARRAY output_buf)
{
  JSAMPROW inptr, outptr;

  inptr = input_buf[0][in_row_group_ctr];
  outptr = output_buf[0];

  input_buf[0][in_row_group_ctr] = input_buf[0][in_row_group_ctr * 2];
  jsimd_h2v1_merged_upsample_altivec(output_width, input_buf, in_row_group_ctr,
                                     output_buf);

  input_buf[0][in_row_group_ctr] = input_buf[0][in_row_group_ctr * 2 + 1];
  output_buf[0] = output_buf[1];
  jsimd_h2v1_merged_upsample_altivec(output_width, input_buf, in_row_group_ctr,
                                     output_buf);

  input_buf[0][in_row_group_ctr] = inptr;
  output_buf[0] = outptr;
}