/*
 * AltiVec optimizations for libjpeg-turbo
 *
 * Copyright (C) 2015, D. R. Commander. All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty. In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* This file is included by jdmerge-altivec.c */


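/* h2v1 merged upsampling/color conversion: convert one row of YCbCr data
 * with 2:1 horizontal chroma subsampling directly to RGB. Each Cb/Cr sample
 * is applied to two horizontally adjacent Y samples, so one 16-byte chroma
 * load covers 32 output pixels (two 16-byte luma loads -- hence the two-pass
 * inner loop below.)
 */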
void jsimd_h2v1_merged_upsample_altivec (JDIMENSION output_width,
                                         JSAMPIMAGE input_buf,
                                         JDIMENSION in_row_group_ctr,
                                         JSAMPARRAY output_buf)
{
  JSAMPROW outptr, inptr0, inptr1, inptr2;
  int pitch = output_width * RGB_PIXELSIZE, num_cols, yloop;
#if __BIG_ENDIAN__
  int offset;
#endif
  unsigned char __attribute__((aligned(16))) tmpbuf[RGB_PIXELSIZE * 16];

  __vector unsigned char rgb0, rgb1, rgb2, rgbx0, rgbx1, rgbx2, rgbx3,
    y, cb, cr;
#if __BIG_ENDIAN__
  __vector unsigned char edgel, edgeh, edges, out0, out1, out2, out3;
#if RGB_PIXELSIZE == 4
  __vector unsigned char out4;
#endif
#endif
#if RGB_PIXELSIZE == 4
  __vector unsigned char rgb3;
#endif
  __vector short rg0, rg1, rg2, rg3, bx0, bx1, bx2, bx3, ye, yo, cbl, cbh,
    crl, crh, r_yl, r_yh, g_yl, g_yh, b_yl, b_yh, g_y0w, g_y1w, g_y2w, g_y3w,
    rl, rh, gl, gh, bl, bh, re, ro, ge, go, be, bo;
  __vector int g_y0, g_y1, g_y2, g_y3;

  /* Constants
   * NOTE: The >> 1 is to compensate for the fact that vec_madds() returns 17
   * high-order bits, not 16.
   */
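  /* The F_* constants are assumed to be fixed-point fractions scaled by 2^16
   * and defined by the including file (e.g. F_0_402 = FIX(1.40200) - FIX(1)
   * = 26345, with FIX(x) = x * 65536). vec_madds(a, b, c) computes
   * ((a * b) >> 15) + c with saturation, so the constants are pre-halved and
   * the Cb/Cr operands pre-doubled below, leaving the product unchanged; the
   * final vec_sra() by one bit (rounded via the pw_one addend) completes the
   * descale to a >> 16.
   */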
  __vector short pw_f0402 = { __8X(F_0_402 >> 1) },
    pw_mf0228 = { __8X(-F_0_228 >> 1) },
    pw_mf0344_f0285 = { __4X2(-F_0_344, F_0_285) },
    pw_one = { __8X(1) }, pw_255 = { __8X(255) },
    pw_cj = { __8X(CENTERJSAMPLE) };
  __vector int pd_onehalf = { __4X(ONE_HALF) };
  __vector unsigned char pb_zero = { __16X(0) },
#if __BIG_ENDIAN__
    shift_pack_index = {0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29},
    even_index = {0,16,0,18,0,20,0,22,0,24,0,26,0,28,0,30},
    odd_index = {0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31};
#else
    shift_pack_index = {2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31},
    even_index = {16,0,18,0,20,0,22,0,24,0,26,0,28,0,30,0},
    odd_index = {17,0,19,0,21,0,23,0,25,0,27,0,29,0,31,0};
#endif

  inptr0 = input_buf[0][in_row_group_ctr];
  inptr1 = input_buf[1][in_row_group_ctr];
  inptr2 = input_buf[2][in_row_group_ctr];
  outptr = output_buf[0];

  for (num_cols = pitch; num_cols > 0; inptr1 += 16, inptr2 += 16) {

    cb = vec_ld(0, inptr1);
    /* NOTE: We have to use vec_merge*() here because vec_unpack*() doesn't
     * support unsigned vectors.
     */
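    /* VEC_UNPACKHU()/VEC_UNPACKLU() are assumed to be merge-based
     * zero-extension macros supplied by the including headers (e.g.
     * vec_mergeh(pb_zero, cb) on big-endian), widening the upper/lower eight
     * unsigned bytes to 16-bit values.
     */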
    cbl = (__vector signed short)VEC_UNPACKHU(cb);
    cbh = (__vector signed short)VEC_UNPACKLU(cb);
    cbl = vec_sub(cbl, pw_cj);
    cbh = vec_sub(cbh, pw_cj);

    cr = vec_ld(0, inptr2);
    crl = (__vector signed short)VEC_UNPACKHU(cr);
    crh = (__vector signed short)VEC_UNPACKLU(cr);
    crl = vec_sub(crl, pw_cj);
    crh = vec_sub(crh, pw_cj);

    /* (Original)
     * R = Y                + 1.40200 * Cr
     * G = Y - 0.34414 * Cb - 0.71414 * Cr
     * B = Y + 1.77200 * Cb
     *
     * (This implementation)
     * R = Y                + 0.40200 * Cr + Cr
     * G = Y - 0.34414 * Cb + 0.28586 * Cr - Cr
     * B = Y - 0.22800 * Cb + Cb + Cb
     */
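    /* Splitting the coefficients this way keeps every fractional multiplier
     * strictly inside (-1, 1), so its scaled value fits in the signed 16-bit
     * words used by vec_madds(); the integer parts (the bare +/-Cb and +/-Cr
     * terms) are applied with exact vector additions below.
     */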
    b_yl = vec_add(cbl, cbl);
    b_yh = vec_add(cbh, cbh);
    b_yl = vec_madds(b_yl, pw_mf0228, pw_one);
    b_yh = vec_madds(b_yh, pw_mf0228, pw_one);
    b_yl = vec_sra(b_yl, (__vector unsigned short)pw_one);
    b_yh = vec_sra(b_yh, (__vector unsigned short)pw_one);
    b_yl = vec_add(b_yl, cbl);
    b_yh = vec_add(b_yh, cbh);
    b_yl = vec_add(b_yl, cbl);
    b_yh = vec_add(b_yh, cbh);

    r_yl = vec_add(crl, crl);
    r_yh = vec_add(crh, crh);
    r_yl = vec_madds(r_yl, pw_f0402, pw_one);
    r_yh = vec_madds(r_yh, pw_f0402, pw_one);
    r_yl = vec_sra(r_yl, (__vector unsigned short)pw_one);
    r_yh = vec_sra(r_yh, (__vector unsigned short)pw_one);
    r_yl = vec_add(r_yl, crl);
    r_yh = vec_add(r_yh, crh);

    g_y0w = vec_mergeh(cbl, crl);
    g_y1w = vec_mergel(cbl, crl);
    g_y0 = vec_msums(g_y0w, pw_mf0344_f0285, pd_onehalf);
    g_y1 = vec_msums(g_y1w, pw_mf0344_f0285, pd_onehalf);
    g_y2w = vec_mergeh(cbh, crh);
    g_y3w = vec_mergel(cbh, crh);
    g_y2 = vec_msums(g_y2w, pw_mf0344_f0285, pd_onehalf);
    g_y3 = vec_msums(g_y3w, pw_mf0344_f0285, pd_onehalf);
    /* Clever way to avoid 4 shifts + 2 packs. This packs the high word from
     * each dword into a new 16-bit vector, which is the equivalent of
     * descaling the 32-bit results (right-shifting by 16 bits) and then
     * packing them.
     */
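    /* shift_pack_index selects the two most significant bytes of each 32-bit
     * element (bytes 0-1 of each word on big-endian, bytes 2-3 on
     * little-endian), so a single permute extracts the high halfword of
     * every dword.
     */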
    g_yl = vec_perm((__vector short)g_y0, (__vector short)g_y1,
                    shift_pack_index);
    g_yh = vec_perm((__vector short)g_y2, (__vector short)g_y3,
                    shift_pack_index);
    g_yl = vec_sub(g_yl, crl);
    g_yh = vec_sub(g_yh, crh);

    for (yloop = 0; yloop < 2 && num_cols > 0; yloop++,
         num_cols -= RGB_PIXELSIZE * 16,
         outptr += RGB_PIXELSIZE * 16, inptr0 += 16) {

      y = vec_ld(0, inptr0);
      ye = (__vector signed short)vec_perm(pb_zero, y, even_index);
      yo = (__vector signed short)vec_perm(pb_zero, y, odd_index);
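      /* ye/yo hold the even- and odd-numbered Y samples of this 16-pixel
       * block, zero-extended to 16 bits. Because the chroma is subsampled
       * 2:1 horizontally, the same Cb/Cr-derived terms are added to both
       * vectors, and the results are re-interleaved into pixel order by the
       * vec_mergeh()/vec_mergel() calls below.
       */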

      if (yloop == 0) {
        be = vec_add(b_yl, ye);
        bo = vec_add(b_yl, yo);
        re = vec_add(r_yl, ye);
        ro = vec_add(r_yl, yo);
        ge = vec_add(g_yl, ye);
        go = vec_add(g_yl, yo);
      } else {
        be = vec_add(b_yh, ye);
        bo = vec_add(b_yh, yo);
        re = vec_add(r_yh, ye);
        ro = vec_add(r_yh, yo);
        ge = vec_add(g_yh, ye);
        go = vec_add(g_yh, yo);
      }

      rl = vec_mergeh(re, ro);
      rh = vec_mergel(re, ro);
      gl = vec_mergeh(ge, go);
      gh = vec_mergel(ge, go);
      bl = vec_mergeh(be, bo);
      bh = vec_mergel(be, bo);

      rg0 = vec_mergeh(rl, gl);
      bx0 = vec_mergeh(bl, pw_255);
      rg1 = vec_mergel(rl, gl);
      bx1 = vec_mergel(bl, pw_255);
      rg2 = vec_mergeh(rh, gh);
      bx2 = vec_mergeh(bh, pw_255);
      rg3 = vec_mergel(rh, gh);
      bx3 = vec_mergel(bh, pw_255);

      rgbx0 = vec_packsu(rg0, bx0);
      rgbx1 = vec_packsu(rg1, bx1);
      rgbx2 = vec_packsu(rg2, bx2);
      rgbx3 = vec_packsu(rg3, bx3);
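      /* vec_packsu() narrows the signed 16-bit results to unsigned bytes
       * with saturation, which doubles as the [0, 255] range-limiting step;
       * the X positions are filled from pw_255.
       */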

#if RGB_PIXELSIZE == 3
      /* rgbx0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 X0 B1 X1 B2 X2 B3 X3
       * rgbx1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 X4 B5 X5 B6 X6 B7 X7
       * rgbx2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 X8 B9 X9 Ba Xa Bb Xb
       * rgbx3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Xc Bd Xd Be Xe Bf Xf
       *
       * rgb0 = R0 G0 B0 R1 G1 B1 R2 G2 B2 R3 G3 B3 R4 G4 B4 R5
       * rgb1 = G5 B5 R6 G6 B6 R7 G7 B7 R8 G8 B8 R9 G9 B9 Ra Ga
       * rgb2 = Ba Rb Gb Bb Rc Gc Bc Rd Gd Bd Re Ge Be Rf Gf Bf
       */
      rgb0 = vec_perm(rgbx0, rgbx1, (__vector unsigned char)RGB_INDEX0);
      rgb1 = vec_perm(rgbx1, rgbx2, (__vector unsigned char)RGB_INDEX1);
      rgb2 = vec_perm(rgbx2, rgbx3, (__vector unsigned char)RGB_INDEX2);
#else
      /* rgbx0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 X0 B1 X1 B2 X2 B3 X3
       * rgbx1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 X4 B5 X5 B6 X6 B7 X7
       * rgbx2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 X8 B9 X9 Ba Xa Bb Xb
       * rgbx3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Xc Bd Xd Be Xe Bf Xf
       *
       * rgb0 = R0 G0 B0 X0 R1 G1 B1 X1 R2 G2 B2 X2 R3 G3 B3 X3
       * rgb1 = R4 G4 B4 X4 R5 G5 B5 X5 R6 G6 B6 X6 R7 G7 B7 X7
       * rgb2 = R8 G8 B8 X8 R9 G9 B9 X9 Ra Ga Ba Xa Rb Gb Bb Xb
       * rgb3 = Rc Gc Bc Xc Rd Gd Bd Xd Re Ge Be Xe Rf Gf Bf Xf
       */
      rgb0 = vec_perm(rgbx0, rgbx0, (__vector unsigned char)RGB_INDEX);
      rgb1 = vec_perm(rgbx1, rgbx1, (__vector unsigned char)RGB_INDEX);
      rgb2 = vec_perm(rgbx2, rgbx2, (__vector unsigned char)RGB_INDEX);
      rgb3 = vec_perm(rgbx3, rgbx3, (__vector unsigned char)RGB_INDEX);
#endif

#if __BIG_ENDIAN__
      offset = (size_t)outptr & 15;
      if (offset) {
        __vector unsigned char unaligned_shift_index;
        int bytes = num_cols + offset;

        if (bytes < (RGB_PIXELSIZE + 1) * 16 && (bytes & 15)) {
          /* Slow path to prevent buffer overwrite. Since there is no way to
           * write a partial AltiVec register, overwrite would occur on the
           * last chunk of the last image row if the right edge is not on a
           * 16-byte boundary. It could also occur on other rows if the
           * number of bytes per row is small enough. Since we can't
           * determine whether we're on the last image row, we have to assume
           * every row is the last.
           */
          vec_st(rgb0, 0, tmpbuf);
          vec_st(rgb1, 16, tmpbuf);
          vec_st(rgb2, 32, tmpbuf);
#if RGB_PIXELSIZE == 4
          vec_st(rgb3, 48, tmpbuf);
#endif
          memcpy(outptr, tmpbuf, min(num_cols, RGB_PIXELSIZE * 16));
        } else {
          /* Fast path */
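          /* Read-modify-write sequence for an unaligned destination: edgel
           * and edgeh are the aligned blocks containing the bytes just
           * before and just after the output run, edges gathers those bytes
           * into one vector, and the vec_lvsr() permute shifts the RGB data
           * so that whole 16-byte stores write the edge bytes back
           * unchanged.
           */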
          unaligned_shift_index = vec_lvsl(0, outptr);
          edgel = vec_ld(0, outptr);
          edgeh = vec_ld(min(num_cols - 1, RGB_PIXELSIZE * 16), outptr);
          edges = vec_perm(edgeh, edgel, unaligned_shift_index);
          unaligned_shift_index = vec_lvsr(0, outptr);
          out0 = vec_perm(edges, rgb0, unaligned_shift_index);
          out1 = vec_perm(rgb0, rgb1, unaligned_shift_index);
          out2 = vec_perm(rgb1, rgb2, unaligned_shift_index);
#if RGB_PIXELSIZE == 4
          out3 = vec_perm(rgb2, rgb3, unaligned_shift_index);
          out4 = vec_perm(rgb3, edges, unaligned_shift_index);
#else
          out3 = vec_perm(rgb2, edges, unaligned_shift_index);
#endif
          vec_st(out0, 0, outptr);
          if (bytes > 16)
            vec_st(out1, 16, outptr);
          if (bytes > 32)
            vec_st(out2, 32, outptr);
          if (bytes > 48)
            vec_st(out3, 48, outptr);
#if RGB_PIXELSIZE == 4
          if (bytes > 64)
            vec_st(out4, 64, outptr);
#endif
        }
      } else {
#endif /* __BIG_ENDIAN__ */
        if (num_cols < RGB_PIXELSIZE * 16 && (num_cols & 15)) {
          /* Slow path */
          VEC_ST(rgb0, 0, tmpbuf);
          VEC_ST(rgb1, 16, tmpbuf);
          VEC_ST(rgb2, 32, tmpbuf);
#if RGB_PIXELSIZE == 4
          VEC_ST(rgb3, 48, tmpbuf);
#endif
          memcpy(outptr, tmpbuf, min(num_cols, RGB_PIXELSIZE * 16));
        } else {
          /* Fast path */
          VEC_ST(rgb0, 0, outptr);
          if (num_cols > 16)
            VEC_ST(rgb1, 16, outptr);
          if (num_cols > 32)
            VEC_ST(rgb2, 32, outptr);
#if RGB_PIXELSIZE == 4
          if (num_cols > 48)
            VEC_ST(rgb3, 48, outptr);
#endif
        }
#if __BIG_ENDIAN__
      }
#endif
    }
  }
}


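/* h2v2 merged upsampling/color conversion: a row group contains two luma
 * rows that share one Cb/Cr row, so each output row is produced by pointing
 * input_buf[0][in_row_group_ctr] at the corresponding luma row and reusing
 * the h2v1 routine above. The original row and output pointers are restored
 * before returning.
 */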
void jsimd_h2v2_merged_upsample_altivec (JDIMENSION output_width,
                                         JSAMPIMAGE input_buf,
                                         JDIMENSION in_row_group_ctr,
                                         JSAMPARRAY output_buf)
{
  JSAMPROW inptr, outptr;

  inptr = input_buf[0][in_row_group_ctr];
  outptr = output_buf[0];

  input_buf[0][in_row_group_ctr] = input_buf[0][in_row_group_ctr * 2];
  jsimd_h2v1_merged_upsample_altivec(output_width, input_buf, in_row_group_ctr,
                                     output_buf);

  input_buf[0][in_row_group_ctr] = input_buf[0][in_row_group_ctr * 2 + 1];
  output_buf[0] = output_buf[1];
  jsimd_h2v1_merged_upsample_altivec(output_width, input_buf, in_row_group_ctr,
                                     output_buf);

  input_buf[0][in_row_group_ctr] = inptr;
  output_buf[0] = outptr;
}