/*
 * AltiVec optimizations for libjpeg-turbo
 *
 * Copyright (C) 2015, D. R. Commander.  All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software.  If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* This file is included by jdcolor-altivec.c */


void jsimd_ycc_rgb_convert_altivec(JDIMENSION out_width, JSAMPIMAGE input_buf,
                                   JDIMENSION input_row, JSAMPARRAY output_buf,
                                   int num_rows)
{
  JSAMPROW outptr, inptr0, inptr1, inptr2;
  int pitch = out_width * RGB_PIXELSIZE, num_cols;
#if __BIG_ENDIAN__
  int offset;
#endif
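  /* Staging buffer used by the slow paths below.  Whole vectors are stored
   * here and then memcpy()'d to the output row, so that a partial final
   * chunk never writes past the end of the output buffer.
   */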
  unsigned char __attribute__((aligned(16))) tmpbuf[RGB_PIXELSIZE * 16];

  __vector unsigned char rgb0, rgb1, rgb2, rgbx0, rgbx1, rgbx2, rgbx3,
    y, cb, cr;
#if __BIG_ENDIAN__
  __vector unsigned char edgel, edgeh, edges, out0, out1, out2, out3;
#if RGB_PIXELSIZE == 4
  __vector unsigned char out4;
#endif
#endif
#if RGB_PIXELSIZE == 4
  __vector unsigned char rgb3;
#endif
  __vector short rg0, rg1, rg2, rg3, bx0, bx1, bx2, bx3, yl, yh, cbl, cbh,
    crl, crh, rl, rh, gl, gh, bl, bh, g0w, g1w, g2w, g3w;
  __vector int g0, g1, g2, g3;

  /* Constants
   * NOTE: The >> 1 is to compensate for the fact that vec_madds() returns 17
   * high-order bits, not 16.
   */
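  /* Concretely, vec_madds(a, b, c) computes ((a * b) >> 15) + c with
   * saturation, and the constants below follow the usual libjpeg-turbo
   * FIX() convention (x * 65536, rounded.)  Doubling the Cb/Cr operand,
   * multiplying by the halved constant, adding pw_one, and shifting right
   * by 1, as the loop body does, therefore amounts to a rounded 16-bit
   * descale:
   *
   *   ((((2 * cr) * (F_0_402 >> 1)) >> 15) + 1) >> 1
   *     ~= (cr * F_0_402 + 32768) >> 16
   */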
  __vector short pw_f0402 = { __8X(F_0_402 >> 1) },
    pw_mf0228 = { __8X(-F_0_228 >> 1) },
    pw_mf0344_f0285 = { __4X2(-F_0_344, F_0_285) },
    pw_one = { __8X(1) }, pw_255 = { __8X(255) },
    pw_cj = { __8X(CENTERJSAMPLE) };
  __vector int pd_onehalf = { __4X(ONE_HALF) };
  __vector unsigned char pb_zero = { __16X(0) },
#if __BIG_ENDIAN__
    shift_pack_index =
      { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
#else
    shift_pack_index =
      { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
#endif

  while (--num_rows >= 0) {
    inptr0 = input_buf[0][input_row];
    inptr1 = input_buf[1][input_row];
    inptr2 = input_buf[2][input_row];
    input_row++;
    outptr = *output_buf++;

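    /* Process 16 pixels per iteration.  num_cols counts output bytes
     * (pitch = out_width * RGB_PIXELSIZE), so each chunk consumes 16 samples
     * from each input plane and produces RGB_PIXELSIZE * 16 output bytes.
     */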
    for (num_cols = pitch; num_cols > 0;
         num_cols -= RGB_PIXELSIZE * 16, outptr += RGB_PIXELSIZE * 16,
         inptr0 += 16, inptr1 += 16, inptr2 += 16) {

      y = vec_ld(0, inptr0);
      /* NOTE: We have to use vec_merge*() here because vec_unpack*() doesn't
       * support unsigned vectors.
       */
      yl = (__vector signed short)VEC_UNPACKHU(y);
      yh = (__vector signed short)VEC_UNPACKLU(y);

      cb = vec_ld(0, inptr1);
      cbl = (__vector signed short)VEC_UNPACKHU(cb);
      cbh = (__vector signed short)VEC_UNPACKLU(cb);
      cbl = vec_sub(cbl, pw_cj);
      cbh = vec_sub(cbh, pw_cj);

      cr = vec_ld(0, inptr2);
      crl = (__vector signed short)VEC_UNPACKHU(cr);
      crh = (__vector signed short)VEC_UNPACKLU(cr);
      crl = vec_sub(crl, pw_cj);
      crh = vec_sub(crh, pw_cj);

      /* (Original)
       * R = Y + 1.40200 * Cr
       * G = Y - 0.34414 * Cb - 0.71414 * Cr
       * B = Y + 1.77200 * Cb
       *
       * (This implementation)
       * R = Y + 0.40200 * Cr + Cr
       * G = Y - 0.34414 * Cb + 0.28586 * Cr - Cr
       * B = Y - 0.22800 * Cb + Cb + Cb
       */
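      /* The coefficients are decomposed so that every multiplier has
       * magnitude less than 1 and fits in a signed 16-bit FIX() constant:
       * 1.40200 = 0.40200 + 1, -0.71414 = 0.28586 - 1, and
       * 1.77200 = -0.22800 + 2.  The +/-1 and +2 terms become plain vector
       * additions of Cr and Cb.  A scalar sketch of the B channel computed
       * below (variable names are illustrative; cb is already centered):
       *
       *   b = y + ((cb * -F_0_228 + 32768) >> 16) + cb + cb;
       */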
      bl = vec_add(cbl, cbl);
      bh = vec_add(cbh, cbh);
      bl = vec_madds(bl, pw_mf0228, pw_one);
      bh = vec_madds(bh, pw_mf0228, pw_one);
      bl = vec_sra(bl, (__vector unsigned short)pw_one);
      bh = vec_sra(bh, (__vector unsigned short)pw_one);
      bl = vec_add(bl, cbl);
      bh = vec_add(bh, cbh);
      bl = vec_add(bl, cbl);
      bh = vec_add(bh, cbh);
      bl = vec_add(bl, yl);
      bh = vec_add(bh, yh);

      rl = vec_add(crl, crl);
      rh = vec_add(crh, crh);
      rl = vec_madds(rl, pw_f0402, pw_one);
      rh = vec_madds(rh, pw_f0402, pw_one);
      rl = vec_sra(rl, (__vector unsigned short)pw_one);
      rh = vec_sra(rh, (__vector unsigned short)pw_one);
      rl = vec_add(rl, crl);
      rh = vec_add(rh, crh);
      rl = vec_add(rl, yl);
      rh = vec_add(rh, yh);

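      /* For G, vec_mergeh()/vec_mergel() pair the Cb and Cr words so that
       * each 32-bit lane of vec_msums() accumulates
       * Cb * -F_0_344 + Cr * F_0_285 + ONE_HALF.
       */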
      g0w = vec_mergeh(cbl, crl);
      g1w = vec_mergel(cbl, crl);
      g0 = vec_msums(g0w, pw_mf0344_f0285, pd_onehalf);
      g1 = vec_msums(g1w, pw_mf0344_f0285, pd_onehalf);
      g2w = vec_mergeh(cbh, crh);
      g3w = vec_mergel(cbh, crh);
      g2 = vec_msums(g2w, pw_mf0344_f0285, pd_onehalf);
      g3 = vec_msums(g3w, pw_mf0344_f0285, pd_onehalf);
      /* Clever way to avoid 4 shifts + 2 packs.  This packs the high word
       * from each dword into a new 16-bit vector, which is the equivalent of
       * descaling the 32-bit results (right-shifting by 16 bits) and then
       * packing them.
       */
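      /* shift_pack_index selects the two high-order bytes of each 32-bit
       * lane (bytes 0-1 of each word on big-endian, bytes 2-3 on
       * little-endian), which is why the table differs between the two
       * endiannesses.
       */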
      gl = vec_perm((__vector short)g0, (__vector short)g1, shift_pack_index);
      gh = vec_perm((__vector short)g2, (__vector short)g3, shift_pack_index);
      gl = vec_sub(gl, crl);
      gh = vec_sub(gh, crh);
      gl = vec_add(gl, yl);
      gh = vec_add(gh, yh);

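      /* Interleave R with G and B with the pw_255 filler word, then let
       * vec_packsu() clamp the 16-bit results to the 0..255 sample range,
       * producing the R G .. B X byte layouts diagrammed below.  (The X
       * bytes either become the filler channel or are discarded by the
       * 3-byte permutation.)
       */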
      rg0 = vec_mergeh(rl, gl);
      bx0 = vec_mergeh(bl, pw_255);
      rg1 = vec_mergel(rl, gl);
      bx1 = vec_mergel(bl, pw_255);
      rg2 = vec_mergeh(rh, gh);
      bx2 = vec_mergeh(bh, pw_255);
      rg3 = vec_mergel(rh, gh);
      bx3 = vec_mergel(bh, pw_255);

      rgbx0 = vec_packsu(rg0, bx0);
      rgbx1 = vec_packsu(rg1, bx1);
      rgbx2 = vec_packsu(rg2, bx2);
      rgbx3 = vec_packsu(rg3, bx3);

#if RGB_PIXELSIZE == 3
      /* rgbx0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 X0 B1 X1 B2 X2 B3 X3
       * rgbx1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 X4 B5 X5 B6 X6 B7 X7
       * rgbx2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 X8 B9 X9 Ba Xa Bb Xb
       * rgbx3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Xc Bd Xd Be Xe Bf Xf
       *
       * rgb0 = R0 G0 B0 R1 G1 B1 R2 G2 B2 R3 G3 B3 R4 G4 B4 R5
       * rgb1 = G5 B5 R6 G6 B6 R7 G7 B7 R8 G8 B8 R9 G9 B9 Ra Ga
       * rgb2 = Ba Rb Gb Bb Rc Gc Bc Rd Gd Bd Re Ge Be Rf Gf Bf
       */
      rgb0 = vec_perm(rgbx0, rgbx1, (__vector unsigned char)RGB_INDEX0);
      rgb1 = vec_perm(rgbx1, rgbx2, (__vector unsigned char)RGB_INDEX1);
      rgb2 = vec_perm(rgbx2, rgbx3, (__vector unsigned char)RGB_INDEX2);
#else
      /* rgbx0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 X0 B1 X1 B2 X2 B3 X3
       * rgbx1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 X4 B5 X5 B6 X6 B7 X7
       * rgbx2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 X8 B9 X9 Ba Xa Bb Xb
       * rgbx3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Xc Bd Xd Be Xe Bf Xf
       *
       * rgb0 = R0 G0 B0 X0 R1 G1 B1 X1 R2 G2 B2 X2 R3 G3 B3 X3
       * rgb1 = R4 G4 B4 X4 R5 G5 B5 X5 R6 G6 B6 X6 R7 G7 B7 X7
       * rgb2 = R8 G8 B8 X8 R9 G9 B9 X9 Ra Ga Ba Xa Rb Gb Bb Xb
       * rgb3 = Rc Gc Bc Xc Rd Gd Bd Xd Re Ge Be Xe Rf Gf Bf Xf
       */
      rgb0 = vec_perm(rgbx0, rgbx0, (__vector unsigned char)RGB_INDEX);
      rgb1 = vec_perm(rgbx1, rgbx1, (__vector unsigned char)RGB_INDEX);
      rgb2 = vec_perm(rgbx2, rgbx2, (__vector unsigned char)RGB_INDEX);
      rgb3 = vec_perm(rgbx3, rgbx3, (__vector unsigned char)RGB_INDEX);
#endif

#if __BIG_ENDIAN__
      offset = (size_t)outptr & 15;
      if (offset) {
        __vector unsigned char unaligned_shift_index;
        int bytes = num_cols + offset;

        if (bytes < (RGB_PIXELSIZE + 1) * 16 && (bytes & 15)) {
          /* Slow path to prevent buffer overwrite.  Since there is no way to
           * write a partial AltiVec register, overwrite would occur on the
           * last chunk of the last image row if the right edge is not on a
           * 16-byte boundary.  It could also occur on other rows if the
           * bytes per row is low enough.  Since we can't determine whether
           * we're on the last image row, we have to assume every row is the
           * last.
           */
          vec_st(rgb0, 0, tmpbuf);
          vec_st(rgb1, 16, tmpbuf);
          vec_st(rgb2, 32, tmpbuf);
#if RGB_PIXELSIZE == 4
          vec_st(rgb3, 48, tmpbuf);
#endif
          memcpy(outptr, tmpbuf, min(num_cols, RGB_PIXELSIZE * 16));
        } else {
          /* Fast path */
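          /* Standard AltiVec misaligned-store technique:  load the aligned
           * vectors that overlap the start and end of the output window,
           * gather the bytes that must be preserved into 'edges', then use
           * vec_lvsr() permutes to rotate each RGB vector into its
           * misaligned position before storing whole aligned vectors back.
           */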
          unaligned_shift_index = vec_lvsl(0, outptr);
          edgel = vec_ld(0, outptr);
          edgeh = vec_ld(min(num_cols - 1, RGB_PIXELSIZE * 16), outptr);
          edges = vec_perm(edgeh, edgel, unaligned_shift_index);
          unaligned_shift_index = vec_lvsr(0, outptr);
          out0 = vec_perm(edges, rgb0, unaligned_shift_index);
          out1 = vec_perm(rgb0, rgb1, unaligned_shift_index);
          out2 = vec_perm(rgb1, rgb2, unaligned_shift_index);
#if RGB_PIXELSIZE == 4
          out3 = vec_perm(rgb2, rgb3, unaligned_shift_index);
          out4 = vec_perm(rgb3, edges, unaligned_shift_index);
#else
          out3 = vec_perm(rgb2, edges, unaligned_shift_index);
#endif
          vec_st(out0, 0, outptr);
          if (bytes > 16)
            vec_st(out1, 16, outptr);
          if (bytes > 32)
            vec_st(out2, 32, outptr);
          if (bytes > 48)
            vec_st(out3, 48, outptr);
#if RGB_PIXELSIZE == 4
          if (bytes > 64)
            vec_st(out4, 64, outptr);
#endif
        }
      } else {
#endif /* __BIG_ENDIAN__ */
        if (num_cols < RGB_PIXELSIZE * 16 && (num_cols & 15)) {
          /* Slow path */
          VEC_ST(rgb0, 0, tmpbuf);
          VEC_ST(rgb1, 16, tmpbuf);
          VEC_ST(rgb2, 32, tmpbuf);
#if RGB_PIXELSIZE == 4
          VEC_ST(rgb3, 48, tmpbuf);
#endif
          memcpy(outptr, tmpbuf, min(num_cols, RGB_PIXELSIZE * 16));
        } else {
          /* Fast path */
          VEC_ST(rgb0, 0, outptr);
          if (num_cols > 16)
            VEC_ST(rgb1, 16, outptr);
          if (num_cols > 32)
            VEC_ST(rgb2, 32, outptr);
#if RGB_PIXELSIZE == 4
          if (num_cols > 48)
            VEC_ST(rgb3, 48, outptr);
#endif
        }
#if __BIG_ENDIAN__
      }
#endif
    }
  }
}