/*
 * Loongson MMI optimizations for libjpeg-turbo
 *
 * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
 * Copyright (C) 2015, D. R. Commander. All Rights Reserved.
 * Copyright (C) 2016-2017, Loongson Technology Corporation Limited, BeiJing.
 *                          All Rights Reserved.
 * Authors:  ZhuChen     <zhuchen@loongson.cn>
 *           SunZhangzhi <sunzhangzhi-cq@loongson.cn>
 *           CaiWanwei   <caiwanwei@loongson.cn>
 *
 * Based on the x86 SIMD extension for IJG JPEG library
 * Copyright (C) 1999-2006, MIYASAKA Masaru.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* This file is included by jdcolor-mmi.c */

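/* The mmA-mmH aliases below map each color component's even/odd register
 * pair onto mm0-mm7 according to the component order selected by RGB_RED,
 * RGB_GREEN, and RGB_BLUE, so the interleaving code at the bottom of the
 * conversion loop is independent of the output pixel format (the leftover
 * pair, mm6/mm7, lands on the filler byte for 4-byte formats).
 */
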
#if RGB_RED == 0
#define mmA mm0
#define mmB mm1
#elif RGB_GREEN == 0
#define mmA mm2
#define mmB mm3
#elif RGB_BLUE == 0
#define mmA mm4
#define mmB mm5
#else
#define mmA mm6
#define mmB mm7
#endif

#if RGB_RED == 1
#define mmC mm0
#define mmD mm1
#elif RGB_GREEN == 1
#define mmC mm2
#define mmD mm3
#elif RGB_BLUE == 1
#define mmC mm4
#define mmD mm5
#else
#define mmC mm6
#define mmD mm7
#endif

#if RGB_RED == 2
#define mmE mm0
#define mmF mm1
#elif RGB_GREEN == 2
#define mmE mm2
#define mmF mm3
#elif RGB_BLUE == 2
#define mmE mm4
#define mmF mm5
#else
#define mmE mm6
#define mmF mm7
#endif

#if RGB_RED == 3
#define mmG mm0
#define mmH mm1
#elif RGB_GREEN == 3
#define mmG mm2
#define mmH mm3
#elif RGB_BLUE == 3
#define mmG mm4
#define mmH mm5
#else
#define mmG mm6
#define mmH mm7
#endif

void jsimd_ycc_rgb_convert_mmi(JDIMENSION out_width, JSAMPIMAGE input_buf,
                               JDIMENSION input_row, JSAMPARRAY output_buf,
                               int num_rows)
{
  JSAMPROW outptr, inptr0, inptr1, inptr2;
  int num_cols, col;
  __m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7;
  __m64 mm8, wk[2];

  while (--num_rows >= 0) {
    inptr0 = input_buf[0][input_row];
    inptr1 = input_buf[1][input_row];
    inptr2 = input_buf[2][input_row];
    input_row++;
    outptr = *output_buf++;

    for (num_cols = out_width; num_cols > 0; num_cols -= 8,
         inptr0 += 8, inptr1 += 8, inptr2 += 8) {

      mm5 = _mm_load_si64((__m64 *)inptr1);
      mm1 = _mm_load_si64((__m64 *)inptr2);
      mm8 = _mm_load_si64((__m64 *)inptr0);
      mm4 = 0;
      mm7 = 0;
      mm4 = _mm_cmpeq_pi16(mm4, mm4);
      mm7 = _mm_cmpeq_pi16(mm7, mm7);
      mm4 = _mm_srli_pi16(mm4, BYTE_BIT);
      mm7 = _mm_slli_pi16(mm7, 7);          /* mm7={0xFF80 0xFF80 0xFF80 0xFF80} */
      mm0 = mm4;                            /* mm0=mm4={0xFF 0x00 0xFF 0x00 ..} */

      mm4 = _mm_and_si64(mm4, mm5);         /* mm4=Cb(0246)=CbE */
      mm5 = _mm_srli_pi16(mm5, BYTE_BIT);   /* mm5=Cb(1357)=CbO */
      mm0 = _mm_and_si64(mm0, mm1);         /* mm0=Cr(0246)=CrE */
      mm1 = _mm_srli_pi16(mm1, BYTE_BIT);   /* mm1=Cr(1357)=CrO */
      mm4 = _mm_add_pi16(mm4, mm7);
      mm5 = _mm_add_pi16(mm5, mm7);
      mm0 = _mm_add_pi16(mm0, mm7);
      mm1 = _mm_add_pi16(mm1, mm7);
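
      /* Note: 0xFF80 is -128 as a signed 16-bit word, so the four adds above
       * recenter the unsigned Cb/Cr samples (0..255) into the signed range
       * -128..127 expected by the fixed-point math that follows. */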

      /* (Original)
       * R = Y + 1.40200 * Cr
       * G = Y - 0.34414 * Cb - 0.71414 * Cr
       * B = Y + 1.77200 * Cb
       *
       * (This implementation)
       * R = Y + 0.40200 * Cr + Cr
       * G = Y - 0.34414 * Cb + 0.28586 * Cr - Cr
       * B = Y - 0.22800 * Cb + Cb + Cb
       */
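
      /* A sketch of why the constants are rewritten, assuming libjpeg-turbo's
       * usual FIX(x) ~= x * 65536 fixed-point convention: _mm_mulhi_pi16()
       * multiplies signed 16-bit words and keeps the high 16 bits of each
       * 32-bit product, so a constant such as FIX(1.40200) does not fit in a
       * signed word, while FIX(0.40200) does; the remaining 1.0 * Cr is then
       * recovered with a plain add.  Doubling the input before the high
       * multiply and halving afterward keeps one extra bit of precision for
       * the rounding steps below, e.g.
       *
       *   R - Y = (((((2 * Cr) * FIX(0.40200)) >> 16) + 1) >> 1) + Cr
       */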

      mm2 = mm4;                            /* mm2 = CbE */
      mm3 = mm5;                            /* mm3 = CbO */
      mm4 = _mm_add_pi16(mm4, mm4);         /* mm4 = 2*CbE */
      mm5 = _mm_add_pi16(mm5, mm5);         /* mm5 = 2*CbO */
      mm6 = mm0;                            /* mm6 = CrE */
      mm7 = mm1;                            /* mm7 = CrO */
      mm0 = _mm_add_pi16(mm0, mm0);         /* mm0 = 2*CrE */
      mm1 = _mm_add_pi16(mm1, mm1);         /* mm1 = 2*CrO */
      mm4 = _mm_mulhi_pi16(mm4, PW_MF0228); /* mm4=(2*CbE * -FIX(0.22800)) */
      mm5 = _mm_mulhi_pi16(mm5, PW_MF0228); /* mm5=(2*CbO * -FIX(0.22800)) */
      mm0 = _mm_mulhi_pi16(mm0, PW_F0402);  /* mm0=(2*CrE * FIX(0.40200)) */
      mm1 = _mm_mulhi_pi16(mm1, PW_F0402);  /* mm1=(2*CrO * FIX(0.40200)) */

      mm4 = _mm_add_pi16(mm4, PW_ONE);
      mm5 = _mm_add_pi16(mm5, PW_ONE);
      mm4 = _mm_srai_pi16(mm4, 1);          /* mm4=(CbE * -FIX(0.22800)) */
      mm5 = _mm_srai_pi16(mm5, 1);          /* mm5=(CbO * -FIX(0.22800)) */
      mm0 = _mm_add_pi16(mm0, PW_ONE);
      mm1 = _mm_add_pi16(mm1, PW_ONE);
      mm0 = _mm_srai_pi16(mm0, 1);          /* mm0=(CrE * FIX(0.40200)) */
      mm1 = _mm_srai_pi16(mm1, 1);          /* mm1=(CrO * FIX(0.40200)) */

      mm4 = _mm_add_pi16(mm4, mm2);
      mm5 = _mm_add_pi16(mm5, mm3);
      mm4 = _mm_add_pi16(mm4, mm2);         /* mm4=(CbE * FIX(1.77200))=(B-Y)E */
      mm5 = _mm_add_pi16(mm5, mm3);         /* mm5=(CbO * FIX(1.77200))=(B-Y)O */
      mm0 = _mm_add_pi16(mm0, mm6);         /* mm0=(CrE * FIX(1.40200))=(R-Y)E */
      mm1 = _mm_add_pi16(mm1, mm7);         /* mm1=(CrO * FIX(1.40200))=(R-Y)O */

      wk[0] = mm4;                          /* wk(0)=(B-Y)E */
      wk[1] = mm5;                          /* wk(1)=(B-Y)O */

      mm4 = mm2;
      mm5 = mm3;
      mm2 = _mm_unpacklo_pi16(mm2, mm6);
      mm4 = _mm_unpackhi_pi16(mm4, mm6);
      mm2 = _mm_madd_pi16(mm2, PW_MF0344_F0285);
      mm4 = _mm_madd_pi16(mm4, PW_MF0344_F0285);
      mm3 = _mm_unpacklo_pi16(mm3, mm7);
      mm5 = _mm_unpackhi_pi16(mm5, mm7);
      mm3 = _mm_madd_pi16(mm3, PW_MF0344_F0285);
      mm5 = _mm_madd_pi16(mm5, PW_MF0344_F0285);

      mm2 = _mm_add_pi32(mm2, PD_ONEHALF);
      mm4 = _mm_add_pi32(mm4, PD_ONEHALF);
      mm2 = _mm_srai_pi32(mm2, SCALEBITS);
      mm4 = _mm_srai_pi32(mm4, SCALEBITS);
      mm3 = _mm_add_pi32(mm3, PD_ONEHALF);
      mm5 = _mm_add_pi32(mm5, PD_ONEHALF);
      mm3 = _mm_srai_pi32(mm3, SCALEBITS);
      mm5 = _mm_srai_pi32(mm5, SCALEBITS);

      mm2 = _mm_packs_pi32(mm2, mm4);  /* mm2=CbE*-FIX(0.344)+CrE*FIX(0.285) */
      mm3 = _mm_packs_pi32(mm3, mm5);  /* mm3=CbO*-FIX(0.344)+CrO*FIX(0.285) */
      mm2 = _mm_sub_pi16(mm2, mm6);  /* mm2=CbE*-FIX(0.344)+CrE*-FIX(0.714)=(G-Y)E */
      mm3 = _mm_sub_pi16(mm3, mm7);  /* mm3=CbO*-FIX(0.344)+CrO*-FIX(0.714)=(G-Y)O */

      mm5 = mm8;                            /* mm5=Y(01234567) */

      mm4 = _mm_cmpeq_pi16(mm4, mm4);
      mm4 = _mm_srli_pi16(mm4, BYTE_BIT);   /* mm4={0xFF 0x00 0xFF 0x00 ..} */
      mm4 = _mm_and_si64(mm4, mm5);         /* mm4=Y(0246)=YE */
      mm5 = _mm_srli_pi16(mm5, BYTE_BIT);   /* mm5=Y(1357)=YO */

      mm0 = _mm_add_pi16(mm0, mm4);    /* mm0=((R-Y)E+YE)=RE=(R0 R2 R4 R6) */
      mm1 = _mm_add_pi16(mm1, mm5);    /* mm1=((R-Y)O+YO)=RO=(R1 R3 R5 R7) */
      mm0 = _mm_packs_pu16(mm0, mm0);  /* mm0=(R0 R2 R4 R6 ** ** ** **) */
      mm1 = _mm_packs_pu16(mm1, mm1);  /* mm1=(R1 R3 R5 R7 ** ** ** **) */

      mm2 = _mm_add_pi16(mm2, mm4);    /* mm2=((G-Y)E+YE)=GE=(G0 G2 G4 G6) */
      mm3 = _mm_add_pi16(mm3, mm5);    /* mm3=((G-Y)O+YO)=GO=(G1 G3 G5 G7) */
      mm2 = _mm_packs_pu16(mm2, mm2);  /* mm2=(G0 G2 G4 G6 ** ** ** **) */
      mm3 = _mm_packs_pu16(mm3, mm3);  /* mm3=(G1 G3 G5 G7 ** ** ** **) */

      mm4 = _mm_add_pi16(mm4, wk[0]);  /* mm4=(YE+(B-Y)E)=BE=(B0 B2 B4 B6) */
      mm5 = _mm_add_pi16(mm5, wk[1]);  /* mm5=(YO+(B-Y)O)=BO=(B1 B3 B5 B7) */
      mm4 = _mm_packs_pu16(mm4, mm4);  /* mm4=(B0 B2 B4 B6 ** ** ** **) */
      mm5 = _mm_packs_pu16(mm5, mm5);  /* mm5=(B1 B3 B5 B7 ** ** ** **) */

#if RGB_PIXELSIZE == 3

      /* mmA=(00 02 04 06 ** ** ** **), mmB=(01 03 05 07 ** ** ** **) */
      /* mmC=(10 12 14 16 ** ** ** **), mmD=(11 13 15 17 ** ** ** **) */
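      /* The unpack/shift sequence below interleaves the even/odd component
       * qwords into 3-byte pixels, leaving pixels 0-7 in memory order across
       * mmA, mmE, and mmC (see the trailing register comments). */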
      mmA = _mm_unpacklo_pi8(mmA, mmC);     /* mmA=(00 10 02 12 04 14 06 16) */
      mmE = _mm_unpacklo_pi8(mmE, mmB);     /* mmE=(20 01 22 03 24 05 26 07) */
      mmD = _mm_unpacklo_pi8(mmD, mmF);     /* mmD=(11 21 13 23 15 25 17 27) */

      mmG = mmA;
      mmH = mmA;
      mmA = _mm_unpacklo_pi16(mmA, mmE);    /* mmA=(00 10 20 01 02 12 22 03) */
      mmG = _mm_unpackhi_pi16(mmG, mmE);    /* mmG=(04 14 24 05 06 16 26 07) */

      mmH = _mm_srli_si64(mmH, 2 * BYTE_BIT);
      mmE = _mm_srli_si64(mmE, 2 * BYTE_BIT);

      mmC = mmD;
      mmB = mmD;
      mmD = _mm_unpacklo_pi16(mmD, mmH);    /* mmD=(11 21 02 12 13 23 04 14) */
      mmC = _mm_unpackhi_pi16(mmC, mmH);    /* mmC=(15 25 06 16 17 27 -- --) */

      mmB = _mm_srli_si64(mmB, 2 * BYTE_BIT);  /* mmB=(13 23 15 25 17 27 -- --) */

      mmF = mmE;
      mmE = _mm_unpacklo_pi16(mmE, mmB);    /* mmE=(22 03 13 23 24 05 15 25) */
      mmF = _mm_unpackhi_pi16(mmF, mmB);    /* mmF=(26 07 17 27 -- -- -- --) */

      mmA = _mm_unpacklo_pi32(mmA, mmD);    /* mmA=(00 10 20 01 11 21 02 12) */
      mmE = _mm_unpacklo_pi32(mmE, mmG);    /* mmE=(22 03 13 23 04 14 24 05) */
      mmC = _mm_unpacklo_pi32(mmC, mmF);    /* mmC=(15 25 06 16 26 07 17 27) */

      if (num_cols >= 8) {
        _mm_store_si64((__m64 *)outptr, mmA);
        _mm_store_si64((__m64 *)(outptr + 8), mmE);
        _mm_store_si64((__m64 *)(outptr + 16), mmC);
        outptr += RGB_PIXELSIZE * 8;
      } else {
        col = num_cols * 3;
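        /* Tail store: write the remaining col = num_cols * 3 bytes with a
         * cascade of 16-, 8-, 4-, 2-, and 1-byte stores (gssdlc1/gssdrc1,
         * swl/swr, ush, and sb). */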
260 asm(".set noreorder\r\n"
261
262 "li $8, 16\r\n"
263 "move $9, %4\r\n"
264 "mov.s $f4, %1\r\n"
265 "mov.s $f6, %3\r\n"
266 "move $10, %5\r\n"
267 "bltu $9, $8, 1f\r\n"
268 "nop \r\n"
269 "gssdlc1 $f4, 7($10)\r\n"
270 "gssdrc1 $f4, 0($10)\r\n"
271 "gssdlc1 $f6, 7+8($10)\r\n"
272 "gssdrc1 $f6, 8($10)\r\n"
273 "mov.s $f4, %2\r\n"
274 "subu $9, $9, 16\r\n"
275 "daddu $10, $10, 16\r\n"
276 "b 2f\r\n"
277 "nop \r\n"
278
279 "1: \r\n"
280 "li $8, 8\r\n" /* st8 */
281 "bltu $9, $8, 2f\r\n"
282 "nop \r\n"
283 "gssdlc1 $f4, 7($10)\r\n"
284 "gssdrc1 $f4, ($10)\r\n"
285 "mov.s $f4, %3\r\n"
286 "subu $9, $9, 8\r\n"
287 "daddu $10, $10, 8\r\n"
288
289 "2: \r\n"
290 "li $8, 4\r\n" /* st4 */
291 "mfc1 $11, $f4\r\n"
292 "bltu $9, $8, 3f\r\n"
293 "nop \r\n"
294 "swl $11, 3($10)\r\n"
295 "swr $11, 0($10)\r\n"
296 "li $8, 32\r\n"
297 "mtc1 $8, $f6\r\n"
298 "dsrl $f4, $f4, $f6\r\n"
299 "mfc1 $11, $f4\r\n"
300 "subu $9, $9, 4\r\n"
301 "daddu $10, $10, 4\r\n"
302
303 "3: \r\n"
304 "li $8, 2\r\n" /* st2 */
305 "bltu $9, $8, 4f\r\n"
306 "nop \r\n"
307 "ush $11, 0($10)\r\n"
308 "srl $11, 16\r\n"
309 "subu $9, $9, 2\r\n"
310 "daddu $10, $10, 2\r\n"
311
312 "4: \r\n"
313 "li $8, 1\r\n" /* st1 */
314 "bltu $9, $8, 5f\r\n"
315 "nop \r\n"
316 "sb $11, 0($10)\r\n"
317
318 "5: \r\n"
319 "nop \r\n" /* end */
320 : "=m" (*outptr)
321 : "f" (mmA), "f" (mmC), "f" (mmE), "r" (col), "r" (outptr)
322 : "$f4", "$f6", "$8", "$9", "$10", "$11", "memory"
323 );
324 }
325
326 #else /* RGB_PIXELSIZE == 4 */
327
328 #ifdef RGBX_FILLER_0XFF
329 mm6 = _mm_cmpeq_pi8(mm6, mm6);
330 mm7 = _mm_cmpeq_pi8(mm7, mm7);
331 #else
332 mm6 = _mm_xor_si64(mm6, mm6);
333 mm7 = _mm_xor_si64(mm7, mm7);
334 #endif
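      /* mm6/mm7 alias the filler component's register pair (the #else cases
       * in the #define blocks at the top of this file), so every byte of the
       * X channel is now 0xFF or 0x00; the CrE/CrO temporaries previously
       * held there are dead by this point. */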
      /* mmA=(00 02 04 06 ** ** ** **), mmB=(01 03 05 07 ** ** ** **) */
      /* mmC=(10 12 14 16 ** ** ** **), mmD=(11 13 15 17 ** ** ** **) */
      /* mmE=(20 22 24 26 ** ** ** **), mmF=(21 23 25 27 ** ** ** **) */
      /* mmG=(30 32 34 36 ** ** ** **), mmH=(31 33 35 37 ** ** ** **) */

      mmA = _mm_unpacklo_pi8(mmA, mmC);     /* mmA=(00 10 02 12 04 14 06 16) */
      mmE = _mm_unpacklo_pi8(mmE, mmG);     /* mmE=(20 30 22 32 24 34 26 36) */
      mmB = _mm_unpacklo_pi8(mmB, mmD);     /* mmB=(01 11 03 13 05 15 07 17) */
      mmF = _mm_unpacklo_pi8(mmF, mmH);     /* mmF=(21 31 23 33 25 35 27 37) */

      mmC = mmA;
      mmA = _mm_unpacklo_pi16(mmA, mmE);    /* mmA=(00 10 20 30 02 12 22 32) */
      mmC = _mm_unpackhi_pi16(mmC, mmE);    /* mmC=(04 14 24 34 06 16 26 36) */
      mmG = mmB;
      mmB = _mm_unpacklo_pi16(mmB, mmF);    /* mmB=(01 11 21 31 03 13 23 33) */
      mmG = _mm_unpackhi_pi16(mmG, mmF);    /* mmG=(05 15 25 35 07 17 27 37) */

      mmD = mmA;
      mmA = _mm_unpacklo_pi32(mmA, mmB);    /* mmA=(00 10 20 30 01 11 21 31) */
      mmD = _mm_unpackhi_pi32(mmD, mmB);    /* mmD=(02 12 22 32 03 13 23 33) */
      mmH = mmC;
      mmC = _mm_unpacklo_pi32(mmC, mmG);    /* mmC=(04 14 24 34 05 15 25 35) */
      mmH = _mm_unpackhi_pi32(mmH, mmG);    /* mmH=(06 16 26 36 07 17 27 37) */

      if (num_cols >= 8) {
        _mm_store_si64((__m64 *)outptr, mmA);
        _mm_store_si64((__m64 *)(outptr + 8), mmD);
        _mm_store_si64((__m64 *)(outptr + 16), mmC);
        _mm_store_si64((__m64 *)(outptr + 24), mmH);
        outptr += RGB_PIXELSIZE * 8;
      } else {
        col = num_cols;
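        /* Tail store: col counts the remaining pixels (4 bytes each), written
         * with a cascade of 16-, 8-, and 4-byte unaligned stores
         * (gssdlc1/gssdrc1 and gsswlc1/gsswrc1). */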
367 asm(".set noreorder\r\n" /* st16 */
368
369 "li $8, 4\r\n"
370 "move $9, %6\r\n"
371 "move $10, %7\r\n"
372 "mov.s $f4, %2\r\n"
373 "mov.s $f6, %4\r\n"
374 "bltu $9, $8, 1f\r\n"
375 "nop \r\n"
376 "gssdlc1 $f4, 7($10)\r\n"
377 "gssdrc1 $f4, ($10)\r\n"
378 "gssdlc1 $f6, 7+8($10)\r\n"
379 "gssdrc1 $f6, 8($10)\r\n"
380 "mov.s $f4, %3\r\n"
381 "mov.s $f6, %5\r\n"
382 "subu $9, $9, 4\r\n"
383 "daddu $10, $10, 16\r\n"
384
385 "1: \r\n"
386 "li $8, 2\r\n" /* st8 */
387 "bltu $9, $8, 2f\r\n"
388 "nop \r\n"
389 "gssdlc1 $f4, 7($10)\r\n"
390 "gssdrc1 $f4, 0($10)\r\n"
391 "mov.s $f4, $f6\r\n"
392 "subu $9, $9, 2\r\n"
393 "daddu $10, $10, 8\r\n"
394
395 "2: \r\n"
396 "li $8, 1\r\n" /* st4 */
397 "bltu $9, $8, 3f\r\n"
398 "nop \r\n"
399 "gsswlc1 $f4, 3($10)\r\n"
400 "gsswrc1 $f4, 0($10)\r\n"
401
402 "3: \r\n"
403 "li %1, 0\r\n" /* end */
404 : "=m" (*outptr), "=r" (col)
405 : "f" (mmA), "f" (mmC), "f" (mmD), "f" (mmH), "r" (col),
406 "r" (outptr)
407 : "$f4", "$f6", "$8", "$9", "$10", "memory"
408 );
409 }
410
411 #endif
412
413 }
414 }
415 }
416
417 #undef mmA
418 #undef mmB
419 #undef mmC
420 #undef mmD
421 #undef mmE
422 #undef mmF
423 #undef mmG
424 #undef mmH
425