/*
 * Loongson MMI optimizations for libjpeg-turbo
 *
 * Copyright (C) 2014, 2018, 2020, D. R. Commander.  All Rights Reserved.
 * Copyright (C) 2016-2017, Loongson Technology Corporation Limited, BeiJing.
 *                          All Rights Reserved.
 * Authors:  ZhuChen     <zhuchen@loongson.cn>
 *           CaiWanwei   <caiwanwei@loongson.cn>
 *           SunZhangzhi <sunzhangzhi-cq@loongson.cn>
 *
 * Based on the x86 SIMD extension for IJG JPEG library
 * Copyright (C) 1999-2006, MIYASAKA Masaru.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software.  If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* ACCURATE INTEGER FORWARD DCT */

#include "jsimd_mmi.h"


#define CONST_BITS  13
#define PASS1_BITS  2
#define DESCALE_P1  (CONST_BITS - PASS1_BITS)
#define DESCALE_P2  (CONST_BITS + PASS1_BITS)
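
/* After each pass, the 32-bit multiply-accumulate results are descaled with
 * a rounding arithmetic right shift: by DESCALE_P1 bits in pass 1 and by
 * DESCALE_P2 bits in pass 2, using the PD_DESCALE_P1/P2 rounding constants
 * defined below.
 */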

#define FIX_0_298  ((short)2446)   /* FIX(0.298631336) */
#define FIX_0_390  ((short)3196)   /* FIX(0.390180644) */
#define FIX_0_541  ((short)4433)   /* FIX(0.541196100) */
#define FIX_0_765  ((short)6270)   /* FIX(0.765366865) */
#define FIX_0_899  ((short)7373)   /* FIX(0.899976223) */
#define FIX_1_175  ((short)9633)   /* FIX(1.175875602) */
#define FIX_1_501  ((short)12299)  /* FIX(1.501321110) */
#define FIX_1_847  ((short)15137)  /* FIX(1.847759065) */
#define FIX_1_961  ((short)16069)  /* FIX(1.961570560) */
#define FIX_2_053  ((short)16819)  /* FIX(2.053119869) */
#define FIX_2_562  ((short)20995)  /* FIX(2.562915447) */
#define FIX_3_072  ((short)25172)  /* FIX(3.072711026) */
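/* FIX(x) denotes x in CONST_BITS-bit (here 13-bit) fixed point, i.e.
 * round(x * 2^13).  For example, FIX(0.541196100) =
 * round(0.541196100 * 8192) = 4433.
 */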

enum const_index {
  index_PW_F130_F054,
  index_PW_F054_MF130,
  index_PW_MF078_F117,
  index_PW_F117_F078,
  index_PW_MF060_MF089,
  index_PW_MF089_F060,
  index_PW_MF050_MF256,
  index_PW_MF256_F050,
  index_PD_DESCALE_P1,
  index_PD_DESCALE_P2,
  index_PW_DESCALE_P2X
};

static uint64_t const_value[] = {
  _uint64_set_pi16(FIX_0_541, (FIX_0_541 + FIX_0_765),
                   FIX_0_541, (FIX_0_541 + FIX_0_765)),
  _uint64_set_pi16((FIX_0_541 - FIX_1_847), FIX_0_541,
                   (FIX_0_541 - FIX_1_847), FIX_0_541),
  _uint64_set_pi16(FIX_1_175, (FIX_1_175 - FIX_1_961),
                   FIX_1_175, (FIX_1_175 - FIX_1_961)),
  _uint64_set_pi16((FIX_1_175 - FIX_0_390), FIX_1_175,
                   (FIX_1_175 - FIX_0_390), FIX_1_175),
  _uint64_set_pi16(-FIX_0_899, (FIX_0_298 - FIX_0_899),
                   -FIX_0_899, (FIX_0_298 - FIX_0_899)),
  _uint64_set_pi16((FIX_1_501 - FIX_0_899), -FIX_0_899,
                   (FIX_1_501 - FIX_0_899), -FIX_0_899),
  _uint64_set_pi16(-FIX_2_562, (FIX_2_053 - FIX_2_562),
                   -FIX_2_562, (FIX_2_053 - FIX_2_562)),
  _uint64_set_pi16((FIX_3_072 - FIX_2_562), -FIX_2_562,
                   (FIX_3_072 - FIX_2_562), -FIX_2_562),
  _uint64_set_pi32((1 << (DESCALE_P1 - 1)), (1 << (DESCALE_P1 - 1))),
  _uint64_set_pi32((1 << (DESCALE_P2 - 1)), (1 << (DESCALE_P2 - 1))),
  _uint64_set_pi16((1 << (PASS1_BITS - 1)), (1 << (PASS1_BITS - 1)),
                   (1 << (PASS1_BITS - 1)), (1 << (PASS1_BITS - 1)))
};
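
/* Each PW_* constant packs two 16-bit coefficients per 32-bit lane (the pair
 * is repeated to fill the 64-bit register), so that one _mm_madd_pi16 of an
 * unpacked input pair (x, y) with the constant yields x * c0 + y * c1 in
 * each 32-bit lane.  The PD_* entries are 32-bit rounding terms
 * (1 << (shift - 1)) added before the DESCALE shifts, and PW_DESCALE_P2X is
 * the corresponding 16-bit rounding term for the PASS1_BITS shift in pass 2.
 */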

#define PW_F130_F054    get_const_value(index_PW_F130_F054)
#define PW_F054_MF130   get_const_value(index_PW_F054_MF130)
#define PW_MF078_F117   get_const_value(index_PW_MF078_F117)
#define PW_F117_F078    get_const_value(index_PW_F117_F078)
#define PW_MF060_MF089  get_const_value(index_PW_MF060_MF089)
#define PW_MF089_F060   get_const_value(index_PW_MF089_F060)
#define PW_MF050_MF256  get_const_value(index_PW_MF050_MF256)
#define PW_MF256_F050   get_const_value(index_PW_MF256_F050)
#define PD_DESCALE_P1   get_const_value(index_PD_DESCALE_P1)
#define PD_DESCALE_P2   get_const_value(index_PD_DESCALE_P2)
#define PW_DESCALE_P2X  get_const_value(index_PW_DESCALE_P2X)


#define DO_FDCT_COMMON(PASS) { \
  __m64 tmp1312l, tmp1312h, tmp47l, tmp47h, tmp4l, tmp4h, tmp7l, tmp7h; \
  __m64 tmp56l, tmp56h, tmp5l, tmp5h, tmp6l, tmp6h; \
  __m64 out1l, out1h, out2l, out2h, out3l, out3h; \
  __m64 out5l, out5h, out6l, out6h, out7l, out7h; \
  __m64 z34l, z34h, z3l, z3h, z4l, z4h, z3, z4; \
  \
  /* (Original) \
   * z1 = (tmp12 + tmp13) * 0.541196100; \
   * out2 = z1 + tmp13 * 0.765366865; \
   * out6 = z1 + tmp12 * -1.847759065; \
   * \
   * (This implementation) \
   * out2 = tmp13 * (0.541196100 + 0.765366865) + tmp12 * 0.541196100; \
   * out6 = tmp13 * 0.541196100 + tmp12 * (0.541196100 - 1.847759065); \
   */ \
  \
  tmp1312l = _mm_unpacklo_pi16(tmp13, tmp12); \
  tmp1312h = _mm_unpackhi_pi16(tmp13, tmp12); \
  \
  out2l = _mm_madd_pi16(tmp1312l, PW_F130_F054); \
  out2h = _mm_madd_pi16(tmp1312h, PW_F130_F054); \
  out6l = _mm_madd_pi16(tmp1312l, PW_F054_MF130); \
  out6h = _mm_madd_pi16(tmp1312h, PW_F054_MF130); \
  \
  out2l = _mm_add_pi32(out2l, PD_DESCALE_P##PASS); \
  out2h = _mm_add_pi32(out2h, PD_DESCALE_P##PASS); \
  out2l = _mm_srai_pi32(out2l, DESCALE_P##PASS); \
  out2h = _mm_srai_pi32(out2h, DESCALE_P##PASS); \
  \
  out6l = _mm_add_pi32(out6l, PD_DESCALE_P##PASS); \
  out6h = _mm_add_pi32(out6h, PD_DESCALE_P##PASS); \
  out6l = _mm_srai_pi32(out6l, DESCALE_P##PASS); \
  out6h = _mm_srai_pi32(out6h, DESCALE_P##PASS); \
  \
  out2 = _mm_packs_pi32(out2l, out2h); \
  out6 = _mm_packs_pi32(out6l, out6h); \
  \
  /* Odd part */ \
  \
  z3 = _mm_add_pi16(tmp4, tmp6); \
  z4 = _mm_add_pi16(tmp5, tmp7); \
  \
  /* (Original) \
   * z5 = (z3 + z4) * 1.175875602; \
   * z3 = z3 * -1.961570560;  z4 = z4 * -0.390180644; \
   * z3 += z5;  z4 += z5; \
   * \
   * (This implementation) \
   * z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602; \
   * z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644); \
   */ \
  \
  z34l = _mm_unpacklo_pi16(z3, z4); \
  z34h = _mm_unpackhi_pi16(z3, z4); \
  z3l = _mm_madd_pi16(z34l, PW_MF078_F117); \
  z3h = _mm_madd_pi16(z34h, PW_MF078_F117); \
  z4l = _mm_madd_pi16(z34l, PW_F117_F078); \
  z4h = _mm_madd_pi16(z34h, PW_F117_F078); \
  \
  /* (Original) \
   * z1 = tmp4 + tmp7;  z2 = tmp5 + tmp6; \
   * tmp4 = tmp4 * 0.298631336;  tmp5 = tmp5 * 2.053119869; \
   * tmp6 = tmp6 * 3.072711026;  tmp7 = tmp7 * 1.501321110; \
   * z1 = z1 * -0.899976223;  z2 = z2 * -2.562915447; \
   * out7 = tmp4 + z1 + z3;  out5 = tmp5 + z2 + z4; \
   * out3 = tmp6 + z2 + z3;  out1 = tmp7 + z1 + z4; \
   * \
   * (This implementation) \
   * tmp4 = tmp4 * (0.298631336 - 0.899976223) + tmp7 * -0.899976223; \
   * tmp5 = tmp5 * (2.053119869 - 2.562915447) + tmp6 * -2.562915447; \
   * tmp6 = tmp5 * -2.562915447 + tmp6 * (3.072711026 - 2.562915447); \
   * tmp7 = tmp4 * -0.899976223 + tmp7 * (1.501321110 - 0.899976223); \
   * out7 = tmp4 + z3;  out5 = tmp5 + z4; \
   * out3 = tmp6 + z3;  out1 = tmp7 + z4; \
   */ \
  \
  tmp47l = _mm_unpacklo_pi16(tmp4, tmp7); \
  tmp47h = _mm_unpackhi_pi16(tmp4, tmp7); \
  \
  tmp4l = _mm_madd_pi16(tmp47l, PW_MF060_MF089); \
  tmp4h = _mm_madd_pi16(tmp47h, PW_MF060_MF089); \
  tmp7l = _mm_madd_pi16(tmp47l, PW_MF089_F060); \
  tmp7h = _mm_madd_pi16(tmp47h, PW_MF089_F060); \
  \
  out7l = _mm_add_pi32(tmp4l, z3l); \
  out7h = _mm_add_pi32(tmp4h, z3h); \
  out1l = _mm_add_pi32(tmp7l, z4l); \
  out1h = _mm_add_pi32(tmp7h, z4h); \
  \
  out7l = _mm_add_pi32(out7l, PD_DESCALE_P##PASS); \
  out7h = _mm_add_pi32(out7h, PD_DESCALE_P##PASS); \
  out7l = _mm_srai_pi32(out7l, DESCALE_P##PASS); \
  out7h = _mm_srai_pi32(out7h, DESCALE_P##PASS); \
  \
  out1l = _mm_add_pi32(out1l, PD_DESCALE_P##PASS); \
  out1h = _mm_add_pi32(out1h, PD_DESCALE_P##PASS); \
  out1l = _mm_srai_pi32(out1l, DESCALE_P##PASS); \
  out1h = _mm_srai_pi32(out1h, DESCALE_P##PASS); \
  \
  out7 = _mm_packs_pi32(out7l, out7h); \
  out1 = _mm_packs_pi32(out1l, out1h); \
  \
  tmp56l = _mm_unpacklo_pi16(tmp5, tmp6); \
  tmp56h = _mm_unpackhi_pi16(tmp5, tmp6); \
  \
  tmp5l = _mm_madd_pi16(tmp56l, PW_MF050_MF256); \
  tmp5h = _mm_madd_pi16(tmp56h, PW_MF050_MF256); \
  tmp6l = _mm_madd_pi16(tmp56l, PW_MF256_F050); \
  tmp6h = _mm_madd_pi16(tmp56h, PW_MF256_F050); \
  \
  out5l = _mm_add_pi32(tmp5l, z4l); \
  out5h = _mm_add_pi32(tmp5h, z4h); \
  out3l = _mm_add_pi32(tmp6l, z3l); \
  out3h = _mm_add_pi32(tmp6h, z3h); \
  \
  out5l = _mm_add_pi32(out5l, PD_DESCALE_P##PASS); \
  out5h = _mm_add_pi32(out5h, PD_DESCALE_P##PASS); \
  out5l = _mm_srai_pi32(out5l, DESCALE_P##PASS); \
  out5h = _mm_srai_pi32(out5h, DESCALE_P##PASS); \
  \
  out3l = _mm_add_pi32(out3l, PD_DESCALE_P##PASS); \
  out3h = _mm_add_pi32(out3h, PD_DESCALE_P##PASS); \
  out3l = _mm_srai_pi32(out3l, DESCALE_P##PASS); \
  out3h = _mm_srai_pi32(out3h, DESCALE_P##PASS); \
  \
  out5 = _mm_packs_pi32(out5l, out5h); \
  out3 = _mm_packs_pi32(out3l, out3h); \
}
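
/* For reference, the even-part rewrite above follows from expanding
 * z1 = (tmp12 + tmp13) * 0.541196100 in the original formulas:
 *   out2 = z1 + tmp13 * 0.765366865
 *        = tmp13 * (0.541196100 + 0.765366865) + tmp12 * 0.541196100
 *   out6 = z1 + tmp12 * -1.847759065
 *        = tmp13 * 0.541196100 + tmp12 * (0.541196100 - 1.847759065)
 * so each output maps onto a single _mm_madd_pi16 per 32-bit lane against a
 * packed pair of precomputed coefficients.  The odd-part rewrites are
 * derived the same way.
 */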

#define DO_FDCT_PASS1() { \
  __m64 row0l, row0h, row1l, row1h, row2l, row2h, row3l, row3h; \
  __m64 row01a, row01b, row01c, row01d, row23a, row23b, row23c, row23d; \
  __m64 col0, col1, col2, col3, col4, col5, col6, col7; \
  __m64 tmp10, tmp11; \
  \
  row0l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 0]);      /* (00 01 02 03) */ \
  row0h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 0 + 4]);  /* (04 05 06 07) */ \
  row1l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 1]);      /* (10 11 12 13) */ \
  row1h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 1 + 4]);  /* (14 15 16 17) */ \
  row2l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 2]);      /* (20 21 22 23) */ \
  row2h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 2 + 4]);  /* (24 25 26 27) */ \
  row3l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 3]);      /* (30 31 32 33) */ \
  row3h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 3 + 4]);  /* (34 35 36 37) */ \
  \
  /* Transpose coefficients */ \
  \
  row23a = _mm_unpacklo_pi16(row2l, row3l);   /* row23a=(20 30 21 31) */ \
  row23b = _mm_unpackhi_pi16(row2l, row3l);   /* row23b=(22 32 23 33) */ \
  row23c = _mm_unpacklo_pi16(row2h, row3h);   /* row23c=(24 34 25 35) */ \
  row23d = _mm_unpackhi_pi16(row2h, row3h);   /* row23d=(26 36 27 37) */ \
  \
  row01a = _mm_unpacklo_pi16(row0l, row1l);   /* row01a=(00 10 01 11) */ \
  row01b = _mm_unpackhi_pi16(row0l, row1l);   /* row01b=(02 12 03 13) */ \
  row01c = _mm_unpacklo_pi16(row0h, row1h);   /* row01c=(04 14 05 15) */ \
  row01d = _mm_unpackhi_pi16(row0h, row1h);   /* row01d=(06 16 07 17) */ \
  \
  col0 = _mm_unpacklo_pi32(row01a, row23a);   /* col0=(00 10 20 30) */ \
  col1 = _mm_unpackhi_pi32(row01a, row23a);   /* col1=(01 11 21 31) */ \
  col6 = _mm_unpacklo_pi32(row01d, row23d);   /* col6=(06 16 26 36) */ \
  col7 = _mm_unpackhi_pi32(row01d, row23d);   /* col7=(07 17 27 37) */ \
  \
  tmp6 = _mm_sub_pi16(col1, col6);            /* tmp6=col1-col6 */ \
  tmp7 = _mm_sub_pi16(col0, col7);            /* tmp7=col0-col7 */ \
  tmp1 = _mm_add_pi16(col1, col6);            /* tmp1=col1+col6 */ \
  tmp0 = _mm_add_pi16(col0, col7);            /* tmp0=col0+col7 */ \
  \
  col2 = _mm_unpacklo_pi32(row01b, row23b);   /* col2=(02 12 22 32) */ \
  col3 = _mm_unpackhi_pi32(row01b, row23b);   /* col3=(03 13 23 33) */ \
  col4 = _mm_unpacklo_pi32(row01c, row23c);   /* col4=(04 14 24 34) */ \
  col5 = _mm_unpackhi_pi32(row01c, row23c);   /* col5=(05 15 25 35) */ \
  \
  tmp3 = _mm_add_pi16(col3, col4);            /* tmp3=col3+col4 */ \
  tmp2 = _mm_add_pi16(col2, col5);            /* tmp2=col2+col5 */ \
  tmp4 = _mm_sub_pi16(col3, col4);            /* tmp4=col3-col4 */ \
  tmp5 = _mm_sub_pi16(col2, col5);            /* tmp5=col2-col5 */ \
  \
  /* Even part */ \
  \
  tmp10 = _mm_add_pi16(tmp0, tmp3);           /* tmp10=tmp0+tmp3 */ \
  tmp13 = _mm_sub_pi16(tmp0, tmp3);           /* tmp13=tmp0-tmp3 */ \
  tmp11 = _mm_add_pi16(tmp1, tmp2);           /* tmp11=tmp1+tmp2 */ \
  tmp12 = _mm_sub_pi16(tmp1, tmp2);           /* tmp12=tmp1-tmp2 */ \
  \
  out0 = _mm_add_pi16(tmp10, tmp11);          /* out0=tmp10+tmp11 */ \
  out4 = _mm_sub_pi16(tmp10, tmp11);          /* out4=tmp10-tmp11 */ \
  out0 = _mm_slli_pi16(out0, PASS1_BITS); \
  out4 = _mm_slli_pi16(out4, PASS1_BITS); \
  \
  DO_FDCT_COMMON(1) \
  \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 0], out0); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 0 + 4], out4); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 1], out1); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 1 + 4], out5); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 2], out2); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 2 + 4], out6); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 3], out3); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 3 + 4], out7); \
}
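
/* DO_FDCT_PASS1() transforms four of the eight rows at a time (the 4x8
 * quadrant starting at dataptr), so the caller invokes it twice.  All pass-1
 * outputs are left scaled up by 2^PASS1_BITS: out0/out4 are shifted left
 * explicitly, and the remaining outputs are descaled by only
 * CONST_BITS - PASS1_BITS, matching the scalar jfdctint algorithm.
 */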

#define DO_FDCT_PASS2() { \
  __m64 col0l, col0h, col1l, col1h, col2l, col2h, col3l, col3h; \
  __m64 col01a, col01b, col01c, col01d, col23a, col23b, col23c, col23d; \
  __m64 row0, row1, row2, row3, row4, row5, row6, row7; \
  __m64 tmp10, tmp11; \
  \
  col0l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 0]);  /* (00 10 20 30) */ \
  col1l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 1]);  /* (01 11 21 31) */ \
  col2l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 2]);  /* (02 12 22 32) */ \
  col3l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 3]);  /* (03 13 23 33) */ \
  col0h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 4]);  /* (40 50 60 70) */ \
  col1h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 5]);  /* (41 51 61 71) */ \
  col2h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 6]);  /* (42 52 62 72) */ \
  col3h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 7]);  /* (43 53 63 73) */ \
  \
  /* Transpose coefficients */ \
  \
  col23a = _mm_unpacklo_pi16(col2l, col3l);   /* col23a=(02 03 12 13) */ \
  col23b = _mm_unpackhi_pi16(col2l, col3l);   /* col23b=(22 23 32 33) */ \
  col23c = _mm_unpacklo_pi16(col2h, col3h);   /* col23c=(42 43 52 53) */ \
  col23d = _mm_unpackhi_pi16(col2h, col3h);   /* col23d=(62 63 72 73) */ \
  \
  col01a = _mm_unpacklo_pi16(col0l, col1l);   /* col01a=(00 01 10 11) */ \
  col01b = _mm_unpackhi_pi16(col0l, col1l);   /* col01b=(20 21 30 31) */ \
  col01c = _mm_unpacklo_pi16(col0h, col1h);   /* col01c=(40 41 50 51) */ \
  col01d = _mm_unpackhi_pi16(col0h, col1h);   /* col01d=(60 61 70 71) */ \
  \
  row0 = _mm_unpacklo_pi32(col01a, col23a);   /* row0=(00 01 02 03) */ \
  row1 = _mm_unpackhi_pi32(col01a, col23a);   /* row1=(10 11 12 13) */ \
  row6 = _mm_unpacklo_pi32(col01d, col23d);   /* row6=(60 61 62 63) */ \
  row7 = _mm_unpackhi_pi32(col01d, col23d);   /* row7=(70 71 72 73) */ \
  \
  tmp6 = _mm_sub_pi16(row1, row6);            /* tmp6=row1-row6 */ \
  tmp7 = _mm_sub_pi16(row0, row7);            /* tmp7=row0-row7 */ \
  tmp1 = _mm_add_pi16(row1, row6);            /* tmp1=row1+row6 */ \
  tmp0 = _mm_add_pi16(row0, row7);            /* tmp0=row0+row7 */ \
  \
  row2 = _mm_unpacklo_pi32(col01b, col23b);   /* row2=(20 21 22 23) */ \
  row3 = _mm_unpackhi_pi32(col01b, col23b);   /* row3=(30 31 32 33) */ \
  row4 = _mm_unpacklo_pi32(col01c, col23c);   /* row4=(40 41 42 43) */ \
  row5 = _mm_unpackhi_pi32(col01c, col23c);   /* row5=(50 51 52 53) */ \
  \
  tmp3 = _mm_add_pi16(row3, row4);            /* tmp3=row3+row4 */ \
  tmp2 = _mm_add_pi16(row2, row5);            /* tmp2=row2+row5 */ \
  tmp4 = _mm_sub_pi16(row3, row4);            /* tmp4=row3-row4 */ \
  tmp5 = _mm_sub_pi16(row2, row5);            /* tmp5=row2-row5 */ \
  \
  /* Even part */ \
  \
  tmp10 = _mm_add_pi16(tmp0, tmp3);           /* tmp10=tmp0+tmp3 */ \
  tmp13 = _mm_sub_pi16(tmp0, tmp3);           /* tmp13=tmp0-tmp3 */ \
  tmp11 = _mm_add_pi16(tmp1, tmp2);           /* tmp11=tmp1+tmp2 */ \
  tmp12 = _mm_sub_pi16(tmp1, tmp2);           /* tmp12=tmp1-tmp2 */ \
  \
  out0 = _mm_add_pi16(tmp10, tmp11);          /* out0=tmp10+tmp11 */ \
  out4 = _mm_sub_pi16(tmp10, tmp11);          /* out4=tmp10-tmp11 */ \
  \
  out0 = _mm_add_pi16(out0, PW_DESCALE_P2X); \
  out4 = _mm_add_pi16(out4, PW_DESCALE_P2X); \
  out0 = _mm_srai_pi16(out0, PASS1_BITS); \
  out4 = _mm_srai_pi16(out4, PASS1_BITS); \
  \
  DO_FDCT_COMMON(2) \
  \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 0], out0); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 1], out1); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 2], out2); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 3], out3); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 4], out4); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 5], out5); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 6], out6); \
  _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 7], out7); \
}
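
/* DO_FDCT_PASS2() transforms four of the eight columns at a time and removes
 * the extra 2^PASS1_BITS scale left by pass 1: out0/out4 get a rounding
 * right shift by PASS1_BITS (via PW_DESCALE_P2X), and the remaining outputs
 * are descaled by CONST_BITS + PASS1_BITS inside DO_FDCT_COMMON(2).
 */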

void jsimd_fdct_islow_mmi(DCTELEM *data)
{
  __m64 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  __m64 out0, out1, out2, out3, out4, out5, out6, out7;
  __m64 tmp12, tmp13;
  DCTELEM *dataptr = data;

  /* Pass 1: process rows. */

  DO_FDCT_PASS1()
  dataptr += DCTSIZE * 4;
  DO_FDCT_PASS1()

  /* Pass 2: process columns. */

  dataptr = data;
  DO_FDCT_PASS2()
  dataptr += 4;
  DO_FDCT_PASS2()
}
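
/* Minimal usage sketch (hypothetical caller; in libjpeg-turbo this routine
 * is normally reached through the platform's jsimd_fdct_islow() dispatcher).
 * The forward DCT is performed in place on one 8x8 block of level-shifted
 * samples:
 *
 *   DCTELEM block[DCTSIZE2];      // 64 level-shifted samples, row-major
 *   // ... fill block ...
 *   jsimd_fdct_islow_mmi(block);  // block now holds scaled DCT coefficients
 */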