/*
 * jidctint-neon.c - accurate integer IDCT (Arm Neon)
 *
 * Copyright (C) 2020, Arm Limited.  All Rights Reserved.
 * Copyright (C) 2020, D. R. Commander.  All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

#define JPEG_INTERNALS
#include "jconfigint.h"
#include "../../jinclude.h"
#include "../../jpeglib.h"
#include "../../jsimd.h"
#include "../../jdct.h"
#include "../../jsimddct.h"
#include "../jsimd.h"
#include "align.h"
#include "neon-compat.h"

#include <arm_neon.h>


#define CONST_BITS  13
#define PASS1_BITS  2

#define DESCALE_P1  (CONST_BITS - PASS1_BITS)
#define DESCALE_P2  (CONST_BITS + PASS1_BITS + 3)
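
/* The first pass rounds away CONST_BITS - PASS1_BITS of the fixed-point
 * scaling, leaving PASS1_BITS of extra fractional precision in the
 * intermediate workspace.  The second pass removes those bits as well, plus
 * three more to fold in the overall 1/8 scaling of the 2-D IDCT.
 */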

/* The computation of the inverse DCT requires the use of constants known at
 * compile time.  Scaled integer constants are used to avoid floating-point
 * arithmetic:
 *    0.298631336 =  2446 * 2^-13
 *    0.390180644 =  3196 * 2^-13
 *    0.541196100 =  4433 * 2^-13
 *    0.765366865 =  6270 * 2^-13
 *    0.899976223 =  7373 * 2^-13
 *    1.175875602 =  9633 * 2^-13
 *    1.501321110 = 12299 * 2^-13
 *    1.847759065 = 15137 * 2^-13
 *    1.961570560 = 16069 * 2^-13
 *    2.053119869 = 16819 * 2^-13
 *    2.562915447 = 20995 * 2^-13
 *    3.072711026 = 25172 * 2^-13
 */

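/* For example, multiplying a dequantized coefficient x by 0.541196100 becomes
 * a 16-bit multiply with a 13-bit descale:
 *   x * 0.541196100 ~= (x * 4433) >> 13
 * The rounding right shifts in the final output stages (vrshrn_n_s32(), etc.)
 * supply the 2^(n-1) rounding term before shifting.
 */
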
#define F_0_298  2446
#define F_0_390  3196
#define F_0_541  4433
#define F_0_765  6270
#define F_0_899  7373
#define F_1_175  9633
#define F_1_501  12299
#define F_1_847  15137
#define F_1_961  16069
#define F_2_053  16819
#define F_2_562  20995
#define F_3_072  25172

#define F_1_175_MINUS_1_961  (F_1_175 - F_1_961)
#define F_1_175_MINUS_0_390  (F_1_175 - F_0_390)
#define F_0_541_MINUS_1_847  (F_0_541 - F_1_847)
#define F_3_072_MINUS_2_562  (F_3_072 - F_2_562)
#define F_0_298_MINUS_0_899  (F_0_298 - F_0_899)
#define F_1_501_MINUS_0_899  (F_1_501 - F_0_899)
#define F_2_053_MINUS_2_562  (F_2_053 - F_2_562)
#define F_0_541_PLUS_0_765   (F_0_541 + F_0_765)


ALIGN(16) static const int16_t jsimd_idct_islow_neon_consts[] = {
  F_0_899,             F_0_541,
  F_2_562,             F_0_298_MINUS_0_899,
  F_1_501_MINUS_0_899, F_2_053_MINUS_2_562,
  F_0_541_PLUS_0_765,  F_1_175,
  F_1_175_MINUS_0_390, F_0_541_MINUS_1_847,
  F_3_072_MINUS_2_562, F_1_175_MINUS_1_961,
  0, 0, 0, 0
};
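
/* When loaded as three 4-element vectors, the table above is indexed by the
 * vmull_lane_s16()/vmlal_lane_s16() calls below as:
 *   consts.val[0] = { F_0_899, F_0_541, F_2_562, F_0_298_MINUS_0_899 }
 *   consts.val[1] = { F_1_501_MINUS_0_899, F_2_053_MINUS_2_562,
 *                     F_0_541_PLUS_0_765,  F_1_175 }
 *   consts.val[2] = { F_1_175_MINUS_0_390, F_0_541_MINUS_1_847,
 *                     F_3_072_MINUS_2_562, F_1_175_MINUS_1_961 }
 */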


/* Forward declaration of regular and sparse IDCT helper functions */

static INLINE void jsimd_idct_islow_pass1_regular(int16x4_t row0,
                                                  int16x4_t row1,
                                                  int16x4_t row2,
                                                  int16x4_t row3,
                                                  int16x4_t row4,
                                                  int16x4_t row5,
                                                  int16x4_t row6,
                                                  int16x4_t row7,
                                                  int16x4_t quant_row0,
                                                  int16x4_t quant_row1,
                                                  int16x4_t quant_row2,
                                                  int16x4_t quant_row3,
                                                  int16x4_t quant_row4,
                                                  int16x4_t quant_row5,
                                                  int16x4_t quant_row6,
                                                  int16x4_t quant_row7,
                                                  int16_t *workspace_1,
                                                  int16_t *workspace_2);

static INLINE void jsimd_idct_islow_pass1_sparse(int16x4_t row0,
                                                 int16x4_t row1,
                                                 int16x4_t row2,
                                                 int16x4_t row3,
                                                 int16x4_t quant_row0,
                                                 int16x4_t quant_row1,
                                                 int16x4_t quant_row2,
                                                 int16x4_t quant_row3,
                                                 int16_t *workspace_1,
                                                 int16_t *workspace_2);

static INLINE void jsimd_idct_islow_pass2_regular(int16_t *workspace,
                                                  JSAMPARRAY output_buf,
                                                  JDIMENSION output_col,
                                                  unsigned buf_offset);

static INLINE void jsimd_idct_islow_pass2_sparse(int16_t *workspace,
                                                 JSAMPARRAY output_buf,
                                                 JDIMENSION output_col,
                                                 unsigned buf_offset);


/* Perform dequantization and inverse DCT on one block of coefficients.  For
 * reference, the C implementation (jpeg_idct_islow()) can be found in
 * jidctint.c.
 *
 * Optimization techniques used for fast data access:
 *
 * In each pass, the inverse DCT is computed for the left and right 4x8 halves
 * of the DCT block.  This avoids spilling due to register pressure, and the
 * increased granularity allows for an optimized calculation depending on the
 * values of the DCT coefficients.  Between passes, intermediate data is stored
 * in 4x8 workspace buffers.
 *
 * Transposing the 8x8 DCT block after each pass can be achieved by transposing
 * each of the four 4x4 quadrants and swapping quadrants 1 and 2 (refer to the
 * diagram below.)  Swapping quadrants is cheap, since the second pass can just
 * swap the workspace buffer pointers.
 *
 *      +-------+-------+                   +-------+-------+
 *      |       |       |                   |       |       |
 *      |   0   |   1   |                   |   0   |   2   |
 *      |       |       |    transpose      |       |       |
 *      +-------+-------+     ------>       +-------+-------+
 *      |       |       |                   |       |       |
 *      |   2   |   3   |                   |   1   |   3   |
 *      |       |       |                   |       |       |
 *      +-------+-------+                   +-------+-------+
 *
 * Optimization techniques used to accelerate the inverse DCT calculation:
 *
 * In a DCT coefficient block, the coefficients are increasingly likely to be 0
 * as you move diagonally from top left to bottom right.  If whole rows of
 * coefficients are 0, then the inverse DCT calculation can be simplified.  On
 * the first pass of the inverse DCT, we test for three special cases before
 * defaulting to a full "regular" inverse DCT:
 *
 * 1) Coefficients in rows 4-7 are all zero.  In this case, we perform a
 *    "sparse" simplified inverse DCT on rows 0-3.
 * 2) AC coefficients (rows 1-7) are all zero.  In this case, the inverse DCT
 *    result is equal to the dequantized DC coefficients.
 * 3) AC and DC coefficients are all zero.  In this case, the inverse DCT
 *    result is all zero.  For the left 4x8 half, this is handled identically
 *    to Case 2 above.  For the right 4x8 half, we do no work and signal that
 *    the "sparse" algorithm is required for the second pass.
 *
 * In the second pass, only a single special case is tested: whether the AC and
 * DC coefficients were all zero in the right 4x8 block during the first pass
 * (refer to Case 3 above.)  If this is the case, then a "sparse" variant of
 * the second pass is performed for both the left and right halves of the DCT
 * block.  (The transposition after the first pass means that the right 4x8
 * block during the first pass becomes rows 4-7 during the second pass.)
 */

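/* The first-pass dispatch implemented below for each 4x8 half is, roughly:
 *
 *   if (coefficients in rows 4-7 are all zero)
 *     if (AC coefficients in rows 1-3 are also all zero)
 *       replicate the dequantized DC values (Case 2), or do no work for the
 *       right half if its DC coefficients are zero too (Case 3)
 *     else
 *       perform the "sparse" first pass on rows 0-3 (Case 1)
 *   else
 *     perform the "regular" first pass on all eight rows
 */
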
void jsimd_idct_islow_neon(void *dct_table, JCOEFPTR coef_block,
                           JSAMPARRAY output_buf, JDIMENSION output_col)
{
  ISLOW_MULT_TYPE *quantptr = dct_table;

  int16_t workspace_l[8 * DCTSIZE / 2];
  int16_t workspace_r[8 * DCTSIZE / 2];

  /* Compute IDCT first pass on left 4x8 coefficient block. */

  /* Load DCT coefficients in left 4x8 block. */
  int16x4_t row0 = vld1_s16(coef_block + 0 * DCTSIZE);
  int16x4_t row1 = vld1_s16(coef_block + 1 * DCTSIZE);
  int16x4_t row2 = vld1_s16(coef_block + 2 * DCTSIZE);
  int16x4_t row3 = vld1_s16(coef_block + 3 * DCTSIZE);
  int16x4_t row4 = vld1_s16(coef_block + 4 * DCTSIZE);
  int16x4_t row5 = vld1_s16(coef_block + 5 * DCTSIZE);
  int16x4_t row6 = vld1_s16(coef_block + 6 * DCTSIZE);
  int16x4_t row7 = vld1_s16(coef_block + 7 * DCTSIZE);

  /* Load quantization table for left 4x8 block. */
  int16x4_t quant_row0 = vld1_s16(quantptr + 0 * DCTSIZE);
  int16x4_t quant_row1 = vld1_s16(quantptr + 1 * DCTSIZE);
  int16x4_t quant_row2 = vld1_s16(quantptr + 2 * DCTSIZE);
  int16x4_t quant_row3 = vld1_s16(quantptr + 3 * DCTSIZE);
  int16x4_t quant_row4 = vld1_s16(quantptr + 4 * DCTSIZE);
  int16x4_t quant_row5 = vld1_s16(quantptr + 5 * DCTSIZE);
  int16x4_t quant_row6 = vld1_s16(quantptr + 6 * DCTSIZE);
  int16x4_t quant_row7 = vld1_s16(quantptr + 7 * DCTSIZE);

  /* Construct bitmap to test if DCT coefficients in left 4x8 block are 0. */
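  /* (OR-ing the rows together and extracting the result as a 64-bit lane
   * yields a single scalar that is zero if and only if all 16 coefficients in
   * rows 4-7 are zero.)
   */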
  int16x4_t bitmap = vorr_s16(row7, row6);
  bitmap = vorr_s16(bitmap, row5);
  bitmap = vorr_s16(bitmap, row4);
  int64_t bitmap_rows_4567 = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);

  if (bitmap_rows_4567 == 0) {
    bitmap = vorr_s16(bitmap, row3);
    bitmap = vorr_s16(bitmap, row2);
    bitmap = vorr_s16(bitmap, row1);
    int64_t left_ac_bitmap = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);

    if (left_ac_bitmap == 0) {
      int16x4_t dcval = vshl_n_s16(vmul_s16(row0, quant_row0), PASS1_BITS);
      int16x4x4_t quadrant = { { dcval, dcval, dcval, dcval } };
      /* Store 4x4 blocks to workspace, transposing in the process. */
      vst4_s16(workspace_l, quadrant);
      vst4_s16(workspace_r, quadrant);
    } else {
      jsimd_idct_islow_pass1_sparse(row0, row1, row2, row3, quant_row0,
                                    quant_row1, quant_row2, quant_row3,
                                    workspace_l, workspace_r);
    }
  } else {
    jsimd_idct_islow_pass1_regular(row0, row1, row2, row3, row4, row5,
                                   row6, row7, quant_row0, quant_row1,
                                   quant_row2, quant_row3, quant_row4,
                                   quant_row5, quant_row6, quant_row7,
                                   workspace_l, workspace_r);
  }

  /* Compute IDCT first pass on right 4x8 coefficient block. */

  /* Load DCT coefficients in right 4x8 block. */
  row0 = vld1_s16(coef_block + 0 * DCTSIZE + 4);
  row1 = vld1_s16(coef_block + 1 * DCTSIZE + 4);
  row2 = vld1_s16(coef_block + 2 * DCTSIZE + 4);
  row3 = vld1_s16(coef_block + 3 * DCTSIZE + 4);
  row4 = vld1_s16(coef_block + 4 * DCTSIZE + 4);
  row5 = vld1_s16(coef_block + 5 * DCTSIZE + 4);
  row6 = vld1_s16(coef_block + 6 * DCTSIZE + 4);
  row7 = vld1_s16(coef_block + 7 * DCTSIZE + 4);

  /* Load quantization table for right 4x8 block. */
  quant_row0 = vld1_s16(quantptr + 0 * DCTSIZE + 4);
  quant_row1 = vld1_s16(quantptr + 1 * DCTSIZE + 4);
  quant_row2 = vld1_s16(quantptr + 2 * DCTSIZE + 4);
  quant_row3 = vld1_s16(quantptr + 3 * DCTSIZE + 4);
  quant_row4 = vld1_s16(quantptr + 4 * DCTSIZE + 4);
  quant_row5 = vld1_s16(quantptr + 5 * DCTSIZE + 4);
  quant_row6 = vld1_s16(quantptr + 6 * DCTSIZE + 4);
  quant_row7 = vld1_s16(quantptr + 7 * DCTSIZE + 4);

  /* Construct bitmap to test if DCT coefficients in right 4x8 block are 0. */
  bitmap = vorr_s16(row7, row6);
  bitmap = vorr_s16(bitmap, row5);
  bitmap = vorr_s16(bitmap, row4);
  bitmap_rows_4567 = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);
  bitmap = vorr_s16(bitmap, row3);
  bitmap = vorr_s16(bitmap, row2);
  bitmap = vorr_s16(bitmap, row1);
  int64_t right_ac_bitmap = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);

  /* If this remains non-zero, a "regular" second pass will be performed. */
  int64_t right_ac_dc_bitmap = 1;

  if (right_ac_bitmap == 0) {
    bitmap = vorr_s16(bitmap, row0);
    right_ac_dc_bitmap = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);

    if (right_ac_dc_bitmap != 0) {
      int16x4_t dcval = vshl_n_s16(vmul_s16(row0, quant_row0), PASS1_BITS);
      int16x4x4_t quadrant = { { dcval, dcval, dcval, dcval } };
      /* Store 4x4 blocks to workspace, transposing in the process. */
      vst4_s16(workspace_l + 4 * DCTSIZE / 2, quadrant);
      vst4_s16(workspace_r + 4 * DCTSIZE / 2, quadrant);
    }
  } else {
    if (bitmap_rows_4567 == 0) {
      jsimd_idct_islow_pass1_sparse(row0, row1, row2, row3, quant_row0,
                                    quant_row1, quant_row2, quant_row3,
                                    workspace_l + 4 * DCTSIZE / 2,
                                    workspace_r + 4 * DCTSIZE / 2);
    } else {
      jsimd_idct_islow_pass1_regular(row0, row1, row2, row3, row4, row5,
                                     row6, row7, quant_row0, quant_row1,
                                     quant_row2, quant_row3, quant_row4,
                                     quant_row5, quant_row6, quant_row7,
                                     workspace_l + 4 * DCTSIZE / 2,
                                     workspace_r + 4 * DCTSIZE / 2);
    }
  }

  /* Second pass: compute IDCT on rows in workspace. */

  /* If all coefficients in right 4x8 block are 0, use "sparse" second pass. */
  if (right_ac_dc_bitmap == 0) {
    jsimd_idct_islow_pass2_sparse(workspace_l, output_buf, output_col, 0);
    jsimd_idct_islow_pass2_sparse(workspace_r, output_buf, output_col, 4);
  } else {
    jsimd_idct_islow_pass2_regular(workspace_l, output_buf, output_col, 0);
    jsimd_idct_islow_pass2_regular(workspace_r, output_buf, output_col, 4);
  }
}


/* Perform dequantization and the first pass of the accurate inverse DCT on a
 * 4x8 block of coefficients.  (To process the full 8x8 DCT block, this
 * function-- or some other optimized variant-- needs to be called for both the
 * left and right 4x8 blocks.)
 *
 * This "regular" version assumes that no optimization can be made to the IDCT
 * calculation, since no useful set of AC coefficients is all 0.
 *
 * The original C implementation of the accurate IDCT (jpeg_idct_islow()) can
 * be found in jidctint.c.  Algorithmic changes made here are documented
 * inline.
 */

static INLINE void jsimd_idct_islow_pass1_regular(int16x4_t row0,
                                                  int16x4_t row1,
                                                  int16x4_t row2,
                                                  int16x4_t row3,
                                                  int16x4_t row4,
                                                  int16x4_t row5,
                                                  int16x4_t row6,
                                                  int16x4_t row7,
                                                  int16x4_t quant_row0,
                                                  int16x4_t quant_row1,
                                                  int16x4_t quant_row2,
                                                  int16x4_t quant_row3,
                                                  int16x4_t quant_row4,
                                                  int16x4_t quant_row5,
                                                  int16x4_t quant_row6,
                                                  int16x4_t quant_row7,
                                                  int16_t *workspace_1,
                                                  int16_t *workspace_2)
{
  /* Load constants for IDCT computation. */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_islow_neon_consts);
#else
  const int16x4_t consts1 = vld1_s16(jsimd_idct_islow_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_islow_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_islow_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif
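  /* (vld1_s16_x3() is not implemented by all compilers, so its use is guarded
   * by HAVE_VLD1_S16_X3 (see neon-compat.h); the fallback issues three
   * separate loads.)
   */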

  /* Even part */
  int16x4_t z2_s16 = vmul_s16(row2, quant_row2);
  int16x4_t z3_s16 = vmul_s16(row6, quant_row6);

  int32x4_t tmp2 = vmull_lane_s16(z2_s16, consts.val[0], 1);
  int32x4_t tmp3 = vmull_lane_s16(z2_s16, consts.val[1], 2);
  tmp2 = vmlal_lane_s16(tmp2, z3_s16, consts.val[2], 1);
  tmp3 = vmlal_lane_s16(tmp3, z3_s16, consts.val[0], 1);

  z2_s16 = vmul_s16(row0, quant_row0);
  z3_s16 = vmul_s16(row4, quant_row4);

  int32x4_t tmp0 = vshll_n_s16(vadd_s16(z2_s16, z3_s16), CONST_BITS);
  int32x4_t tmp1 = vshll_n_s16(vsub_s16(z2_s16, z3_s16), CONST_BITS);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp3);
  int32x4_t tmp13 = vsubq_s32(tmp0, tmp3);
  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp1, tmp2);

  /* Odd part */
  int16x4_t tmp0_s16 = vmul_s16(row7, quant_row7);
  int16x4_t tmp1_s16 = vmul_s16(row5, quant_row5);
  int16x4_t tmp2_s16 = vmul_s16(row3, quant_row3);
  int16x4_t tmp3_s16 = vmul_s16(row1, quant_row1);

  z3_s16 = vadd_s16(tmp0_s16, tmp2_s16);
  int16x4_t z4_s16 = vadd_s16(tmp1_s16, tmp3_s16);

  /* Implementation as per jpeg_idct_islow() in jidctint.c:
   *   z5 = (z3 + z4) * 1.175875602;
   *   z3 = z3 * -1.961570560;  z4 = z4 * -0.390180644;
   *   z3 += z5;  z4 += z5;
   *
   * This implementation:
   *   z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602;
   *   z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644);
   */
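  /* (Substituting z5 confirms the equivalence:
   *    z3 * -1.961570560 + (z3 + z4) * 1.175875602
   *      = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602
   * and likewise for z4.  Folding z5 away saves a multiply and matches the
   * multiply-accumulate form of vmlal_lane_s16().)
   */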

  int32x4_t z3 = vmull_lane_s16(z3_s16, consts.val[2], 3);
  int32x4_t z4 = vmull_lane_s16(z3_s16, consts.val[1], 3);
  z3 = vmlal_lane_s16(z3, z4_s16, consts.val[1], 3);
  z4 = vmlal_lane_s16(z4, z4_s16, consts.val[2], 0);

  /* Implementation as per jpeg_idct_islow() in jidctint.c:
   *   z1 = tmp0 + tmp3;  z2 = tmp1 + tmp2;
   *   tmp0 = tmp0 * 0.298631336;  tmp1 = tmp1 * 2.053119869;
   *   tmp2 = tmp2 * 3.072711026;  tmp3 = tmp3 * 1.501321110;
   *   z1 = z1 * -0.899976223;  z2 = z2 * -2.562915447;
   *   tmp0 += z1 + z3;  tmp1 += z2 + z4;
   *   tmp2 += z2 + z3;  tmp3 += z1 + z4;
   *
   * This implementation:
   *   tmp0 = tmp0 * (0.298631336 - 0.899976223) + tmp3 * -0.899976223;
   *   tmp1 = tmp1 * (2.053119869 - 2.562915447) + tmp2 * -2.562915447;
   *   tmp2 = tmp1 * -2.562915447 + tmp2 * (3.072711026 - 2.562915447);
   *   tmp3 = tmp0 * -0.899976223 + tmp3 * (1.501321110 - 0.899976223);
   *   tmp0 += z3;  tmp1 += z4;
   *   tmp2 += z3;  tmp3 += z4;
   */
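  /* (The negative multipliers -0.899976223 and -2.562915447 are stored as
   * positive constants and applied with vmlsl_lane_s16(), which subtracts the
   * product from the accumulator.)
   */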

  tmp0 = vmull_lane_s16(tmp0_s16, consts.val[0], 3);
  tmp1 = vmull_lane_s16(tmp1_s16, consts.val[1], 1);
  tmp2 = vmull_lane_s16(tmp2_s16, consts.val[2], 2);
  tmp3 = vmull_lane_s16(tmp3_s16, consts.val[1], 0);

  tmp0 = vmlsl_lane_s16(tmp0, tmp3_s16, consts.val[0], 0);
  tmp1 = vmlsl_lane_s16(tmp1, tmp2_s16, consts.val[0], 2);
  tmp2 = vmlsl_lane_s16(tmp2, tmp1_s16, consts.val[0], 2);
  tmp3 = vmlsl_lane_s16(tmp3, tmp0_s16, consts.val[0], 0);

  tmp0 = vaddq_s32(tmp0, z3);
  tmp1 = vaddq_s32(tmp1, z4);
  tmp2 = vaddq_s32(tmp2, z3);
  tmp3 = vaddq_s32(tmp3, z4);

  /* Final output stage: descale and narrow to 16-bit. */
  int16x4x4_t rows_0123 = { {
    vrshrn_n_s32(vaddq_s32(tmp10, tmp3), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp11, tmp2), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp12, tmp1), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp13, tmp0), DESCALE_P1)
  } };
  int16x4x4_t rows_4567 = { {
    vrshrn_n_s32(vsubq_s32(tmp13, tmp0), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp12, tmp1), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp11, tmp2), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp10, tmp3), DESCALE_P1)
  } };

  /* Store 4x4 blocks to the intermediate workspace, ready for the second pass.
   * (VST4 transposes the blocks.  We need to operate on rows in the next
   * pass.)
   */
  vst4_s16(workspace_1, rows_0123);
  vst4_s16(workspace_2, rows_4567);
}


/* Perform dequantization and the first pass of the accurate inverse DCT on a
 * 4x8 block of coefficients.
 *
 * This "sparse" version assumes that the AC coefficients in rows 4-7 are all
 * 0.  This simplifies the IDCT calculation, accelerating overall performance.
 */

static INLINE void jsimd_idct_islow_pass1_sparse(int16x4_t row0,
                                                 int16x4_t row1,
                                                 int16x4_t row2,
                                                 int16x4_t row3,
                                                 int16x4_t quant_row0,
                                                 int16x4_t quant_row1,
                                                 int16x4_t quant_row2,
                                                 int16x4_t quant_row3,
                                                 int16_t *workspace_1,
                                                 int16_t *workspace_2)
{
  /* Load constants for IDCT computation. */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_islow_neon_consts);
#else
  const int16x4_t consts1 = vld1_s16(jsimd_idct_islow_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_islow_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_islow_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif

  /* Even part (z3 is all 0) */
  int16x4_t z2_s16 = vmul_s16(row2, quant_row2);

  int32x4_t tmp2 = vmull_lane_s16(z2_s16, consts.val[0], 1);
  int32x4_t tmp3 = vmull_lane_s16(z2_s16, consts.val[1], 2);

  z2_s16 = vmul_s16(row0, quant_row0);
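  /* (Since z3 is zero, tmp0 = (z2 + z3) << CONST_BITS and
   * tmp1 = (z2 - z3) << CONST_BITS both reduce to z2 << CONST_BITS.)
   */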
  int32x4_t tmp0 = vshll_n_s16(z2_s16, CONST_BITS);
  int32x4_t tmp1 = vshll_n_s16(z2_s16, CONST_BITS);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp3);
  int32x4_t tmp13 = vsubq_s32(tmp0, tmp3);
  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp1, tmp2);

  /* Odd part (tmp0 and tmp1 are both all 0) */
  int16x4_t tmp2_s16 = vmul_s16(row3, quant_row3);
  int16x4_t tmp3_s16 = vmul_s16(row1, quant_row1);

  int16x4_t z3_s16 = tmp2_s16;
  int16x4_t z4_s16 = tmp3_s16;

  int32x4_t z3 = vmull_lane_s16(z3_s16, consts.val[2], 3);
  int32x4_t z4 = vmull_lane_s16(z3_s16, consts.val[1], 3);
  z3 = vmlal_lane_s16(z3, z4_s16, consts.val[1], 3);
  z4 = vmlal_lane_s16(z4, z4_s16, consts.val[2], 0);

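  /* (With tmp0 and tmp1 zero on input, the "regular" odd-part expressions
   * collapse to a single multiply-accumulate on top of z3 or z4 for each
   * output.)
   */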
  tmp0 = vmlsl_lane_s16(z3, tmp3_s16, consts.val[0], 0);
  tmp1 = vmlsl_lane_s16(z4, tmp2_s16, consts.val[0], 2);
  tmp2 = vmlal_lane_s16(z3, tmp2_s16, consts.val[2], 2);
  tmp3 = vmlal_lane_s16(z4, tmp3_s16, consts.val[1], 0);

  /* Final output stage: descale and narrow to 16-bit. */
  int16x4x4_t rows_0123 = { {
    vrshrn_n_s32(vaddq_s32(tmp10, tmp3), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp11, tmp2), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp12, tmp1), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp13, tmp0), DESCALE_P1)
  } };
  int16x4x4_t rows_4567 = { {
    vrshrn_n_s32(vsubq_s32(tmp13, tmp0), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp12, tmp1), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp11, tmp2), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp10, tmp3), DESCALE_P1)
  } };

  /* Store 4x4 blocks to the intermediate workspace, ready for the second pass.
   * (VST4 transposes the blocks.  We need to operate on rows in the next
   * pass.)
   */
  vst4_s16(workspace_1, rows_0123);
  vst4_s16(workspace_2, rows_4567);
}


/* Perform the second pass of the accurate inverse DCT on a 4x8 block of
 * coefficients.  (To process the full 8x8 DCT block, this function-- or some
 * other optimized variant-- needs to be called for both the right and left 4x8
 * blocks.)
 *
 * This "regular" version assumes that no optimization can be made to the IDCT
 * calculation, since no useful set of coefficient values is all 0 after the
 * first pass.
 *
 * Again, the original C implementation of the accurate IDCT
 * (jpeg_idct_islow()) can be found in jidctint.c.  Algorithmic changes made
 * here are documented inline.
 */

static INLINE void jsimd_idct_islow_pass2_regular(int16_t *workspace,
                                                  JSAMPARRAY output_buf,
                                                  JDIMENSION output_col,
                                                  unsigned buf_offset)
{
  /* Load constants for IDCT computation. */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_islow_neon_consts);
#else
  const int16x4_t consts1 = vld1_s16(jsimd_idct_islow_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_islow_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_islow_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif

  /* Even part */
  int16x4_t z2_s16 = vld1_s16(workspace + 2 * DCTSIZE / 2);
  int16x4_t z3_s16 = vld1_s16(workspace + 6 * DCTSIZE / 2);

  int32x4_t tmp2 = vmull_lane_s16(z2_s16, consts.val[0], 1);
  int32x4_t tmp3 = vmull_lane_s16(z2_s16, consts.val[1], 2);
  tmp2 = vmlal_lane_s16(tmp2, z3_s16, consts.val[2], 1);
  tmp3 = vmlal_lane_s16(tmp3, z3_s16, consts.val[0], 1);

  z2_s16 = vld1_s16(workspace + 0 * DCTSIZE / 2);
  z3_s16 = vld1_s16(workspace + 4 * DCTSIZE / 2);

  int32x4_t tmp0 = vshll_n_s16(vadd_s16(z2_s16, z3_s16), CONST_BITS);
  int32x4_t tmp1 = vshll_n_s16(vsub_s16(z2_s16, z3_s16), CONST_BITS);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp3);
  int32x4_t tmp13 = vsubq_s32(tmp0, tmp3);
  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp1, tmp2);

  /* Odd part */
  int16x4_t tmp0_s16 = vld1_s16(workspace + 7 * DCTSIZE / 2);
  int16x4_t tmp1_s16 = vld1_s16(workspace + 5 * DCTSIZE / 2);
  int16x4_t tmp2_s16 = vld1_s16(workspace + 3 * DCTSIZE / 2);
  int16x4_t tmp3_s16 = vld1_s16(workspace + 1 * DCTSIZE / 2);

  z3_s16 = vadd_s16(tmp0_s16, tmp2_s16);
  int16x4_t z4_s16 = vadd_s16(tmp1_s16, tmp3_s16);

  /* Implementation as per jpeg_idct_islow() in jidctint.c:
   *   z5 = (z3 + z4) * 1.175875602;
   *   z3 = z3 * -1.961570560;  z4 = z4 * -0.390180644;
   *   z3 += z5;  z4 += z5;
   *
   * This implementation:
   *   z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602;
   *   z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644);
   */

  int32x4_t z3 = vmull_lane_s16(z3_s16, consts.val[2], 3);
  int32x4_t z4 = vmull_lane_s16(z3_s16, consts.val[1], 3);
  z3 = vmlal_lane_s16(z3, z4_s16, consts.val[1], 3);
  z4 = vmlal_lane_s16(z4, z4_s16, consts.val[2], 0);

  /* Implementation as per jpeg_idct_islow() in jidctint.c:
   *   z1 = tmp0 + tmp3;  z2 = tmp1 + tmp2;
   *   tmp0 = tmp0 * 0.298631336;  tmp1 = tmp1 * 2.053119869;
   *   tmp2 = tmp2 * 3.072711026;  tmp3 = tmp3 * 1.501321110;
   *   z1 = z1 * -0.899976223;  z2 = z2 * -2.562915447;
   *   tmp0 += z1 + z3;  tmp1 += z2 + z4;
   *   tmp2 += z2 + z3;  tmp3 += z1 + z4;
   *
   * This implementation:
   *   tmp0 = tmp0 * (0.298631336 - 0.899976223) + tmp3 * -0.899976223;
   *   tmp1 = tmp1 * (2.053119869 - 2.562915447) + tmp2 * -2.562915447;
   *   tmp2 = tmp1 * -2.562915447 + tmp2 * (3.072711026 - 2.562915447);
   *   tmp3 = tmp0 * -0.899976223 + tmp3 * (1.501321110 - 0.899976223);
   *   tmp0 += z3;  tmp1 += z4;
   *   tmp2 += z3;  tmp3 += z4;
   */

  tmp0 = vmull_lane_s16(tmp0_s16, consts.val[0], 3);
  tmp1 = vmull_lane_s16(tmp1_s16, consts.val[1], 1);
  tmp2 = vmull_lane_s16(tmp2_s16, consts.val[2], 2);
  tmp3 = vmull_lane_s16(tmp3_s16, consts.val[1], 0);

  tmp0 = vmlsl_lane_s16(tmp0, tmp3_s16, consts.val[0], 0);
  tmp1 = vmlsl_lane_s16(tmp1, tmp2_s16, consts.val[0], 2);
  tmp2 = vmlsl_lane_s16(tmp2, tmp1_s16, consts.val[0], 2);
  tmp3 = vmlsl_lane_s16(tmp3, tmp0_s16, consts.val[0], 0);

  tmp0 = vaddq_s32(tmp0, z3);
  tmp1 = vaddq_s32(tmp1, z4);
  tmp2 = vaddq_s32(tmp2, z3);
  tmp3 = vaddq_s32(tmp3, z4);

  /* Final output stage: descale and narrow to 16-bit. */
  int16x8_t cols_02_s16 = vcombine_s16(vaddhn_s32(tmp10, tmp3),
                                       vaddhn_s32(tmp12, tmp1));
  int16x8_t cols_13_s16 = vcombine_s16(vaddhn_s32(tmp11, tmp2),
                                       vaddhn_s32(tmp13, tmp0));
  int16x8_t cols_46_s16 = vcombine_s16(vsubhn_s32(tmp13, tmp0),
                                       vsubhn_s32(tmp11, tmp2));
  int16x8_t cols_57_s16 = vcombine_s16(vsubhn_s32(tmp12, tmp1),
                                       vsubhn_s32(tmp10, tmp3));
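  /* (vaddhn_s32()/vsubhn_s32() take the high halves of the 32-bit results,
   * which implicitly descales by 16 bits; the remaining DESCALE_P2 - 16 = 2
   * bits are removed by the rounding, saturating shift below.)
   */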
  /* Descale and narrow to 8-bit. */
  int8x8_t cols_02_s8 = vqrshrn_n_s16(cols_02_s16, DESCALE_P2 - 16);
  int8x8_t cols_13_s8 = vqrshrn_n_s16(cols_13_s16, DESCALE_P2 - 16);
  int8x8_t cols_46_s8 = vqrshrn_n_s16(cols_46_s16, DESCALE_P2 - 16);
  int8x8_t cols_57_s8 = vqrshrn_n_s16(cols_57_s16, DESCALE_P2 - 16);
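  /* (The saturating shift above clamped the samples to the int8_t range
   * [-128, 127]; adding CENTERJSAMPLE = 128 as an unsigned byte recenters
   * them to [0, 255], combining the clamp with the level shift.)
   */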
  /* Clamp to range [0-255]. */
  uint8x8_t cols_02_u8 = vadd_u8(vreinterpret_u8_s8(cols_02_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_13_u8 = vadd_u8(vreinterpret_u8_s8(cols_13_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_46_u8 = vadd_u8(vreinterpret_u8_s8(cols_46_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_57_u8 = vadd_u8(vreinterpret_u8_s8(cols_57_s8),
                                 vdup_n_u8(CENTERJSAMPLE));

  /* Transpose 4x8 block and store to memory.  (Zipping adjacent columns
   * together allows us to store 16-bit elements.)
   */
  uint8x8x2_t cols_01_23 = vzip_u8(cols_02_u8, cols_13_u8);
  uint8x8x2_t cols_45_67 = vzip_u8(cols_46_u8, cols_57_u8);
  uint16x4x4_t cols_01_23_45_67 = { {
    vreinterpret_u16_u8(cols_01_23.val[0]),
    vreinterpret_u16_u8(cols_01_23.val[1]),
    vreinterpret_u16_u8(cols_45_67.val[0]),
    vreinterpret_u16_u8(cols_45_67.val[1])
  } };

  JSAMPROW outptr0 = output_buf[buf_offset + 0] + output_col;
  JSAMPROW outptr1 = output_buf[buf_offset + 1] + output_col;
  JSAMPROW outptr2 = output_buf[buf_offset + 2] + output_col;
  JSAMPROW outptr3 = output_buf[buf_offset + 3] + output_col;
  /* VST4 of 16-bit elements completes the transpose. */
  vst4_lane_u16((uint16_t *)outptr0, cols_01_23_45_67, 0);
  vst4_lane_u16((uint16_t *)outptr1, cols_01_23_45_67, 1);
  vst4_lane_u16((uint16_t *)outptr2, cols_01_23_45_67, 2);
  vst4_lane_u16((uint16_t *)outptr3, cols_01_23_45_67, 3);
}


/* Perform the second pass of the accurate inverse DCT on a 4x8 block of
 * coefficients.
 *
 * This "sparse" version assumes that the coefficient values (after the first
 * pass) in rows 4-7 are all 0.  This simplifies the IDCT calculation,
 * accelerating overall performance.
 */

static INLINE void jsimd_idct_islow_pass2_sparse(int16_t *workspace,
                                                 JSAMPARRAY output_buf,
                                                 JDIMENSION output_col,
                                                 unsigned buf_offset)
{
  /* Load constants for IDCT computation. */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_islow_neon_consts);
#else
  const int16x4_t consts1 = vld1_s16(jsimd_idct_islow_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_islow_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_islow_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif

  /* Even part (z3 is all 0) */
  int16x4_t z2_s16 = vld1_s16(workspace + 2 * DCTSIZE / 2);

  int32x4_t tmp2 = vmull_lane_s16(z2_s16, consts.val[0], 1);
  int32x4_t tmp3 = vmull_lane_s16(z2_s16, consts.val[1], 2);

  z2_s16 = vld1_s16(workspace + 0 * DCTSIZE / 2);
  int32x4_t tmp0 = vshll_n_s16(z2_s16, CONST_BITS);
  int32x4_t tmp1 = vshll_n_s16(z2_s16, CONST_BITS);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp3);
  int32x4_t tmp13 = vsubq_s32(tmp0, tmp3);
  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp1, tmp2);

  /* Odd part (tmp0 and tmp1 are both all 0) */
  int16x4_t tmp2_s16 = vld1_s16(workspace + 3 * DCTSIZE / 2);
  int16x4_t tmp3_s16 = vld1_s16(workspace + 1 * DCTSIZE / 2);

  int16x4_t z3_s16 = tmp2_s16;
  int16x4_t z4_s16 = tmp3_s16;

  int32x4_t z3 = vmull_lane_s16(z3_s16, consts.val[2], 3);
  z3 = vmlal_lane_s16(z3, z4_s16, consts.val[1], 3);
  int32x4_t z4 = vmull_lane_s16(z3_s16, consts.val[1], 3);
  z4 = vmlal_lane_s16(z4, z4_s16, consts.val[2], 0);

  tmp0 = vmlsl_lane_s16(z3, tmp3_s16, consts.val[0], 0);
  tmp1 = vmlsl_lane_s16(z4, tmp2_s16, consts.val[0], 2);
  tmp2 = vmlal_lane_s16(z3, tmp2_s16, consts.val[2], 2);
  tmp3 = vmlal_lane_s16(z4, tmp3_s16, consts.val[1], 0);

  /* Final output stage: descale and narrow to 16-bit. */
  int16x8_t cols_02_s16 = vcombine_s16(vaddhn_s32(tmp10, tmp3),
                                       vaddhn_s32(tmp12, tmp1));
  int16x8_t cols_13_s16 = vcombine_s16(vaddhn_s32(tmp11, tmp2),
                                       vaddhn_s32(tmp13, tmp0));
  int16x8_t cols_46_s16 = vcombine_s16(vsubhn_s32(tmp13, tmp0),
                                       vsubhn_s32(tmp11, tmp2));
  int16x8_t cols_57_s16 = vcombine_s16(vsubhn_s32(tmp12, tmp1),
                                       vsubhn_s32(tmp10, tmp3));
  /* Descale and narrow to 8-bit. */
  int8x8_t cols_02_s8 = vqrshrn_n_s16(cols_02_s16, DESCALE_P2 - 16);
  int8x8_t cols_13_s8 = vqrshrn_n_s16(cols_13_s16, DESCALE_P2 - 16);
  int8x8_t cols_46_s8 = vqrshrn_n_s16(cols_46_s16, DESCALE_P2 - 16);
  int8x8_t cols_57_s8 = vqrshrn_n_s16(cols_57_s16, DESCALE_P2 - 16);
  /* Clamp to range [0-255]. */
  uint8x8_t cols_02_u8 = vadd_u8(vreinterpret_u8_s8(cols_02_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_13_u8 = vadd_u8(vreinterpret_u8_s8(cols_13_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_46_u8 = vadd_u8(vreinterpret_u8_s8(cols_46_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_57_u8 = vadd_u8(vreinterpret_u8_s8(cols_57_s8),
                                 vdup_n_u8(CENTERJSAMPLE));

  /* Transpose 4x8 block and store to memory.  (Zipping adjacent columns
   * together allows us to store 16-bit elements.)
   */
  uint8x8x2_t cols_01_23 = vzip_u8(cols_02_u8, cols_13_u8);
  uint8x8x2_t cols_45_67 = vzip_u8(cols_46_u8, cols_57_u8);
  uint16x4x4_t cols_01_23_45_67 = { {
    vreinterpret_u16_u8(cols_01_23.val[0]),
    vreinterpret_u16_u8(cols_01_23.val[1]),
    vreinterpret_u16_u8(cols_45_67.val[0]),
    vreinterpret_u16_u8(cols_45_67.val[1])
  } };

  JSAMPROW outptr0 = output_buf[buf_offset + 0] + output_col;
  JSAMPROW outptr1 = output_buf[buf_offset + 1] + output_col;
  JSAMPROW outptr2 = output_buf[buf_offset + 2] + output_col;
  JSAMPROW outptr3 = output_buf[buf_offset + 3] + output_col;
  /* VST4 of 16-bit elements completes the transpose. */
  vst4_lane_u16((uint16_t *)outptr0, cols_01_23_45_67, 0);
  vst4_lane_u16((uint16_t *)outptr1, cols_01_23_45_67, 1);
  vst4_lane_u16((uint16_t *)outptr2, cols_01_23_45_67, 2);
  vst4_lane_u16((uint16_t *)outptr3, cols_01_23_45_67, 3);
}