/*
 * jchuff-neon.c - Huffman entropy encoding (32-bit Arm Neon)
 *
 * Copyright (C) 2020, Arm Limited. All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty. In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 *
 * NOTE: All referenced figures are from
 * Recommendation ITU-T T.81 (1992) | ISO/IEC 10918-1:1994.
 */

#define JPEG_INTERNALS
#include "../../../jinclude.h"
#include "../../../jpeglib.h"
#include "../../../jsimd.h"
#include "../../../jdct.h"
#include "../../../jsimddct.h"
#include "../../jsimd.h"
#include "../jchuff.h"
#include "neon-compat.h"

#include <limits.h>

#include <arm_neon.h>


JOCTET *jsimd_huff_encode_one_block_neon(void *state, JOCTET *buffer,
                                         JCOEFPTR block, int last_dc_val,
                                         c_derived_tbl *dctbl,
                                         c_derived_tbl *actbl)
{
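  /* Scratch arrays, indexed in zig-zag order: block_nbits[] holds the bit
   * length (the SSSS magnitude category) of each coefficient, and
   * block_diff[] holds the bits that are appended after the Huffman code.
   */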
  uint8_t block_nbits[DCTSIZE2];
  uint16_t block_diff[DCTSIZE2];

  /* Load rows of coefficients from DCT block in zig-zag order. */

  /* Compute DC coefficient difference value. (F.1.1.5.1) */
  int16x8_t row0 = vdupq_n_s16(block[0] - last_dc_val);
  row0 = vld1q_lane_s16(block + 1, row0, 1);
  row0 = vld1q_lane_s16(block + 8, row0, 2);
  row0 = vld1q_lane_s16(block + 16, row0, 3);
  row0 = vld1q_lane_s16(block + 9, row0, 4);
  row0 = vld1q_lane_s16(block + 2, row0, 5);
  row0 = vld1q_lane_s16(block + 3, row0, 6);
  row0 = vld1q_lane_s16(block + 10, row0, 7);

  int16x8_t row1 = vld1q_dup_s16(block + 17);
  row1 = vld1q_lane_s16(block + 24, row1, 1);
  row1 = vld1q_lane_s16(block + 32, row1, 2);
  row1 = vld1q_lane_s16(block + 25, row1, 3);
  row1 = vld1q_lane_s16(block + 18, row1, 4);
  row1 = vld1q_lane_s16(block + 11, row1, 5);
  row1 = vld1q_lane_s16(block + 4, row1, 6);
  row1 = vld1q_lane_s16(block + 5, row1, 7);

  int16x8_t row2 = vld1q_dup_s16(block + 12);
  row2 = vld1q_lane_s16(block + 19, row2, 1);
  row2 = vld1q_lane_s16(block + 26, row2, 2);
  row2 = vld1q_lane_s16(block + 33, row2, 3);
  row2 = vld1q_lane_s16(block + 40, row2, 4);
  row2 = vld1q_lane_s16(block + 48, row2, 5);
  row2 = vld1q_lane_s16(block + 41, row2, 6);
  row2 = vld1q_lane_s16(block + 34, row2, 7);

  int16x8_t row3 = vld1q_dup_s16(block + 27);
  row3 = vld1q_lane_s16(block + 20, row3, 1);
  row3 = vld1q_lane_s16(block + 13, row3, 2);
  row3 = vld1q_lane_s16(block + 6, row3, 3);
  row3 = vld1q_lane_s16(block + 7, row3, 4);
  row3 = vld1q_lane_s16(block + 14, row3, 5);
  row3 = vld1q_lane_s16(block + 21, row3, 6);
  row3 = vld1q_lane_s16(block + 28, row3, 7);
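
  /* Rows 4-7 (zig-zag positions 32-63) are loaded and processed further
   * below, after the results for rows 0-3 have been stored, likely to limit
   * the number of Neon registers that are live at any one time.
   */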

  int16x8_t abs_row0 = vabsq_s16(row0);
  int16x8_t abs_row1 = vabsq_s16(row1);
  int16x8_t abs_row2 = vabsq_s16(row2);
  int16x8_t abs_row3 = vabsq_s16(row3);

  int16x8_t row0_lz = vclzq_s16(abs_row0);
  int16x8_t row1_lz = vclzq_s16(abs_row1);
  int16x8_t row2_lz = vclzq_s16(abs_row2);
  int16x8_t row3_lz = vclzq_s16(abs_row3);

  /* Compute number of bits required to represent each coefficient. */
  uint8x8_t row0_nbits = vsub_u8(vdup_n_u8(16),
                                 vmovn_u16(vreinterpretq_u16_s16(row0_lz)));
  uint8x8_t row1_nbits = vsub_u8(vdup_n_u8(16),
                                 vmovn_u16(vreinterpretq_u16_s16(row1_lz)));
  uint8x8_t row2_nbits = vsub_u8(vdup_n_u8(16),
                                 vmovn_u16(vreinterpretq_u16_s16(row2_lz)));
  uint8x8_t row3_nbits = vsub_u8(vdup_n_u8(16),
                                 vmovn_u16(vreinterpretq_u16_s16(row3_lz)));
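  /* nbits = 16 - CLZ(|coefficient|), i.e. the position of the highest set
   * bit; a zero coefficient yields nbits == 0.
   */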

  vst1_u8(block_nbits + 0 * DCTSIZE, row0_nbits);
  vst1_u8(block_nbits + 1 * DCTSIZE, row1_nbits);
  vst1_u8(block_nbits + 2 * DCTSIZE, row2_nbits);
  vst1_u8(block_nbits + 3 * DCTSIZE, row3_nbits);

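  /* Compute the sign masks: an arithmetic shift right by 15 yields all-ones
   * for negative coefficients and zero otherwise; shifting that right again
   * by the leading-zero count (vshlq with a negated shift count) leaves just
   * the low nbits bits set. XORing the absolute value with this mask below
   * produces the one's-complement form that JPEG requires for the appended
   * bits of negative values, while leaving non-negative values unchanged.
   */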
  uint16x8_t row0_mask =
    vshlq_u16(vreinterpretq_u16_s16(vshrq_n_s16(row0, 15)),
              vnegq_s16(row0_lz));
  uint16x8_t row1_mask =
    vshlq_u16(vreinterpretq_u16_s16(vshrq_n_s16(row1, 15)),
              vnegq_s16(row1_lz));
  uint16x8_t row2_mask =
    vshlq_u16(vreinterpretq_u16_s16(vshrq_n_s16(row2, 15)),
              vnegq_s16(row2_lz));
  uint16x8_t row3_mask =
    vshlq_u16(vreinterpretq_u16_s16(vshrq_n_s16(row3, 15)),
              vnegq_s16(row3_lz));

  uint16x8_t row0_diff = veorq_u16(vreinterpretq_u16_s16(abs_row0), row0_mask);
  uint16x8_t row1_diff = veorq_u16(vreinterpretq_u16_s16(abs_row1), row1_mask);
  uint16x8_t row2_diff = veorq_u16(vreinterpretq_u16_s16(abs_row2), row2_mask);
  uint16x8_t row3_diff = veorq_u16(vreinterpretq_u16_s16(abs_row3), row3_mask);

  /* Store diff values for rows 0, 1, 2, and 3. */
  vst1q_u16(block_diff + 0 * DCTSIZE, row0_diff);
  vst1q_u16(block_diff + 1 * DCTSIZE, row1_diff);
  vst1q_u16(block_diff + 2 * DCTSIZE, row2_diff);
  vst1q_u16(block_diff + 3 * DCTSIZE, row3_diff);

  /* Load last four rows of coefficients from DCT block in zig-zag order. */
  int16x8_t row4 = vld1q_dup_s16(block + 35);
  row4 = vld1q_lane_s16(block + 42, row4, 1);
  row4 = vld1q_lane_s16(block + 49, row4, 2);
  row4 = vld1q_lane_s16(block + 56, row4, 3);
  row4 = vld1q_lane_s16(block + 57, row4, 4);
  row4 = vld1q_lane_s16(block + 50, row4, 5);
  row4 = vld1q_lane_s16(block + 43, row4, 6);
  row4 = vld1q_lane_s16(block + 36, row4, 7);

  int16x8_t row5 = vld1q_dup_s16(block + 29);
  row5 = vld1q_lane_s16(block + 22, row5, 1);
  row5 = vld1q_lane_s16(block + 15, row5, 2);
  row5 = vld1q_lane_s16(block + 23, row5, 3);
  row5 = vld1q_lane_s16(block + 30, row5, 4);
  row5 = vld1q_lane_s16(block + 37, row5, 5);
  row5 = vld1q_lane_s16(block + 44, row5, 6);
  row5 = vld1q_lane_s16(block + 51, row5, 7);

  int16x8_t row6 = vld1q_dup_s16(block + 58);
  row6 = vld1q_lane_s16(block + 59, row6, 1);
  row6 = vld1q_lane_s16(block + 52, row6, 2);
  row6 = vld1q_lane_s16(block + 45, row6, 3);
  row6 = vld1q_lane_s16(block + 38, row6, 4);
  row6 = vld1q_lane_s16(block + 31, row6, 5);
  row6 = vld1q_lane_s16(block + 39, row6, 6);
  row6 = vld1q_lane_s16(block + 46, row6, 7);

  int16x8_t row7 = vld1q_dup_s16(block + 53);
  row7 = vld1q_lane_s16(block + 60, row7, 1);
  row7 = vld1q_lane_s16(block + 61, row7, 2);
  row7 = vld1q_lane_s16(block + 54, row7, 3);
  row7 = vld1q_lane_s16(block + 47, row7, 4);
  row7 = vld1q_lane_s16(block + 55, row7, 5);
  row7 = vld1q_lane_s16(block + 62, row7, 6);
  row7 = vld1q_lane_s16(block + 63, row7, 7);

  int16x8_t abs_row4 = vabsq_s16(row4);
  int16x8_t abs_row5 = vabsq_s16(row5);
  int16x8_t abs_row6 = vabsq_s16(row6);
  int16x8_t abs_row7 = vabsq_s16(row7);

  int16x8_t row4_lz = vclzq_s16(abs_row4);
  int16x8_t row5_lz = vclzq_s16(abs_row5);
  int16x8_t row6_lz = vclzq_s16(abs_row6);
  int16x8_t row7_lz = vclzq_s16(abs_row7);

  /* Compute number of bits required to represent each coefficient. */
  uint8x8_t row4_nbits = vsub_u8(vdup_n_u8(16),
                                 vmovn_u16(vreinterpretq_u16_s16(row4_lz)));
  uint8x8_t row5_nbits = vsub_u8(vdup_n_u8(16),
                                 vmovn_u16(vreinterpretq_u16_s16(row5_lz)));
  uint8x8_t row6_nbits = vsub_u8(vdup_n_u8(16),
                                 vmovn_u16(vreinterpretq_u16_s16(row6_lz)));
  uint8x8_t row7_nbits = vsub_u8(vdup_n_u8(16),
                                 vmovn_u16(vreinterpretq_u16_s16(row7_lz)));

  vst1_u8(block_nbits + 4 * DCTSIZE, row4_nbits);
  vst1_u8(block_nbits + 5 * DCTSIZE, row5_nbits);
  vst1_u8(block_nbits + 6 * DCTSIZE, row6_nbits);
  vst1_u8(block_nbits + 7 * DCTSIZE, row7_nbits);

  uint16x8_t row4_mask =
    vshlq_u16(vreinterpretq_u16_s16(vshrq_n_s16(row4, 15)),
              vnegq_s16(row4_lz));
  uint16x8_t row5_mask =
    vshlq_u16(vreinterpretq_u16_s16(vshrq_n_s16(row5, 15)),
              vnegq_s16(row5_lz));
  uint16x8_t row6_mask =
    vshlq_u16(vreinterpretq_u16_s16(vshrq_n_s16(row6, 15)),
              vnegq_s16(row6_lz));
  uint16x8_t row7_mask =
    vshlq_u16(vreinterpretq_u16_s16(vshrq_n_s16(row7, 15)),
              vnegq_s16(row7_lz));

  uint16x8_t row4_diff = veorq_u16(vreinterpretq_u16_s16(abs_row4), row4_mask);
  uint16x8_t row5_diff = veorq_u16(vreinterpretq_u16_s16(abs_row5), row5_mask);
  uint16x8_t row6_diff = veorq_u16(vreinterpretq_u16_s16(abs_row6), row6_mask);
  uint16x8_t row7_diff = veorq_u16(vreinterpretq_u16_s16(abs_row7), row7_mask);

  /* Store diff values for rows 4, 5, 6, and 7. */
  vst1q_u16(block_diff + 4 * DCTSIZE, row4_diff);
  vst1q_u16(block_diff + 5 * DCTSIZE, row5_diff);
  vst1q_u16(block_diff + 6 * DCTSIZE, row6_diff);
  vst1q_u16(block_diff + 7 * DCTSIZE, row7_diff);
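  /* block_nbits[] and block_diff[] now hold the bit count and the appended
   * bit pattern for all 64 coefficients in zig-zag order.
   */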

  /* Construct bitmap to accelerate encoding of AC coefficients. A set bit
   * means that the corresponding coefficient != 0.
   */
  uint8x8_t row0_nbits_gt0 = vcgt_u8(row0_nbits, vdup_n_u8(0));
  uint8x8_t row1_nbits_gt0 = vcgt_u8(row1_nbits, vdup_n_u8(0));
  uint8x8_t row2_nbits_gt0 = vcgt_u8(row2_nbits, vdup_n_u8(0));
  uint8x8_t row3_nbits_gt0 = vcgt_u8(row3_nbits, vdup_n_u8(0));
  uint8x8_t row4_nbits_gt0 = vcgt_u8(row4_nbits, vdup_n_u8(0));
  uint8x8_t row5_nbits_gt0 = vcgt_u8(row5_nbits, vdup_n_u8(0));
  uint8x8_t row6_nbits_gt0 = vcgt_u8(row6_nbits, vdup_n_u8(0));
  uint8x8_t row7_nbits_gt0 = vcgt_u8(row7_nbits, vdup_n_u8(0));

  /* { 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01 } */
  const uint8x8_t bitmap_mask =
    vreinterpret_u8_u64(vmov_n_u64(0x0102040810204080));

  row0_nbits_gt0 = vand_u8(row0_nbits_gt0, bitmap_mask);
  row1_nbits_gt0 = vand_u8(row1_nbits_gt0, bitmap_mask);
  row2_nbits_gt0 = vand_u8(row2_nbits_gt0, bitmap_mask);
  row3_nbits_gt0 = vand_u8(row3_nbits_gt0, bitmap_mask);
  row4_nbits_gt0 = vand_u8(row4_nbits_gt0, bitmap_mask);
  row5_nbits_gt0 = vand_u8(row5_nbits_gt0, bitmap_mask);
  row6_nbits_gt0 = vand_u8(row6_nbits_gt0, bitmap_mask);
  row7_nbits_gt0 = vand_u8(row7_nbits_gt0, bitmap_mask);

  uint8x8_t bitmap_rows_10 = vpadd_u8(row1_nbits_gt0, row0_nbits_gt0);
  uint8x8_t bitmap_rows_32 = vpadd_u8(row3_nbits_gt0, row2_nbits_gt0);
  uint8x8_t bitmap_rows_54 = vpadd_u8(row5_nbits_gt0, row4_nbits_gt0);
  uint8x8_t bitmap_rows_76 = vpadd_u8(row7_nbits_gt0, row6_nbits_gt0);
  uint8x8_t bitmap_rows_3210 = vpadd_u8(bitmap_rows_32, bitmap_rows_10);
  uint8x8_t bitmap_rows_7654 = vpadd_u8(bitmap_rows_76, bitmap_rows_54);
  uint8x8_t bitmap = vpadd_u8(bitmap_rows_7654, bitmap_rows_3210);
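  /* Three rounds of pairwise additions collapse each masked row into one
   * byte, leaving row 0 in the most significant byte of the 64-bit bitmap
   * and, within each byte, the row's first zig-zag coefficient in the most
   * significant bit; read MSB to LSB, the bitmap covers coefficients 0-63 in
   * zig-zag order.
   */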

  /* Shift left to remove DC bit. */
  bitmap = vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(bitmap), 1));
  /* Move bitmap to 32-bit scalar registers. */
  uint32_t bitmap_1_32 = vget_lane_u32(vreinterpret_u32_u8(bitmap), 1);
  uint32_t bitmap_33_63 = vget_lane_u32(vreinterpret_u32_u8(bitmap), 0);
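  /* bitmap_1_32 covers AC coefficients 1-32 and bitmap_33_63 covers
   * coefficients 33-63, each with the lowest zig-zag index in the most
   * significant bit. Splitting the bitmap in two lets the scalar loops below
   * use 32-bit CLZ on this 32-bit architecture.
   */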

  /* Set up state and bit buffer for output bitstream. */
  working_state *state_ptr = (working_state *)state;
  int free_bits = state_ptr->cur.free_bits;
  size_t put_buffer = state_ptr->cur.put_buffer;
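  /* PUT_CODE() and PUT_BITS() (from the included jchuff.h) emit bits through
   * these local copies and the buffer pointer; the updated values are written
   * back to the state before returning.
   */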

  /* Encode DC coefficient. */

  unsigned int nbits = block_nbits[0];
  /* Emit Huffman-coded symbol and additional diff bits. */
  unsigned int diff = block_diff[0];
  PUT_CODE(dctbl->ehufco[nbits], dctbl->ehufsi[nbits], diff)

  /* Encode AC coefficients. */

  unsigned int r = 0;  /* r = run length of zeros */
  unsigned int i = 1;  /* i = number of coefficients encoded */
  /* Code and size information for a run length of 16 zero coefficients */
  const unsigned int code_0xf0 = actbl->ehufco[0xf0];
  const unsigned int size_0xf0 = actbl->ehufsi[0xf0];

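  /* A CLZ on the bitmap gives the length of the current run of zero
   * coefficients directly, so zero coefficients are skipped without being
   * tested individually.
   */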
  while (bitmap_1_32 != 0) {
    r = BUILTIN_CLZ(bitmap_1_32);
    i += r;
    bitmap_1_32 <<= r;
    nbits = block_nbits[i];
    diff = block_diff[i];
    while (r > 15) {
      /* If run length > 15, emit special run-length-16 codes. */
      PUT_BITS(code_0xf0, size_0xf0)
      r -= 16;
    }
    /* Emit Huffman symbol for run length / number of bits. (F.1.2.2.1) */
    unsigned int rs = (r << 4) + nbits;
    PUT_CODE(actbl->ehufco[rs], actbl->ehufsi[rs], diff)
    i++;
    bitmap_1_32 <<= 1;
  }

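  /* Any coefficients remaining in the first half (positions i..32) are zero;
   * carry that count over as the initial run length for the second half.
   */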
  r = 33 - i;
  i = 33;

  while (bitmap_33_63 != 0) {
    unsigned int leading_zeros = BUILTIN_CLZ(bitmap_33_63);
    r += leading_zeros;
    i += leading_zeros;
    bitmap_33_63 <<= leading_zeros;
    nbits = block_nbits[i];
    diff = block_diff[i];
    while (r > 15) {
      /* If run length > 15, emit special run-length-16 codes. */
      PUT_BITS(code_0xf0, size_0xf0)
      r -= 16;
    }
    /* Emit Huffman symbol for run length / number of bits. (F.1.2.2.1) */
    unsigned int rs = (r << 4) + nbits;
    PUT_CODE(actbl->ehufco[rs], actbl->ehufsi[rs], diff)
    r = 0;
    i++;
    bitmap_33_63 <<= 1;
  }

  /* If the last coefficient(s) were zero, emit an end-of-block (EOB) code.
   * The value of RS for the EOB code is 0.
   */
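  /* (i == 64 here means the final coefficient, zig-zag position 63, was
   * nonzero, so no EOB code is needed.)
   */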
  if (i != 64) {
    PUT_BITS(actbl->ehufco[0], actbl->ehufsi[0])
  }

  state_ptr->cur.put_buffer = put_buffer;
  state_ptr->cur.free_bits = free_bits;

  return buffer;
}