/*
 * jchuff-neon.c - Huffman entropy encoding (64-bit Arm Neon)
 *
 * Copyright (C) 2020-2021, Arm Limited.  All Rights Reserved.
 * Copyright (C) 2020, D. R. Commander.  All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 *
 * NOTE: All referenced figures are from
 * Recommendation ITU-T T.81 (1992) | ISO/IEC 10918-1:1994.
 */

#define JPEG_INTERNALS
#include "../../../jinclude.h"
#include "../../../jpeglib.h"
#include "../../../jsimd.h"
#include "../../../jdct.h"
#include "../../../jsimddct.h"
#include "../../jsimd.h"
#include "../align.h"
#include "../jchuff.h"
#include "neon-compat.h"

#include <limits.h>

#include <arm_neon.h>


ALIGN(16) static const uint8_t jsimd_huff_encode_one_block_consts[] = {
    0,   1,   2,   3,  16,  17,  32,  33,
   18,  19,   4,   5,   6,   7,  20,  21,
   34,  35,  48,  49, 255, 255,  50,  51,
   36,  37,  22,  23,   8,   9,  10,  11,
  255, 255,   6,   7,  20,  21,  34,  35,
   48,  49, 255, 255,  50,  51,  36,  37,
   54,  55,  40,  41,  26,  27,  12,  13,
   14,  15,  28,  29,  42,  43,  56,  57,
    6,   7,  20,  21,  34,  35,  48,  49,
   50,  51,  36,  37,  22,  23,   8,   9,
   26,  27,  12,  13, 255, 255,  14,  15,
   28,  29,  42,  43,  56,  57, 255, 255,
   52,  53,  54,  55,  40,  41,  26,  27,
   12,  13, 255, 255,  14,  15,  28,  29,
   26,  27,  40,  41,  42,  43,  28,  29,
   14,  15,  30,  31,  44,  45,  46,  47
};
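
/* Each pair of bytes in the table above selects one 16-bit coefficient for
 * the vqtbl4q_s8()/vqtbl3q_s8() lookups below, gathering each row of the
 * output into zig-zag order.  Lanes marked 255 are out of range for the
 * table instruction and therefore produce zero; they correspond to the
 * coefficients that are patched in afterwards with vsetq_lane_s16().
 */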

JOCTET *jsimd_huff_encode_one_block_neon(void *state, JOCTET *buffer,
                                         JCOEFPTR block, int last_dc_val,
                                         c_derived_tbl *dctbl,
                                         c_derived_tbl *actbl)
{
  uint16_t block_diff[DCTSIZE2];

  /* Load lookup table indices for rows of zig-zag ordering. */
#ifdef HAVE_VLD1Q_U8_X4
  const uint8x16x4_t idx_rows_0123 =
    vld1q_u8_x4(jsimd_huff_encode_one_block_consts + 0 * DCTSIZE);
  const uint8x16x4_t idx_rows_4567 =
    vld1q_u8_x4(jsimd_huff_encode_one_block_consts + 8 * DCTSIZE);
#else
  /* GCC does not currently support the intrinsic vld1q_<type>_x4(). */
  const uint8x16x4_t idx_rows_0123 = { {
    vld1q_u8(jsimd_huff_encode_one_block_consts + 0 * DCTSIZE),
    vld1q_u8(jsimd_huff_encode_one_block_consts + 2 * DCTSIZE),
    vld1q_u8(jsimd_huff_encode_one_block_consts + 4 * DCTSIZE),
    vld1q_u8(jsimd_huff_encode_one_block_consts + 6 * DCTSIZE)
  } };
  const uint8x16x4_t idx_rows_4567 = { {
    vld1q_u8(jsimd_huff_encode_one_block_consts + 8 * DCTSIZE),
    vld1q_u8(jsimd_huff_encode_one_block_consts + 10 * DCTSIZE),
    vld1q_u8(jsimd_huff_encode_one_block_consts + 12 * DCTSIZE),
    vld1q_u8(jsimd_huff_encode_one_block_consts + 14 * DCTSIZE)
  } };
#endif

  /* Load 8x8 block of DCT coefficients. */
#ifdef HAVE_VLD1Q_U8_X4
  const int8x16x4_t tbl_rows_0123 =
    vld1q_s8_x4((int8_t *)(block + 0 * DCTSIZE));
  const int8x16x4_t tbl_rows_4567 =
    vld1q_s8_x4((int8_t *)(block + 4 * DCTSIZE));
#else
  const int8x16x4_t tbl_rows_0123 = { {
    vld1q_s8((int8_t *)(block + 0 * DCTSIZE)),
    vld1q_s8((int8_t *)(block + 1 * DCTSIZE)),
    vld1q_s8((int8_t *)(block + 2 * DCTSIZE)),
    vld1q_s8((int8_t *)(block + 3 * DCTSIZE))
  } };
  const int8x16x4_t tbl_rows_4567 = { {
    vld1q_s8((int8_t *)(block + 4 * DCTSIZE)),
    vld1q_s8((int8_t *)(block + 5 * DCTSIZE)),
    vld1q_s8((int8_t *)(block + 6 * DCTSIZE)),
    vld1q_s8((int8_t *)(block + 7 * DCTSIZE))
  } };
#endif

  /* Initialise extra lookup tables. */
  const int8x16x4_t tbl_rows_2345 = { {
    tbl_rows_0123.val[2], tbl_rows_0123.val[3],
    tbl_rows_4567.val[0], tbl_rows_4567.val[1]
  } };
  const int8x16x3_t tbl_rows_567 =
    { { tbl_rows_4567.val[1], tbl_rows_4567.val[2], tbl_rows_4567.val[3] } };
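  /* A single vqtbl4q_s8() lookup can index only the 64 bytes held in its
   * four source registers, which is half of the 128-byte coefficient block.
   * The overlapping tables above exist so that the zig-zag rows whose
   * coefficients straddle both halves of the block (rows 2, 5, and 7 below)
   * can still each be gathered with one table lookup.
   */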

  /* Shuffle coefficients into zig-zag order. */
  int16x8_t row0 =
    vreinterpretq_s16_s8(vqtbl4q_s8(tbl_rows_0123, idx_rows_0123.val[0]));
  int16x8_t row1 =
    vreinterpretq_s16_s8(vqtbl4q_s8(tbl_rows_0123, idx_rows_0123.val[1]));
  int16x8_t row2 =
    vreinterpretq_s16_s8(vqtbl4q_s8(tbl_rows_2345, idx_rows_0123.val[2]));
  int16x8_t row3 =
    vreinterpretq_s16_s8(vqtbl4q_s8(tbl_rows_0123, idx_rows_0123.val[3]));
  int16x8_t row4 =
    vreinterpretq_s16_s8(vqtbl4q_s8(tbl_rows_4567, idx_rows_4567.val[0]));
  int16x8_t row5 =
    vreinterpretq_s16_s8(vqtbl4q_s8(tbl_rows_2345, idx_rows_4567.val[1]));
  int16x8_t row6 =
    vreinterpretq_s16_s8(vqtbl4q_s8(tbl_rows_4567, idx_rows_4567.val[2]));
  int16x8_t row7 =
    vreinterpretq_s16_s8(vqtbl3q_s8(tbl_rows_567, idx_rows_4567.val[3]));

  /* Compute DC coefficient difference value (F.1.1.5.1). */
  row0 = vsetq_lane_s16(block[0] - last_dc_val, row0, 0);
  /* Initialize AC coefficient lanes not reachable by lookup tables. */
  row1 =
    vsetq_lane_s16(vgetq_lane_s16(vreinterpretq_s16_s8(tbl_rows_4567.val[0]),
                                  0), row1, 2);
  row2 =
    vsetq_lane_s16(vgetq_lane_s16(vreinterpretq_s16_s8(tbl_rows_0123.val[1]),
                                  4), row2, 0);
  row2 =
    vsetq_lane_s16(vgetq_lane_s16(vreinterpretq_s16_s8(tbl_rows_4567.val[2]),
                                  0), row2, 5);
  row5 =
    vsetq_lane_s16(vgetq_lane_s16(vreinterpretq_s16_s8(tbl_rows_0123.val[1]),
                                  7), row5, 2);
  row5 =
    vsetq_lane_s16(vgetq_lane_s16(vreinterpretq_s16_s8(tbl_rows_4567.val[2]),
                                  3), row5, 7);
  row6 =
    vsetq_lane_s16(vgetq_lane_s16(vreinterpretq_s16_s8(tbl_rows_0123.val[3]),
                                  7), row6, 5);

  /* DCT block is now in zig-zag order; start Huffman encoding process. */
  int16x8_t abs_row0 = vabsq_s16(row0);
  int16x8_t abs_row1 = vabsq_s16(row1);
  int16x8_t abs_row2 = vabsq_s16(row2);
  int16x8_t abs_row3 = vabsq_s16(row3);
  int16x8_t abs_row4 = vabsq_s16(row4);
  int16x8_t abs_row5 = vabsq_s16(row5);
  int16x8_t abs_row6 = vabsq_s16(row6);
  int16x8_t abs_row7 = vabsq_s16(row7);

  /* For negative coeffs: diff = abs(coeff) - 1 = ~abs(coeff) */
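  /* This identity follows from two's complement arithmetic: for v < 0,
   * ~abs(v) = -abs(v) - 1 = v - 1, which is the diff value that F.1.2.1
   * requires.  For example, v = -5: abs(v) = 0x0005, the sign mask
   * (v >> 15) = 0xFFFF, and 0x0005 ^ 0xFFFF = 0xFFFA = v - 1.  For v >= 0,
   * the sign mask is zero and the coefficient passes through unchanged.
   */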
  uint16x8_t row0_diff =
    vreinterpretq_u16_s16(veorq_s16(abs_row0, vshrq_n_s16(row0, 15)));
  uint16x8_t row1_diff =
    vreinterpretq_u16_s16(veorq_s16(abs_row1, vshrq_n_s16(row1, 15)));
  uint16x8_t row2_diff =
    vreinterpretq_u16_s16(veorq_s16(abs_row2, vshrq_n_s16(row2, 15)));
  uint16x8_t row3_diff =
    vreinterpretq_u16_s16(veorq_s16(abs_row3, vshrq_n_s16(row3, 15)));
  uint16x8_t row4_diff =
    vreinterpretq_u16_s16(veorq_s16(abs_row4, vshrq_n_s16(row4, 15)));
  uint16x8_t row5_diff =
    vreinterpretq_u16_s16(veorq_s16(abs_row5, vshrq_n_s16(row5, 15)));
  uint16x8_t row6_diff =
    vreinterpretq_u16_s16(veorq_s16(abs_row6, vshrq_n_s16(row6, 15)));
  uint16x8_t row7_diff =
    vreinterpretq_u16_s16(veorq_s16(abs_row7, vshrq_n_s16(row7, 15)));

  /* Construct bitmap to accelerate encoding of AC coefficients.  A set bit
   * means that the corresponding coefficient != 0.
   */
  uint8x8_t abs_row0_gt0 = vmovn_u16(vcgtq_u16(vreinterpretq_u16_s16(abs_row0),
                                               vdupq_n_u16(0)));
  uint8x8_t abs_row1_gt0 = vmovn_u16(vcgtq_u16(vreinterpretq_u16_s16(abs_row1),
                                               vdupq_n_u16(0)));
  uint8x8_t abs_row2_gt0 = vmovn_u16(vcgtq_u16(vreinterpretq_u16_s16(abs_row2),
                                               vdupq_n_u16(0)));
  uint8x8_t abs_row3_gt0 = vmovn_u16(vcgtq_u16(vreinterpretq_u16_s16(abs_row3),
                                               vdupq_n_u16(0)));
  uint8x8_t abs_row4_gt0 = vmovn_u16(vcgtq_u16(vreinterpretq_u16_s16(abs_row4),
                                               vdupq_n_u16(0)));
  uint8x8_t abs_row5_gt0 = vmovn_u16(vcgtq_u16(vreinterpretq_u16_s16(abs_row5),
                                               vdupq_n_u16(0)));
  uint8x8_t abs_row6_gt0 = vmovn_u16(vcgtq_u16(vreinterpretq_u16_s16(abs_row6),
                                               vdupq_n_u16(0)));
  uint8x8_t abs_row7_gt0 = vmovn_u16(vcgtq_u16(vreinterpretq_u16_s16(abs_row7),
                                               vdupq_n_u16(0)));

  /* { 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01 } */
  const uint8x8_t bitmap_mask =
    vreinterpret_u8_u64(vmov_n_u64(0x0102040810204080));
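  /* On a little-endian target, lane 0 of the reinterpreted vector is the
   * least significant byte of the u64 constant, so the lanes hold the
   * values listed above: lane i carries the mask bit for coefficient i of
   * each row.
   */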

  abs_row0_gt0 = vand_u8(abs_row0_gt0, bitmap_mask);
  abs_row1_gt0 = vand_u8(abs_row1_gt0, bitmap_mask);
  abs_row2_gt0 = vand_u8(abs_row2_gt0, bitmap_mask);
  abs_row3_gt0 = vand_u8(abs_row3_gt0, bitmap_mask);
  abs_row4_gt0 = vand_u8(abs_row4_gt0, bitmap_mask);
  abs_row5_gt0 = vand_u8(abs_row5_gt0, bitmap_mask);
  abs_row6_gt0 = vand_u8(abs_row6_gt0, bitmap_mask);
  abs_row7_gt0 = vand_u8(abs_row7_gt0, bitmap_mask);

  uint8x8_t bitmap_rows_10 = vpadd_u8(abs_row1_gt0, abs_row0_gt0);
  uint8x8_t bitmap_rows_32 = vpadd_u8(abs_row3_gt0, abs_row2_gt0);
  uint8x8_t bitmap_rows_54 = vpadd_u8(abs_row5_gt0, abs_row4_gt0);
  uint8x8_t bitmap_rows_76 = vpadd_u8(abs_row7_gt0, abs_row6_gt0);
  uint8x8_t bitmap_rows_3210 = vpadd_u8(bitmap_rows_32, bitmap_rows_10);
  uint8x8_t bitmap_rows_7654 = vpadd_u8(bitmap_rows_76, bitmap_rows_54);
  uint8x8_t bitmap_all = vpadd_u8(bitmap_rows_7654, bitmap_rows_3210);
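  /* Since each byte holds at most one distinct mask bit, the pairwise
   * additions above act as bitwise ORs: three rounds of vpadd_u8() fold the
   * eight bytes of each row into one byte and the eight rows into one
   * 64-bit value, with row 0 (and, within it, the DC coefficient) landing
   * in the most significant bits.  Once the DC bit is removed below,
   * leading zeros in the scalar bitmap count the zero-valued AC
   * coefficients in zig-zag order.
   */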

  /* Shift left to remove DC bit. */
  bitmap_all =
    vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(bitmap_all), 1));
  /* Count bits set (number of non-zero coefficients) in bitmap. */
  unsigned int non_zero_coefficients = vaddv_u8(vcnt_u8(bitmap_all));
  /* Move bitmap to 64-bit scalar register. */
  uint64_t bitmap = vget_lane_u64(vreinterpret_u64_u8(bitmap_all), 0);

  /* Set up state and bit buffer for output bitstream. */
  working_state *state_ptr = (working_state *)state;
  int free_bits = state_ptr->cur.free_bits;
  size_t put_buffer = state_ptr->cur.put_buffer;

  /* Encode DC coefficient. */

  /* Find nbits required to specify sign and amplitude of coefficient. */
#if defined(_MSC_VER) && !defined(__clang__)
  unsigned int lz = BUILTIN_CLZ(vgetq_lane_s16(abs_row0, 0));
#else
  unsigned int lz;
  __asm__("clz %w0, %w1" : "=r"(lz) : "r"(vgetq_lane_s16(abs_row0, 0)));
#endif
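  /* The inline assembly presumably exists so that a single CLZ instruction
   * operates on the 32-bit register that already holds the lane, without an
   * extra widening step; BUILTIN_CLZ (from neon-compat.h) is the fallback
   * for compilers that lack GCC-style inline assembly.
   */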
  unsigned int nbits = 32 - lz;
  /* Emit Huffman-coded symbol and additional diff bits. */
  unsigned int diff = (unsigned int)(vgetq_lane_u16(row0_diff, 0) << lz) >> lz;
  PUT_CODE(dctbl->ehufco[nbits], dctbl->ehufsi[nbits], diff)
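  /* Worked example: if block[0] - last_dc_val = 6 (binary 110), then
   * lz = 29 and nbits = 3.  The shift pair clears every bit above the low
   * nbits bits, so diff = 6, and the encoder emits the Huffman code for
   * category 3 followed by the three bits 110.
   */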

  /* Encode AC coefficients. */

  unsigned int r = 0;  /* r = run length of zeros */
  unsigned int i = 1;  /* i = number of coefficients encoded */
  /* Code and size information for a run length of 16 zero coefficients */
  const unsigned int code_0xf0 = actbl->ehufco[0xf0];
  const unsigned int size_0xf0 = actbl->ehufsi[0xf0];

  /* The most efficient method of computing nbits and diff depends on the
   * number of non-zero coefficients.  If the bitmap is not too sparse (> 8
   * non-zero AC coefficients), it is beneficial to use Neon; else we compute
   * nbits and diff on demand using scalar code.
   */
  if (non_zero_coefficients > 8) {
    uint8_t block_nbits[DCTSIZE2];

    int16x8_t row0_lz = vclzq_s16(abs_row0);
    int16x8_t row1_lz = vclzq_s16(abs_row1);
    int16x8_t row2_lz = vclzq_s16(abs_row2);
    int16x8_t row3_lz = vclzq_s16(abs_row3);
    int16x8_t row4_lz = vclzq_s16(abs_row4);
    int16x8_t row5_lz = vclzq_s16(abs_row5);
    int16x8_t row6_lz = vclzq_s16(abs_row6);
    int16x8_t row7_lz = vclzq_s16(abs_row7);
    /* Compute nbits needed to specify magnitude of each coefficient. */
    uint8x8_t row0_nbits = vsub_u8(vdup_n_u8(16),
                                   vmovn_u16(vreinterpretq_u16_s16(row0_lz)));
    uint8x8_t row1_nbits = vsub_u8(vdup_n_u8(16),
                                   vmovn_u16(vreinterpretq_u16_s16(row1_lz)));
    uint8x8_t row2_nbits = vsub_u8(vdup_n_u8(16),
                                   vmovn_u16(vreinterpretq_u16_s16(row2_lz)));
    uint8x8_t row3_nbits = vsub_u8(vdup_n_u8(16),
                                   vmovn_u16(vreinterpretq_u16_s16(row3_lz)));
    uint8x8_t row4_nbits = vsub_u8(vdup_n_u8(16),
                                   vmovn_u16(vreinterpretq_u16_s16(row4_lz)));
    uint8x8_t row5_nbits = vsub_u8(vdup_n_u8(16),
                                   vmovn_u16(vreinterpretq_u16_s16(row5_lz)));
    uint8x8_t row6_nbits = vsub_u8(vdup_n_u8(16),
                                   vmovn_u16(vreinterpretq_u16_s16(row6_lz)));
    uint8x8_t row7_nbits = vsub_u8(vdup_n_u8(16),
                                   vmovn_u16(vreinterpretq_u16_s16(row7_lz)));
    /* Store nbits. */
    vst1_u8(block_nbits + 0 * DCTSIZE, row0_nbits);
    vst1_u8(block_nbits + 1 * DCTSIZE, row1_nbits);
    vst1_u8(block_nbits + 2 * DCTSIZE, row2_nbits);
    vst1_u8(block_nbits + 3 * DCTSIZE, row3_nbits);
    vst1_u8(block_nbits + 4 * DCTSIZE, row4_nbits);
    vst1_u8(block_nbits + 5 * DCTSIZE, row5_nbits);
    vst1_u8(block_nbits + 6 * DCTSIZE, row6_nbits);
    vst1_u8(block_nbits + 7 * DCTSIZE, row7_nbits);
    /* Mask bits not required to specify sign and amplitude of diff. */
    row0_diff = vshlq_u16(row0_diff, row0_lz);
    row1_diff = vshlq_u16(row1_diff, row1_lz);
    row2_diff = vshlq_u16(row2_diff, row2_lz);
    row3_diff = vshlq_u16(row3_diff, row3_lz);
    row4_diff = vshlq_u16(row4_diff, row4_lz);
    row5_diff = vshlq_u16(row5_diff, row5_lz);
    row6_diff = vshlq_u16(row6_diff, row6_lz);
    row7_diff = vshlq_u16(row7_diff, row7_lz);
    row0_diff = vshlq_u16(row0_diff, vnegq_s16(row0_lz));
    row1_diff = vshlq_u16(row1_diff, vnegq_s16(row1_lz));
    row2_diff = vshlq_u16(row2_diff, vnegq_s16(row2_lz));
    row3_diff = vshlq_u16(row3_diff, vnegq_s16(row3_lz));
    row4_diff = vshlq_u16(row4_diff, vnegq_s16(row4_lz));
    row5_diff = vshlq_u16(row5_diff, vnegq_s16(row5_lz));
    row6_diff = vshlq_u16(row6_diff, vnegq_s16(row6_lz));
    row7_diff = vshlq_u16(row7_diff, vnegq_s16(row7_lz));
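    /* This shift pair is the vector form of the masking applied to the DC
     * coefficient above: shifting left by the leading-zero count pushes the
     * bits above the low nbits bits out of the 16-bit lane, and the negated
     * shift count brings the surviving bits back.  For example, a lane
     * holding 0xFFFA (the diff value for -5) has lz = 13; it becomes 0x4000
     * after the left shift and 0x0002 after the right shift, which is the
     * 3-bit encoding of -5.
     */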
    /* Store diff bits. */
    vst1q_u16(block_diff + 0 * DCTSIZE, row0_diff);
    vst1q_u16(block_diff + 1 * DCTSIZE, row1_diff);
    vst1q_u16(block_diff + 2 * DCTSIZE, row2_diff);
    vst1q_u16(block_diff + 3 * DCTSIZE, row3_diff);
    vst1q_u16(block_diff + 4 * DCTSIZE, row4_diff);
    vst1q_u16(block_diff + 5 * DCTSIZE, row5_diff);
    vst1q_u16(block_diff + 6 * DCTSIZE, row6_diff);
    vst1q_u16(block_diff + 7 * DCTSIZE, row7_diff);

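    /* Each iteration consumes one non-zero coefficient: BUILTIN_CLZLL()
     * counts the leading zero bits, i.e. the run of zero-valued
     * coefficients preceding it; shifting the bitmap left by r places that
     * coefficient's bit at bit 63, and after its symbol is emitted one
     * further shift discards it.
     */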
    while (bitmap != 0) {
      r = BUILTIN_CLZLL(bitmap);
      i += r;
      bitmap <<= r;
      nbits = block_nbits[i];
      diff = block_diff[i];
      while (r > 15) {
        /* If run length > 15, emit special run-length-16 codes. */
        PUT_BITS(code_0xf0, size_0xf0)
        r -= 16;
      }
      /* Emit Huffman symbol for run length / number of bits. (F.1.2.2.1) */
      unsigned int rs = (r << 4) + nbits;
      PUT_CODE(actbl->ehufco[rs], actbl->ehufsi[rs], diff)
      i++;
      bitmap <<= 1;
    }
  } else if (bitmap != 0) {
    uint16_t block_abs[DCTSIZE2];
    /* Store absolute value of coefficients. */
    vst1q_u16(block_abs + 0 * DCTSIZE, vreinterpretq_u16_s16(abs_row0));
    vst1q_u16(block_abs + 1 * DCTSIZE, vreinterpretq_u16_s16(abs_row1));
    vst1q_u16(block_abs + 2 * DCTSIZE, vreinterpretq_u16_s16(abs_row2));
    vst1q_u16(block_abs + 3 * DCTSIZE, vreinterpretq_u16_s16(abs_row3));
    vst1q_u16(block_abs + 4 * DCTSIZE, vreinterpretq_u16_s16(abs_row4));
    vst1q_u16(block_abs + 5 * DCTSIZE, vreinterpretq_u16_s16(abs_row5));
    vst1q_u16(block_abs + 6 * DCTSIZE, vreinterpretq_u16_s16(abs_row6));
    vst1q_u16(block_abs + 7 * DCTSIZE, vreinterpretq_u16_s16(abs_row7));
    /* Store diff bits. */
    vst1q_u16(block_diff + 0 * DCTSIZE, row0_diff);
    vst1q_u16(block_diff + 1 * DCTSIZE, row1_diff);
    vst1q_u16(block_diff + 2 * DCTSIZE, row2_diff);
    vst1q_u16(block_diff + 3 * DCTSIZE, row3_diff);
    vst1q_u16(block_diff + 4 * DCTSIZE, row4_diff);
    vst1q_u16(block_diff + 5 * DCTSIZE, row5_diff);
    vst1q_u16(block_diff + 6 * DCTSIZE, row6_diff);
    vst1q_u16(block_diff + 7 * DCTSIZE, row7_diff);

    /* Same as above but must mask diff bits and compute nbits on demand. */
    while (bitmap != 0) {
      r = BUILTIN_CLZLL(bitmap);
      i += r;
      bitmap <<= r;
      lz = BUILTIN_CLZ(block_abs[i]);
      nbits = 32 - lz;
      diff = (unsigned int)(block_diff[i] << lz) >> lz;
      while (r > 15) {
        /* If run length > 15, emit special run-length-16 codes. */
        PUT_BITS(code_0xf0, size_0xf0)
        r -= 16;
      }
      /* Emit Huffman symbol for run length / number of bits. (F.1.2.2.1) */
      unsigned int rs = (r << 4) + nbits;
      PUT_CODE(actbl->ehufco[rs], actbl->ehufsi[rs], diff)
      i++;
      bitmap <<= 1;
    }
  }

  /* If the last coefficient(s) were zero, emit an end-of-block (EOB) code.
   * The value of RS for the EOB code is 0.
   */
  if (i != 64) {
    PUT_BITS(actbl->ehufco[0], actbl->ehufsi[0])
  }

  state_ptr->cur.put_buffer = put_buffer;
  state_ptr->cur.free_bits = free_bits;

  return buffer;
}