static inline int
is_nonzero (const uint8x16_t v)
{
	uint64_t u64;
	const uint64x2_t v64 = vreinterpretq_u64_u8(v);

	// Saturating narrow: any nonzero 64-bit lane produces a nonzero 32-bit
	// lane, so the narrowed result is zero iff the whole vector is zero.
	const uint32x2_t v32 = vqmovn_u64(v64);

	vst1_u64(&u64, vreinterpret_u64_u32(v32));
	return u64 != 0;
}

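// Rough map of the delta table below, derived from how it is indexed in
// dec_loop_neon32_lane() further down (index = high nibble, minus one when
// the byte is '/'):
//
//	index 1:      '/'      -> +16  (0x2F + 16 = 63)
//	index 2:      '+'      -> +19  (0x2B + 19 = 62)
//	index 3:      '0'..'9' -> +4   (0x30 +  4 = 52)
//	indices 4, 5: 'A'..'Z' -> -65  (0x41 - 65 =  0)
//	indices 6, 7: 'a'..'z' -> -71  (0x61 - 71 = 26)
//
// Index 0 and out-of-range indices yield a delta of zero; such bytes are
// invalid and are rejected by the validity mask in any case.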
static inline uint8x16_t
delta_lookup (const uint8x16_t v)
{
	const uint8x8_t lut = {
		0, 16, 19, 4, (uint8_t) -65, (uint8_t) -65, (uint8_t) -71, (uint8_t) -71,
	};

	return vcombine_u8(
		vtbl1_u8(lut, vget_low_u8(v)),
		vtbl1_u8(lut, vget_high_u8(v)));
}

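// Sketch of the classification scheme used below (see the SSSE3 decoder for
// the full derivation): lut_hi maps each high nibble to a single class bit,
// and lut_lo maps each low nibble to the set of class bits that are invalid
// in combination with it. The AND of the two lookups is therefore zero for
// every valid base64 character and nonzero for anything else.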
static inline uint8x16_t
dec_loop_neon32_lane (uint8x16_t *lane)
{
	// See the SSSE3 decoder for an explanation of the algorithm.
	const uint8x16_t lut_lo = {
		0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
		0x11, 0x11, 0x13, 0x1A, 0x1B, 0x1B, 0x1B, 0x1A
	};

	const uint8x16_t lut_hi = {
		0x10, 0x10, 0x01, 0x02, 0x04, 0x08, 0x04, 0x08,
		0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10
	};

	const uint8x16_t mask_0F = vdupq_n_u8(0x0F);
	const uint8x16_t mask_2F = vdupq_n_u8(0x2F);

	const uint8x16_t hi_nibbles = vshrq_n_u8(*lane, 4);
	const uint8x16_t lo_nibbles = vandq_u8(*lane, mask_0F);
	const uint8x16_t eq_2F = vceqq_u8(*lane, mask_2F);

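	// Note: vqtbl1q_u8() is an AArch64 intrinsic. A 32-bit NEON build is
	// assumed to provide an equivalent helper elsewhere in this codec,
	// typically emulated with a pair of vtbl2_u8() lookups.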
	const uint8x16_t hi = vqtbl1q_u8(lut_hi, hi_nibbles);
	const uint8x16_t lo = vqtbl1q_u8(lut_lo, lo_nibbles);

	// Now simply add the delta values to the input:
	*lane = vaddq_u8(*lane, delta_lookup(vaddq_u8(eq_2F, hi_nibbles)));

	// Return the validity mask:
	return vandq_u8(lo, hi);
}

static inline void
dec_loop_neon32 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	if (*slen < 64) {
		return;
	}

	// Process blocks of 64 bytes per round. Unlike the SSE codecs, no
	// extra trailing zero bytes are written, so it is not necessary to
	// reserve extra input bytes:
	size_t rounds = *slen / 64;

	*slen -= rounds * 64;	// 64 bytes consumed per round
	*olen += rounds * 48;	// 48 bytes produced per round

	do {
		uint8x16x3_t dec;

		// Load 64 bytes and deinterleave:
		uint8x16x4_t str = vld4q_u8(*s);
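		// After the deinterleaving load, str.val[k] holds the k-th byte
		// of every four-byte group: all first characters of the sixteen
		// groups land in val[0], all second characters in val[1], etc.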

		// Decode each lane, collect a mask of invalid inputs:
		const uint8x16_t classified
			= dec_loop_neon32_lane(&str.val[0])
			| dec_loop_neon32_lane(&str.val[1])
			| dec_loop_neon32_lane(&str.val[2])
			| dec_loop_neon32_lane(&str.val[3]);

		// Check for invalid input: if any byte of the validity mask is
		// nonzero, fall back on bytewise code to do error checking and
		// reporting:
		if (is_nonzero(classified)) {
			break;
		}

		// Compress four bytes into three:
		dec.val[0] = vorrq_u8(vshlq_n_u8(str.val[0], 2), vshrq_n_u8(str.val[1], 4));
		dec.val[1] = vorrq_u8(vshlq_n_u8(str.val[1], 4), vshrq_n_u8(str.val[2], 2));
		dec.val[2] = vorrq_u8(vshlq_n_u8(str.val[2], 6), str.val[3]);
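		// Per four-character group, with decoded 6-bit values aaaaaa,
		// bbbbbb, cccccc and dddddd, the three output bytes are:
		//
		//	dec[0] = aaaaaabb
		//	dec[1] = bbbbcccc
		//	dec[2] = ccdddddd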

		// Interleave and store decoded result:
		vst3q_u8(*o, dec);

		*s += 64;
		*o += 48;

	} while (--rounds > 0);

	// Adjust for any rounds that were skipped:
	*slen += rounds * 64;
	*olen -= rounds * 48;
}
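
// Hypothetical usage sketch (not part of the original file): the caller owns
// the cursor/length bookkeeping and hands any remaining tail to the generic
// bytewise decoder, which also does precise error reporting.
//
//	const uint8_t *src  = input;      // base64 text
//	uint8_t       *dst  = output;     // large enough for the decoded data
//	size_t         slen = input_len;  // input bytes remaining
//	size_t         olen = 0;          // output bytes produced so far
//
//	dec_loop_neon32(&src, &slen, &dst, &olen);
//	// src and dst now point at the unprocessed tail; `slen` input bytes
//	// remain for a bytewise decoder to validate and decode, and `olen`
//	// counts the output bytes already written.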