/* adler32_avx2_tpl.h -- adler32 avx2 vectorized function templates
 * Copyright (C) 2022 Adam Stylinski
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include "../../zbuild.h"
#include <immintrin.h>
#include "../../adler32_fold.h"
#include "../../adler32_p.h"
#include "../../fallback_builtins.h"
#include "adler32_avx2_p.h"

#ifdef X86_SSE42_ADLER32
extern uint32_t adler32_fold_copy_sse42(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len);
extern uint32_t adler32_ssse3(uint32_t adler, const uint8_t *src, size_t len);
#define copy_sub32(a, b, c, d) adler32_fold_copy_sse42(a, b, c, d)
#define sub32(a, b, c) adler32_ssse3(a, b, c)
#else
#define copy_sub32(a, b, c, d) adler32_copy_len_16(adler0, c, b, d, adler1)
#define sub32(a, b, c) adler32_len_16(adler0, b, c, adler1)
#endif
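/* Without the SSE4.2/SSSE3 kernels compiled in, copy_sub32/sub32 fall back to the scalar
 * helpers adler32_copy_len_16/adler32_len_16, reusing the caller's adler0/adler1 halves,
 * so the 16..31 byte path below is still covered, just more slowly. */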

#ifdef COPY
Z_INTERNAL uint32_t adler32_fold_copy_avx2(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len) {
#else
Z_INTERNAL uint32_t adler32_avx2(uint32_t adler, const uint8_t *src, size_t len) {
#endif
    if (src == NULL) return 1L;
    if (len == 0) return adler;

    uint32_t adler0, adler1;
    adler1 = (adler >> 16) & 0xffff;
    adler0 = adler & 0xffff;
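    /* adler packs the two running sums into one 32-bit word: s2 ("adler1") lives in the
     * high 16 bits and s1 ("adler0") in the low 16 bits; they are recombined the same
     * way before returning. */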

rem_peel:
    if (len < 16) {
#ifdef COPY
        return adler32_copy_len_16(adler0, src, dst, len, adler1);
#else
        return adler32_len_16(adler0, src, len, adler1);
#endif
    } else if (len < 32) {
#ifdef COPY
        return copy_sub32(adler, dst, src, len);
#else
        return sub32(adler, src, len);
#endif
    }

    __m256i vs1, vs2;

    const __m256i dot2v = _mm256_setr_epi8(32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
                                           14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
    const __m256i dot3v = _mm256_set1_epi16(1);
    const __m256i zero = _mm256_setzero_si256();
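    /* dot2v holds the descending byte weights 32..1 that _mm256_maddubs_epi16 applies to
     * each 32-byte block; dot3v (all ones) lets _mm256_madd_epi16 widen those 16-bit
     * partial products into eight 32-bit lanes; zero is the second operand for
     * _mm256_sad_epu8, which then reduces to plain byte sums. */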

    while (len >= 32) {
        vs1 = _mm256_zextsi128_si256(_mm_cvtsi32_si128(adler0));
        vs2 = _mm256_zextsi128_si256(_mm_cvtsi32_si128(adler1));
        __m256i vs1_0 = vs1;
        __m256i vs3 = _mm256_setzero_si256();

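        /* vs1_0 snapshots vs1 at the top of each 32-byte iteration and vs3 accumulates
         * those snapshots, so the "32 * vs1" contribution to vs2 can be applied once,
         * with a single shift, after the inner loop. The chunk below is capped at NMAX
         * bytes (the most that can be accumulated before the 32-bit sums could overflow)
         * and rounded down to a whole number of 32-byte blocks. */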
        size_t k = MIN(len, NMAX);
        k -= k % 32;
        len -= k;

        while (k >= 32) {
            /*
               vs1 = adler + sum(c[i])
               vs2 = sum2 + 32 vs1 + sum( (32-i+1) c[i] )
            */
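            /* For reference only (not compiled): a scalar sketch of what one 32-byte block
             * contributes, with the "32 * s1" term being the part this loop defers into vs3:
             *
             *   s2 += 32 * s1;                  // deferred: vs3 += vs1_0, shifted after the loop
             *   for (int j = 0; j < 32; ++j) {
             *       s1 += buf[j];               // vs1 += SAD(bytes)
             *       s2 += (32 - j) * buf[j];    // maddubs/madd with dot2v/dot3v
             *   }
             */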
            __m256i vbuf = _mm256_loadu_si256((__m256i*)src);
            src += 32;
            k -= 32;

            __m256i vs1_sad = _mm256_sad_epu8(vbuf, zero); // Sum of abs diff vs zero: four byte sums, one per 64-bit lane
#ifdef COPY
            _mm256_storeu_si256((__m256i*)dst, vbuf);
            dst += 32;
#endif
            vs1 = _mm256_add_epi32(vs1, vs1_sad);
            vs3 = _mm256_add_epi32(vs3, vs1_0);
            __m256i v_short_sum2 = _mm256_maddubs_epi16(vbuf, dot2v); // multiply 32 uint8s by the 32..1 weights, pair-summing into 16 shorts
            __m256i vsum2 = _mm256_madd_epi16(v_short_sum2, dot3v);   // sum 16 shorts into 8 uint32s
            vs2 = _mm256_add_epi32(vsum2, vs2);
            vs1_0 = vs1;
        }

        /* Defer the multiplication with 32 to outside of the loop */
        vs3 = _mm256_slli_epi32(vs3, 5);
        vs2 = _mm256_add_epi32(vs2, vs3);

        /* The compiler is generating the following sequence for this integer modulus
         * when done the scalar way, in GPRs:

            adler = (s1_unpack[0] % BASE) + (s1_unpack[1] % BASE) + (s1_unpack[2] % BASE) + (s1_unpack[3] % BASE) +
                    (s1_unpack[4] % BASE) + (s1_unpack[5] % BASE) + (s1_unpack[6] % BASE) + (s1_unpack[7] % BASE);

            mov    $0x80078071,%edi    // move magic constant into 32 bit register %edi
            ...
            vmovd  %xmm1,%esi          // move vector lane 0 to 32 bit register %esi
            mov    %rsi,%rax           // zero-extend this value to 64 bit precision in %rax
            imul   %rdi,%rsi           // do a signed multiplication with magic constant and vector element
            shr    $0x2f,%rsi          // shift right by 47
            imul   $0xfff1,%esi,%esi   // do a signed multiplication with value truncated to 32 bits with 0xfff1
            sub    %esi,%eax           // subtract lower 32 bits of original vector value from modified one above
            ...
            // repeats for each element with vpextract instructions

            This is tricky with AVX2 for a number of reasons:
                1.) There's no 64 bit multiplication instruction, but there is a sequence to get there
                2.) There are ways to extend vectors to 64 bit precision, but no simple way to truncate
                    back down to 32 bit precision later (there is in AVX512)
                3.) Full width integer multiplications aren't cheap

            We can, however, use a relatively cheap sequence of horizontal sums.
            Then, we simply do the integer modulus on the resulting scalar value in a GPR. It was
            previously thought that casting to 64 bit precision was needed prior to the horizontal sum, but
            that is simply not the case, as NMAX is defined as the maximum number of scalar sums that can be
            performed on the maximum possible inputs before overflow.
        */

        /* In AVX2-land, this trip through GPRs will probably be unavoidable, as there's no cheap and easy
         * conversion from 64 bit integer to 32 bit (needed for the inexpensive modulus with a constant).
         * This casting to 32 bit is cheap through GPRs (just register aliasing). See above for exactly
         * what the compiler is doing to avoid integer divisions. */
        adler0 = partial_hsum256(vs1) % BASE;
        adler1 = hsum256(vs2) % BASE;
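        /* partial_hsum256 and hsum256 (from adler32_avx2_p.h) reduce the vector accumulators
         * to single 32-bit values; the modulus by BASE is then done in scalar code, where the
         * compiler emits the multiply/shift sequence shown above instead of a division. */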
    }

    adler = adler0 | (adler1 << 16);

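    /* Any tail shorter than 32 bytes is folded in by jumping back to the small-length
     * dispatch at rem_peel, now that adler has been recombined. */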
    if (len) {
        goto rem_peel;
    }

    return adler;
}