// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/gavgpool.h>

void xnn_q8_gavgpool_ukernel_up7__sse2(
    size_t m,
    size_t n,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_q8_avgpool_params params[restrict static 1])
{
  assert(m != 0);
  assert(m <= 7);
  assert(n != 0);

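  // Set up one pointer per input row. Each of i1..i6 starts input_stride
  // bytes after the previous row; rows beyond the actual row count m are
  // redirected to the zero buffer so they contribute nothing to the sum.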
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if (m < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if (m <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if (m < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if (m <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if (m < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if (m <= 6) {
    i6 = zero;
  }

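  // Load the requantization parameters: the bias added to each accumulator,
  // the fixed-point multiplier, the rounding constant, and the right shift.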
  const __m128i vbias = _mm_load_si128((const __m128i*) &params->sse2.bias);
  const __m128i vzero = _mm_setzero_si128();
  const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
  const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
  const __m128i vright_shift = _mm_loadl_epi64((const __m128i*) params->sse2.right_shift);

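  // Main loop: accumulate the 7 rows across 8 channels per iteration.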
  while (n >= 8) {
    const __m128i vi0 = _mm_loadl_epi64((const __m128i*) i0); i0 += 8;
    const __m128i vi1 = _mm_loadl_epi64((const __m128i*) i1); i1 += 8;
    const __m128i vi2 = _mm_loadl_epi64((const __m128i*) i2); i2 += 8;
    const __m128i vi3 = _mm_loadl_epi64((const __m128i*) i3); i3 += 8;
    const __m128i vi4 = _mm_loadl_epi64((const __m128i*) i4); i4 += 8;
    const __m128i vi5 = _mm_loadl_epi64((const __m128i*) i5); i5 += 8;
    const __m128i vi6 = _mm_loadl_epi64((const __m128i*) i6); i6 += 8;

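    // Zero-extend the 8 unsigned 8-bit inputs of each row to 16 bits.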
    const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
    const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
    const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
    const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
    const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
    const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);

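    // Sum the 7 rows pairwise in 16 bits; the maximum possible sum,
    // 7 * 255 = 1785, fits without overflow.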
    const __m128i vsum01 = _mm_add_epi16(vxi0, vxi1);
    const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
    const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);

    const __m128i vsum016 = _mm_add_epi16(vsum01, vxi6);
    const __m128i vsum2345 = _mm_add_epi16(vsum23, vsum45);
    const __m128i vsum = _mm_add_epi16(vsum016, vsum2345);

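    // Widen the sums to 32 bits and add the bias.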
    __m128i vacc_lo = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vsum, vzero));
    __m128i vacc_hi = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vsum, vzero));

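    // The fixed-point scaling below operates on magnitudes: build an all-ones
    // mask for negative lanes, then compute |x| as (x ^ mask) - mask.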
    const __m128i vneg_mask_lo = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_lo);
    const __m128i vneg_mask_hi = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_hi);

    const __m128i vabs_lo0123 = _mm_sub_epi32(_mm_xor_si128(vacc_lo, vneg_mask_lo), vneg_mask_lo);
    const __m128i vabs_hi0123 = _mm_sub_epi32(_mm_xor_si128(vacc_hi, vneg_mask_hi), vneg_mask_hi);

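    // _mm_mul_epu32 multiplies only the even 32-bit lanes, so swap adjacent
    // lanes to expose lanes 1 and 3, then form all four 64-bit products.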
    const __m128i vabs_lo1032 = _mm_shuffle_epi32(vabs_lo0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i vabs_hi1032 = _mm_shuffle_epi32(vabs_hi0123, _MM_SHUFFLE(2, 3, 0, 1));

    const __m128i vabsmul_lo02 = _mm_mul_epu32(vabs_lo0123, vmultiplier);
    const __m128i vabsmul_hi02 = _mm_mul_epu32(vabs_hi0123, vmultiplier);

    const __m128i vabsmul_lo13 = _mm_mul_epu32(vabs_lo1032, vmultiplier);
    const __m128i vabsmul_hi13 = _mm_mul_epu32(vabs_hi1032, vmultiplier);

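    // Add the rounding constant and shift right, all in 64-bit precision.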
    const __m128i vabs_scaled_lo02 = _mm_srl_epi64(_mm_add_epi64(vabsmul_lo02, vrounding), vright_shift);
    const __m128i vabs_scaled_lo13 = _mm_srl_epi64(_mm_add_epi64(vabsmul_lo13, vrounding), vright_shift);
    const __m128i vabs_scaled_hi02 = _mm_srl_epi64(_mm_add_epi64(vabsmul_hi02, vrounding), vright_shift);
    const __m128i vabs_scaled_hi13 = _mm_srl_epi64(_mm_add_epi64(vabsmul_hi13, vrounding), vright_shift);

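    // Gather the low 32 bits of each 64-bit result and restore lane order 0123.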
    const __m128i vabs_scaled_lo0213 = _mm_castps_si128(
        _mm_shuffle_ps(_mm_castsi128_ps(vabs_scaled_lo02), _mm_castsi128_ps(vabs_scaled_lo13), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i vabs_scaled_hi0213 = _mm_castps_si128(
        _mm_shuffle_ps(_mm_castsi128_ps(vabs_scaled_hi02), _mm_castsi128_ps(vabs_scaled_hi13), _MM_SHUFFLE(2, 0, 2, 0)));

    const __m128i vabs_scaled_lo = _mm_shuffle_epi32(vabs_scaled_lo0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i vabs_scaled_hi = _mm_shuffle_epi32(vabs_scaled_hi0213, _MM_SHUFFLE(3, 1, 2, 0));

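    // Re-apply the signs saved before taking absolute values.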
    const __m128i vscaled_lo = _mm_sub_epi32(_mm_xor_si128(vabs_scaled_lo, vneg_mask_lo), vneg_mask_lo);
    const __m128i vscaled_hi = _mm_sub_epi32(_mm_xor_si128(vabs_scaled_hi, vneg_mask_hi), vneg_mask_hi);

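    // Pack to 16 bits with signed saturation, add the output zero point,
    // pack to 8 bits with unsigned saturation, and clamp to
    // [output_min, output_max].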
    __m128i vout = _mm_packs_epi32(vscaled_lo, vscaled_hi);
    vout = _mm_adds_epi16(vout, _mm_load_si128((const __m128i*) params->sse2.output_zero_point));
    vout = _mm_packus_epi16(vout, vout);
    vout = _mm_min_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_max));
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_min));

    _mm_storel_epi64((__m128i*) output, vout); output += 8;

    n -= 8;
  }
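  // Remainder: the last 1-7 channels repeat the main-loop computation; the
  // loads still read a full 8 bytes, but only n bytes of the result are stored.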
  if (n != 0) {
    const __m128i vi0 = _mm_loadl_epi64((const __m128i*) i0);
    const __m128i vi1 = _mm_loadl_epi64((const __m128i*) i1);
    const __m128i vi2 = _mm_loadl_epi64((const __m128i*) i2);
    const __m128i vi3 = _mm_loadl_epi64((const __m128i*) i3);
    const __m128i vi4 = _mm_loadl_epi64((const __m128i*) i4);
    const __m128i vi5 = _mm_loadl_epi64((const __m128i*) i5);
    const __m128i vi6 = _mm_loadl_epi64((const __m128i*) i6);

    const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
    const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
    const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
    const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
    const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
    const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);

    const __m128i vsum01 = _mm_add_epi16(vxi0, vxi1);
    const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
    const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);

    const __m128i vsum016 = _mm_add_epi16(vsum01, vxi6);
    const __m128i vsum2345 = _mm_add_epi16(vsum23, vsum45);
    const __m128i vsum = _mm_add_epi16(vsum016, vsum2345);

    __m128i vacc_lo = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vsum, vzero));
    __m128i vacc_hi = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vsum, vzero));

    const __m128i vneg_mask_lo = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_lo);
    const __m128i vneg_mask_hi = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_hi);

    const __m128i vabs_lo0123 = _mm_sub_epi32(_mm_xor_si128(vacc_lo, vneg_mask_lo), vneg_mask_lo);
    const __m128i vabs_hi0123 = _mm_sub_epi32(_mm_xor_si128(vacc_hi, vneg_mask_hi), vneg_mask_hi);

    const __m128i vabs_lo1032 = _mm_shuffle_epi32(vabs_lo0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i vabs_hi1032 = _mm_shuffle_epi32(vabs_hi0123, _MM_SHUFFLE(2, 3, 0, 1));

    const __m128i vabsmul_lo02 = _mm_mul_epu32(vabs_lo0123, vmultiplier);
    const __m128i vabsmul_hi02 = _mm_mul_epu32(vabs_hi0123, vmultiplier);

    const __m128i vabsmul_lo13 = _mm_mul_epu32(vabs_lo1032, vmultiplier);
    const __m128i vabsmul_hi13 = _mm_mul_epu32(vabs_hi1032, vmultiplier);

    const __m128i vabs_scaled_lo02 = _mm_srl_epi64(_mm_add_epi64(vabsmul_lo02, vrounding), vright_shift);
    const __m128i vabs_scaled_lo13 = _mm_srl_epi64(_mm_add_epi64(vabsmul_lo13, vrounding), vright_shift);
    const __m128i vabs_scaled_hi02 = _mm_srl_epi64(_mm_add_epi64(vabsmul_hi02, vrounding), vright_shift);
    const __m128i vabs_scaled_hi13 = _mm_srl_epi64(_mm_add_epi64(vabsmul_hi13, vrounding), vright_shift);

    const __m128i vabs_scaled_lo0213 = _mm_castps_si128(
        _mm_shuffle_ps(_mm_castsi128_ps(vabs_scaled_lo02), _mm_castsi128_ps(vabs_scaled_lo13), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i vabs_scaled_hi0213 = _mm_castps_si128(
        _mm_shuffle_ps(_mm_castsi128_ps(vabs_scaled_hi02), _mm_castsi128_ps(vabs_scaled_hi13), _MM_SHUFFLE(2, 0, 2, 0)));

    const __m128i vabs_scaled_lo = _mm_shuffle_epi32(vabs_scaled_lo0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i vabs_scaled_hi = _mm_shuffle_epi32(vabs_scaled_hi0213, _MM_SHUFFLE(3, 1, 2, 0));

    const __m128i vscaled_lo = _mm_sub_epi32(_mm_xor_si128(vabs_scaled_lo, vneg_mask_lo), vneg_mask_lo);
    const __m128i vscaled_hi = _mm_sub_epi32(_mm_xor_si128(vabs_scaled_hi, vneg_mask_hi), vneg_mask_hi);

    __m128i vout = _mm_packs_epi32(vscaled_lo, vscaled_hi);
    vout = _mm_adds_epi16(vout, _mm_load_si128((const __m128i*) params->sse2.output_zero_point));
    vout = _mm_packus_epi16(vout, vout);
    vout = _mm_min_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_max));
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_min));

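    // Store the remaining n (1-7) bytes: 4, 2, then 1 at a time, shifting the
    // consumed lanes out of the vector after each store.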
    if (n & 4) {
      *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout);
      output += 4;
      vout = _mm_srli_epi64(vout, 32);
    }
    if (n & 2) {
      *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout, 0);
      output += 2;
      vout = _mm_srli_epi32(vout, 16);
    }
    if (n & 1) {
      *((uint8_t*) output) = (uint8_t) _mm_cvtsi128_si32(vout);
    }
  }
}