// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/avgpool.h>


xnn_f16_avgpool_minmax_ukernel_9p8x__neonfp16arith_c8(size_t output_pixels,size_t kernel_elements,size_t channels,const void ** input,size_t input_offset,const void * zero,void * buffer,void * output,size_t input_increment,size_t output_increment,const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS (1)])13 void xnn_f16_avgpool_minmax_ukernel_9p8x__neonfp16arith_c8(
14 size_t output_pixels,
15 size_t kernel_elements,
16 size_t channels,
17 const void** input,
18 size_t input_offset,
19 const void* zero,
20 void* buffer,
21 void* output,
22 size_t input_increment,
23 size_t output_increment,
24 const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
25 {
26 assert(output_pixels != 0);
27 assert(kernel_elements > 9);
28 assert(channels != 0);
29
30 const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->neon.scale));
31 const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->neon.min));
32 const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->neon.max));
33
34 do {
35 {
36 const __fp16* i0 = *input++;
37 assert(i0 != NULL);
38 if XNN_UNPREDICTABLE(i0 != zero) {
39 i0 = (const __fp16*) ((uintptr_t) i0 + input_offset);
40 }
41 const __fp16* i1 = *input++;
42 assert(i1 != NULL);
43 if XNN_UNPREDICTABLE(i1 != zero) {
44 i1 = (const __fp16*) ((uintptr_t) i1 + input_offset);
45 }
46 const __fp16* i2 = *input++;
47 assert(i2 != NULL);
48 if XNN_UNPREDICTABLE(i2 != zero) {
49 i2 = (const __fp16*) ((uintptr_t) i2 + input_offset);
50 }
51 const __fp16* i3 = *input++;
52 assert(i3 != NULL);
53 if XNN_UNPREDICTABLE(i3 != zero) {
54 i3 = (const __fp16*) ((uintptr_t) i3 + input_offset);
55 }
56 const __fp16* i4 = *input++;
57 assert(i4 != NULL);
58 if XNN_UNPREDICTABLE(i4 != zero) {
59 i4 = (const __fp16*) ((uintptr_t) i4 + input_offset);
60 }
61 const __fp16* i5 = *input++;
62 assert(i5 != NULL);
63 if XNN_UNPREDICTABLE(i5 != zero) {
64 i5 = (const __fp16*) ((uintptr_t) i5 + input_offset);
65 }
66 const __fp16* i6 = *input++;
67 assert(i6 != NULL);
68 if XNN_UNPREDICTABLE(i6 != zero) {
69 i6 = (const __fp16*) ((uintptr_t) i6 + input_offset);
70 }
71 const __fp16* i7 = *input++;
72 assert(i7 != NULL);
73 if XNN_UNPREDICTABLE(i7 != zero) {
74 i7 = (const __fp16*) ((uintptr_t) i7 + input_offset);
75 }
76 const __fp16* i8 = *input++;
77 assert(i8 != NULL);
78 if XNN_UNPREDICTABLE(i8 != zero) {
79 i8 = (const __fp16*) ((uintptr_t) i8 + input_offset);
80 }
81
82 __fp16* b = (__fp16*) buffer;
83 for (size_t c = 0; c < channels; c += 8) {
84 const float16x8_t vi0 = vld1q_f16(i0); i0 += 8;
85 const float16x8_t vi1 = vld1q_f16(i1); i1 += 8;
86 const float16x8_t vi2 = vld1q_f16(i2); i2 += 8;
87 const float16x8_t vi3 = vld1q_f16(i3); i3 += 8;
88 const float16x8_t vi4 = vld1q_f16(i4); i4 += 8;
89 const float16x8_t vi5 = vld1q_f16(i5); i5 += 8;
90 const float16x8_t vi6 = vld1q_f16(i6); i6 += 8;
91 const float16x8_t vi7 = vld1q_f16(i7); i7 += 8;
92 const float16x8_t vi8 = vld1q_f16(i8); i8 += 8;
93
94 const float16x8_t vsum01 = vaddq_f16(vi0, vi1);
95 const float16x8_t vsum23 = vaddq_f16(vi2, vi3);
96 const float16x8_t vsum45 = vaddq_f16(vi4, vi5);
97 const float16x8_t vsum67 = vaddq_f16(vi6, vi7);
98 const float16x8_t vsum018 = vaddq_f16(vsum01, vi8);
99 const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45);
100 const float16x8_t vsum01678 = vaddq_f16(vsum018, vsum67);
101 const float16x8_t vsum = vaddq_f16(vsum2345, vsum01678);
102
103 vst1q_f16(b, vsum); b += 8;
104 }
105 }
106
107 size_t k = kernel_elements;
108 for (k -= 9; k > 8; k -= 8) {
109 const __fp16* i0 = (const __fp16*) *input++;
110 assert(i0 != NULL);
111 if XNN_UNPREDICTABLE(i0 != zero) {
112 i0 = (const __fp16*) ((uintptr_t) i0 + input_offset);
113 }
114 const __fp16* i1 = (const __fp16*) *input++;
115 assert(i1 != NULL);
116 if XNN_UNPREDICTABLE(i1 != zero) {
117 i1 = (const __fp16*) ((uintptr_t) i1 + input_offset);
118 }
119 const __fp16* i2 = (const __fp16*) *input++;
120 assert(i2 != NULL);
121 if XNN_UNPREDICTABLE(i2 != zero) {
122 i2 = (const __fp16*) ((uintptr_t) i2 + input_offset);
123 }
124 const __fp16* i3 = (const __fp16*) *input++;
125 assert(i3 != NULL);
126 if XNN_UNPREDICTABLE(i3 != zero) {
127 i3 = (const __fp16*) ((uintptr_t) i3 + input_offset);
128 }
129 const __fp16* i4 = (const __fp16*) *input++;
130 assert(i4 != NULL);
131 if XNN_UNPREDICTABLE(i4 != zero) {
132 i4 = (const __fp16*) ((uintptr_t) i4 + input_offset);
133 }
134 const __fp16* i5 = (const __fp16*) *input++;
135 assert(i5 != NULL);
136 if XNN_UNPREDICTABLE(i5 != zero) {
137 i5 = (const __fp16*) ((uintptr_t) i5 + input_offset);
138 }
139 const __fp16* i6 = (const __fp16*) *input++;
140 assert(i6 != NULL);
141 if XNN_UNPREDICTABLE(i6 != zero) {
142 i6 = (const __fp16*) ((uintptr_t) i6 + input_offset);
143 }
144 const __fp16* i7 = (const __fp16*) *input++;
145 assert(i7 != NULL);
146 if XNN_UNPREDICTABLE(i7 != zero) {
147 i7 = (const __fp16*) ((uintptr_t) i7 + input_offset);
148 }
149
150 __fp16* b = (__fp16*) buffer;
151 for (size_t c = 0; c < channels; c += 8) {
152 const float16x8_t vi0 = vld1q_f16(i0); i0 += 8;
153 const float16x8_t vi1 = vld1q_f16(i1); i1 += 8;
154 const float16x8_t vi2 = vld1q_f16(i2); i2 += 8;
155 const float16x8_t vi3 = vld1q_f16(i3); i3 += 8;
156 const float16x8_t vi4 = vld1q_f16(i4); i4 += 8;
157 const float16x8_t vi5 = vld1q_f16(i5); i5 += 8;
158 const float16x8_t vi6 = vld1q_f16(i6); i6 += 8;
159 const float16x8_t vi7 = vld1q_f16(i7); i7 += 8;
160 const float16x8_t vacc = vld1q_f16(b);
161
162 const float16x8_t vsum01 = vaddq_f16(vi0, vi1);
163 const float16x8_t vsum23 = vaddq_f16(vi2, vi3);
164 const float16x8_t vsum45 = vaddq_f16(vi4, vi5);
165 const float16x8_t vsum67 = vaddq_f16(vi6, vi7);
166 const float16x8_t vsum01a = vaddq_f16(vsum01, vacc);
167 const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45);
168 const float16x8_t vsum0167a = vaddq_f16(vsum01a, vsum67);
169 const float16x8_t vsum = vaddq_f16(vsum2345, vsum0167a);
170
171 vst1q_f16(b, vsum); b += 8;
172 }
173 }
174
175 assert(k >= 1);
176 {
177 const __fp16* i0 = (const __fp16*) input[0];
178 assert(i0 != NULL);
179 const __fp16* i1 = (const __fp16*) input[1];
180 const __fp16* i2 = (const __fp16*) input[2];
181 const __fp16* i3 = (const __fp16*) input[3];
182 const __fp16* i4 = (const __fp16*) input[4];
183 const __fp16* i5 = (const __fp16*) input[5];
184 const __fp16* i6 = (const __fp16*) input[6];
185 const __fp16* i7 = (const __fp16*) input[7];
186 input = (const void**) ((uintptr_t) input + input_increment);
187 if (k < 2) {
188 i1 = (const __fp16*) zero;
189 }
190 assert(i1 != NULL);
191 if (k <= 2) {
192 i2 = (const __fp16*) zero;
193 }
194 assert(i2 != NULL);
195 if (k < 4) {
196 i3 = (const __fp16*) zero;
197 }
198 assert(i3 != NULL);
199 if (k <= 4) {
200 i4 = (const __fp16*) zero;
201 }
202 assert(i4 != NULL);
203 if (k < 6) {
204 i5 = (const __fp16*) zero;
205 }
206 assert(i5 != NULL);
207 if (k <= 6) {
208 i6 = (const __fp16*) zero;
209 }
210 assert(i6 != NULL);
211 if (k < 8) {
212 i7 = (const __fp16*) zero;
213 }
214 assert(i7 != NULL);
215 if XNN_UNPREDICTABLE(i0 != zero) {
216 i0 = (const __fp16*) ((uintptr_t) i0 + input_offset);
217 }
218 if XNN_UNPREDICTABLE(i1 != zero) {
219 i1 = (const __fp16*) ((uintptr_t) i1 + input_offset);
220 }
221 if XNN_UNPREDICTABLE(i2 != zero) {
222 i2 = (const __fp16*) ((uintptr_t) i2 + input_offset);
223 }
224 if XNN_UNPREDICTABLE(i3 != zero) {
225 i3 = (const __fp16*) ((uintptr_t) i3 + input_offset);
226 }
227 if XNN_UNPREDICTABLE(i4 != zero) {
228 i4 = (const __fp16*) ((uintptr_t) i4 + input_offset);
229 }
230 if XNN_UNPREDICTABLE(i5 != zero) {
231 i5 = (const __fp16*) ((uintptr_t) i5 + input_offset);
232 }
233 if XNN_UNPREDICTABLE(i6 != zero) {
234 i6 = (const __fp16*) ((uintptr_t) i6 + input_offset);
235 }
236 if XNN_UNPREDICTABLE(i7 != zero) {
237 i7 = (const __fp16*) ((uintptr_t) i7 + input_offset);
238 }
239
240 size_t c = channels;
241 __fp16* b = (__fp16*) buffer;
242 while (c >= 8) {
243 const float16x8_t vi0 = vld1q_f16(i0); i0 += 8;
244 const float16x8_t vi1 = vld1q_f16(i1); i1 += 8;
245 const float16x8_t vi2 = vld1q_f16(i2); i2 += 8;
246 const float16x8_t vi3 = vld1q_f16(i3); i3 += 8;
247 const float16x8_t vi4 = vld1q_f16(i4); i4 += 8;
248 const float16x8_t vi5 = vld1q_f16(i5); i5 += 8;
249 const float16x8_t vi6 = vld1q_f16(i6); i6 += 8;
250 const float16x8_t vi7 = vld1q_f16(i7); i7 += 8;
251 const float16x8_t vacc = vld1q_f16(b); b += 8;
252
253 const float16x8_t vsum01 = vaddq_f16(vi0, vi1);
254 const float16x8_t vsum23 = vaddq_f16(vi2, vi3);
255 const float16x8_t vsum45 = vaddq_f16(vi4, vi5);
256 const float16x8_t vsum67 = vaddq_f16(vi6, vi7);
257 const float16x8_t vsum01a = vaddq_f16(vsum01, vacc);
258 const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45);
259 const float16x8_t vsum0167a = vaddq_f16(vsum01a, vsum67);
260 const float16x8_t vsum = vaddq_f16(vsum2345, vsum0167a);
261
262 float16x8_t vout = vmulq_f16(vsum, vscale);
263 vout = vmaxq_f16(vout, vmin);
264 vout = vminq_f16(vout, vmax);
265
266 vst1q_f16(output, vout); output = (__fp16*) output + 8;
267
268 c -= 8;
269 }
270 if (c != 0) {
271 const float16x8_t vi0 = vld1q_f16(i0);
272 const float16x8_t vi1 = vld1q_f16(i1);
273 const float16x8_t vi2 = vld1q_f16(i2);
274 const float16x8_t vi3 = vld1q_f16(i3);
275 const float16x8_t vi4 = vld1q_f16(i4);
276 const float16x8_t vi5 = vld1q_f16(i5);
277 const float16x8_t vi6 = vld1q_f16(i6);
278 const float16x8_t vi7 = vld1q_f16(i7);
279 const float16x8_t vacc = vld1q_f16(b);
280
281 const float16x8_t vsum01 = vaddq_f16(vi0, vi1);
282 const float16x8_t vsum23 = vaddq_f16(vi2, vi3);
283 const float16x8_t vsum45 = vaddq_f16(vi4, vi5);
284 const float16x8_t vsum67 = vaddq_f16(vi6, vi7);
285 const float16x8_t vsum01a = vaddq_f16(vsum01, vacc);
286 const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45);
287 const float16x8_t vsum0167a = vaddq_f16(vsum01a, vsum67);
288 const float16x8_t vsum = vaddq_f16(vsum2345, vsum0167a);
289
290 float16x8_t vout = vmulq_f16(vsum, vscale);
291 vout = vmaxq_f16(vout, vmin);
292 vout = vminq_f16(vout, vmax);
293
294 float16x4_t vout_lo = vget_low_f16(vout);
295 if (c & 4) {
296 vst1_f16(output, vout_lo); output = (__fp16*) output + 4;
297 vout_lo = vget_high_f16(vout);
298 }
299 if (c & 2) {
300 vst1_lane_u32(output, vreinterpret_u32_f16(vout_lo), 0); output = (__fp16*) output + 2;
301 vout_lo = vext_f16(vout_lo, vout_lo, 2);
302 }
303 if (c & 1) {
304 vst1_lane_f16(output, vout_lo, 0); output = (__fp16*) output + 1;
305 }
306 }
307 }
308 output = (__fp16*) ((uintptr_t) output + output_increment);
309 } while (--output_pixels != 0);
310 }
311