// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/pavgpool.h>

void xnn_f32_pavgpool_minmax_ukernel_9p8x__neon_c4(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const float** input,
    size_t input_offset,
    const float* zero,
    const float* multiplier,
    float* buffer,
    float* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(kernel_elements > 9);
  assert(channels != 0);

  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);

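  // Each iteration of the outer loop produces one output pixel. Kernel sums are
  // staged in 'buffer': the first 9 kernel elements are summed in a first pass,
  // further elements are accumulated 8 at a time, and the final 1-8 elements are
  // folded in, scaled by the per-pixel multiplier, clamped, and stored.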
  do {
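    // First pass: sum the first 9 input rows of this output pixel into the
    // scratch buffer, 4 channels at a time.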
    {
      const float* i0 = *input++;
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const float*) ((uintptr_t) i0 + input_offset);
      }
      const float* i1 = *input++;
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const float*) ((uintptr_t) i1 + input_offset);
      }
      const float* i2 = *input++;
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const float*) ((uintptr_t) i2 + input_offset);
      }
      const float* i3 = *input++;
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const float*) ((uintptr_t) i3 + input_offset);
      }
      const float* i4 = *input++;
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const float*) ((uintptr_t) i4 + input_offset);
      }
      const float* i5 = *input++;
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const float*) ((uintptr_t) i5 + input_offset);
      }
      const float* i6 = *input++;
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const float*) ((uintptr_t) i6 + input_offset);
      }
      const float* i7 = *input++;
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const float*) ((uintptr_t) i7 + input_offset);
      }
      const float* i8 = *input++;
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const float*) ((uintptr_t) i8 + input_offset);
      }

      float* b = buffer;
      for (size_t c = 0; c < channels; c += 4) {
        const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
        const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
        const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
        const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
        const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
        const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
        const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
        const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
        const float32x4_t vi8 = vld1q_f32(i8); i8 += 4;

        const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
        const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
        const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
        const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
        const float32x4_t vsum018 = vaddq_f32(vsum01, vi8);
        const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
        const float32x4_t vsum01678 = vaddq_f32(vsum018, vsum67);
        const float32x4_t vsum = vaddq_f32(vsum2345, vsum01678);

        vst1q_f32(b, vsum); b += 4;
      }
    }

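    // Intermediate passes: for kernels larger than 17 elements, accumulate 8
    // more input rows into the buffer per pass until at most 8 rows remain.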
    size_t k = kernel_elements;
    for (k -= 9; k > 8; k -= 8) {
      const float* i0 = *input++;
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const float*) ((uintptr_t) i0 + input_offset);
      }
      const float* i1 = *input++;
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const float*) ((uintptr_t) i1 + input_offset);
      }
      const float* i2 = *input++;
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const float*) ((uintptr_t) i2 + input_offset);
      }
      const float* i3 = *input++;
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const float*) ((uintptr_t) i3 + input_offset);
      }
      const float* i4 = *input++;
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const float*) ((uintptr_t) i4 + input_offset);
      }
      const float* i5 = *input++;
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const float*) ((uintptr_t) i5 + input_offset);
      }
      const float* i6 = *input++;
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const float*) ((uintptr_t) i6 + input_offset);
      }
      const float* i7 = *input++;
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const float*) ((uintptr_t) i7 + input_offset);
      }

      float* b = buffer;
      for (size_t c = 0; c < channels; c += 4) {
        const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
        const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
        const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
        const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
        const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
        const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
        const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
        const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
        const float32x4_t vacc = vld1q_f32(b);

        const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
        const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
        const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
        const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
        const float32x4_t vsum01a = vaddq_f32(vsum01, vacc);
        const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
        const float32x4_t vsum0167a = vaddq_f32(vsum01a, vsum67);
        const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);

        vst1q_f32(b, vsum); b += 4;
      }
    }

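    // Final pass: combine the last 1-8 input rows (unused rows point at the
    // zero page) with the buffered sum, apply the per-pixel multiplier, clamp
    // to [output_min, output_max], and write the result.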
    {
      const float* i0 = input[0];
      assert(i0 != NULL);
      const float* i1 = input[1];
      const float* i2 = input[2];
      const float* i3 = input[3];
      const float* i4 = input[4];
      const float* i5 = input[5];
      const float* i6 = input[6];
      const float* i7 = input[7];
      input = (const float**) ((uintptr_t) input + input_increment);
      if (k < 2) {
        i1 = zero;
      }
      assert(i1 != NULL);
      if (k <= 2) {
        i2 = zero;
      }
      assert(i2 != NULL);
      if (k < 4) {
        i3 = zero;
      }
      assert(i3 != NULL);
      if (k <= 4) {
        i4 = zero;
      }
      assert(i4 != NULL);
      if (k < 6) {
        i5 = zero;
      }
      assert(i5 != NULL);
      if (k <= 6) {
        i6 = zero;
      }
      assert(i6 != NULL);
      if (k < 8) {
        i7 = zero;
      }
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const float*) ((uintptr_t) i0 + input_offset);
      }
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const float*) ((uintptr_t) i1 + input_offset);
      }
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const float*) ((uintptr_t) i2 + input_offset);
      }
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const float*) ((uintptr_t) i3 + input_offset);
      }
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const float*) ((uintptr_t) i4 + input_offset);
      }
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const float*) ((uintptr_t) i5 + input_offset);
      }
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const float*) ((uintptr_t) i6 + input_offset);
      }
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const float*) ((uintptr_t) i7 + input_offset);
      }

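      // Each output pixel has its own scale factor, typically the reciprocal
      // of the number of pooled elements, loaded once per pixel.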
      const float32x4_t vmultiplier = vld1q_dup_f32(multiplier); multiplier += 1;

      size_t c = channels;
      float* b = buffer;
      while (c >= 4) {
        const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
        const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
        const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
        const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
        const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
        const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
        const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
        const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
        const float32x4_t vacc = vld1q_f32(b); b += 4;

        const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
        const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
        const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
        const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
        const float32x4_t vsum01a = vaddq_f32(vsum01, vacc);
        const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
        const float32x4_t vsum0167a = vaddq_f32(vsum01a, vsum67);
        const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);

        float32x4_t vout = vmulq_f32(vsum, vmultiplier);
        vout = vmaxq_f32(vout, voutput_min);
        vout = vminq_f32(vout, voutput_max);

        vst1q_f32(output, vout); output += 4;

        c -= 4;
      }
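      // Remainder: 1-3 channels are left. Full vectors are still loaded (the
      // kernel is annotated XNN_OOB_READS), but only the remaining lanes are
      // stored.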
      if (c != 0) {
        const float32x4_t vi0 = vld1q_f32(i0);
        const float32x4_t vi1 = vld1q_f32(i1);
        const float32x4_t vi2 = vld1q_f32(i2);
        const float32x4_t vi3 = vld1q_f32(i3);
        const float32x4_t vi4 = vld1q_f32(i4);
        const float32x4_t vi5 = vld1q_f32(i5);
        const float32x4_t vi6 = vld1q_f32(i6);
        const float32x4_t vi7 = vld1q_f32(i7);
        const float32x4_t vacc = vld1q_f32(b);

        const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
        const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
        const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
        const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
        const float32x4_t vsum01a = vaddq_f32(vsum01, vacc);
        const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
        const float32x4_t vsum0167a = vaddq_f32(vsum01a, vsum67);
        const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);

        float32x4_t vout = vmulq_f32(vsum, vmultiplier);
        vout = vmaxq_f32(vout, voutput_min);
        vout = vminq_f32(vout, voutput_max);

        float32x2_t vout_lo = vget_low_f32(vout);
        if (c & 2) {
          vst1_f32(output, vout_lo); output += 2;
          vout_lo = vget_high_f32(vout);
        }
        if (c & 1) {
          vst1_lane_f32(output, vout_lo, 0); output += 1;
        }
      }
    }
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}