// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/avgpool.h>
#include <xnnpack/common.h>

void xnn_q8_avgpool_ukernel_mp9p8q__neon(
    size_t n,
    size_t ks,
    size_t kc,
    const uint8_t** input,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_q8_avgpool_params params[restrict static 1])
{
  assert(n != 0);
  assert(ks > 9);
  assert(kc != 0);

  const int32x4_t vbias = vld1q_dup_s32(&params->neon.bias);
#if XNN_ARCH_ARM64
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier);
#else
  const int32x2_t vmultiplier = vld1_dup_s32(&params->neon.multiplier);
#endif
  const int64x2_t vleft_shift = vld1q_dup_s64(&params->neon.left_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->neon.output_max);

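  // Each iteration of the outer loop produces one output pixel: ks pooling
  // rows are summed per channel (kc channels), then the int32 accumulator is
  // requantized to uint8.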
  do {
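    // First pass: read the first 9 rows and initialize the 32-bit
    // accumulator buffer with bias plus their per-channel sums.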
    {
      const uint8_t* i0 = *input++;
      const uint8_t* i1 = *input++;
      const uint8_t* i2 = *input++;
      const uint8_t* i3 = *input++;
      const uint8_t* i4 = *input++;
      const uint8_t* i5 = *input++;
      const uint8_t* i6 = *input++;
      const uint8_t* i7 = *input++;
      const uint8_t* i8 = *input++;

      int32_t* acc = buffer;
      for (size_t k = 0; k < kc; k += 8) {
        const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;
        const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;
        const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;
        const uint8x8_t vi3 = vld1_u8(i3); i3 += 8;
        const uint8x8_t vi4 = vld1_u8(i4); i4 += 8;
        const uint8x8_t vi5 = vld1_u8(i5); i5 += 8;
        const uint8x8_t vi6 = vld1_u8(i6); i6 += 8;
        const uint8x8_t vi7 = vld1_u8(i7); i7 += 8;
        const uint8x8_t vi8 = vld1_u8(i8); i8 += 8;

        const uint16x8_t vsum018 = vaddw_u8(vaddl_u8(vi0, vi1), vi8);
        const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
        const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
        const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);

        const uint16x8_t vsum2345 = vaddq_u16(vsum23, vsum45);
        const uint16x8_t vsum01678 = vaddq_u16(vsum018, vsum67);
        const uint16x8_t vsum = vaddq_u16(vsum2345, vsum01678);

        const int32x4_t vacc_lo = vaddw_s16(vbias, vreinterpret_s16_u16(vget_low_u16(vsum)));
        const int32x4_t vacc_hi = vaddw_s16(vbias, vreinterpret_s16_u16(vget_high_u16(vsum)));

        vst1q_s32(acc, vacc_lo); acc += 4;
        vst1q_s32(acc, vacc_hi); acc += 4;
      }
    }

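    // Intermediate passes: accumulate 8 more rows per pass into the buffer
    // while more than 8 rows remain.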
    size_t m = ks;
    for (m -= 9; m > 8; m -= 8) {
      const uint8_t* i0 = *input++;
      const uint8_t* i1 = *input++;
      const uint8_t* i2 = *input++;
      const uint8_t* i3 = *input++;
      const uint8_t* i4 = *input++;
      const uint8_t* i5 = *input++;
      const uint8_t* i6 = *input++;
      const uint8_t* i7 = *input++;

      int32_t* acc = buffer;
      for (size_t k = 0; k < kc; k += 8) {
        const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;
        const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;
        const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;
        const uint8x8_t vi3 = vld1_u8(i3); i3 += 8;
        const uint8x8_t vi4 = vld1_u8(i4); i4 += 8;
        const uint8x8_t vi5 = vld1_u8(i5); i5 += 8;
        const uint8x8_t vi6 = vld1_u8(i6); i6 += 8;
        const uint8x8_t vi7 = vld1_u8(i7); i7 += 8;
        int32x4_t vacc_lo = vld1q_s32(acc);
        int32x4_t vacc_hi = vld1q_s32(acc + 4);

        const uint16x8_t vsum01 = vaddl_u8(vi0, vi1);
        const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
        const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
        const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);

        const uint16x8_t vsum0123 = vaddq_u16(vsum01, vsum23);
        const uint16x8_t vsum4567 = vaddq_u16(vsum45, vsum67);
        const uint16x8_t vsum = vaddq_u16(vsum0123, vsum4567);

        vacc_lo = vaddw_s16(vacc_lo, vreinterpret_s16_u16(vget_low_u16(vsum)));
        vacc_hi = vaddw_s16(vacc_hi, vreinterpret_s16_u16(vget_high_u16(vsum)));

        vst1q_s32(acc, vacc_lo); acc += 4;
        vst1q_s32(acc, vacc_hi); acc += 4;
      }
    }

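    // Last pass: accumulate the final 1-8 rows, requantize, and write the
    // output pixel.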
    {
      const uint8_t* i0 = input[0];
      const uint8_t* i1 = input[1];
      const uint8_t* i2 = input[2];
      const uint8_t* i3 = input[3];
      const uint8_t* i4 = input[4];
      const uint8_t* i5 = input[5];
      const uint8_t* i6 = input[6];
      const uint8_t* i7 = input[7];
      input = (const uint8_t**) ((uintptr_t) input + input_increment);
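      // With m rows left (1 <= m <= 8), redirect the unused row pointers to
      // the zero buffer so they contribute nothing to the sum.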
      if (m < 2) {
        i1 = zero;
      }
      if (m <= 2) {
        i2 = zero;
      }
      if (m < 4) {
        i3 = zero;
      }
      if (m <= 4) {
        i4 = zero;
      }
      if (m < 6) {
        i5 = zero;
      }
      if (m <= 6) {
        i6 = zero;
      }
      if (m != 8) {
        i7 = zero;
      }

      size_t k = kc;
      int32_t* acc = buffer;
      while (k >= 8) {
        const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;
        const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;
        const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;
        const uint8x8_t vi3 = vld1_u8(i3); i3 += 8;
        const uint8x8_t vi4 = vld1_u8(i4); i4 += 8;
        const uint8x8_t vi5 = vld1_u8(i5); i5 += 8;
        const uint8x8_t vi6 = vld1_u8(i6); i6 += 8;
        const uint8x8_t vi7 = vld1_u8(i7); i7 += 8;
        int32x4_t vacc_lo = vld1q_s32(acc); acc += 4;
        int32x4_t vacc_hi = vld1q_s32(acc); acc += 4;

        const int16x8_t vsum01 = vreinterpretq_s16_u16(vaddl_u8(vi0, vi1));
        const int16x8_t vsum23 = vreinterpretq_s16_u16(vaddl_u8(vi2, vi3));
        const int16x8_t vsum45 = vreinterpretq_s16_u16(vaddl_u8(vi4, vi5));
        const int16x8_t vsum67 = vreinterpretq_s16_u16(vaddl_u8(vi6, vi7));

        const int16x8_t vsum0123 = vaddq_s16(vsum01, vsum23);
        const int16x8_t vsum4567 = vaddq_s16(vsum45, vsum67);
        const int16x8_t vsum = vaddq_s16(vsum0123, vsum4567);

        vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum));
        vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum));

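        // Requantize: widen to 64 bits, multiply by the fixed-point
        // multiplier, add -1 to products of negative accumulators, then apply
        // a rounding right shift, rounding to nearest with ties away from
        // zero.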
        const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));
        const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));

#if XNN_ARCH_ARM64
        const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vget_low_s32(vmultiplier));
        const int64x2_t vproduct23 = vmull_high_s32(vacc_lo, vmultiplier);
        const int64x2_t vproduct45 = vmull_s32(vget_low_s32(vacc_hi), vget_low_s32(vmultiplier));
        const int64x2_t vproduct67 = vmull_high_s32(vacc_hi, vmultiplier);

        const int64x2_t vadjusted_product01 = vaddw_s32(vproduct01, vget_low_s32(vneg_mask_lo));
        const int64x2_t vadjusted_product23 = vaddw_high_s32(vproduct23, vneg_mask_lo);
        const int64x2_t vadjusted_product45 = vaddw_s32(vproduct45, vget_low_s32(vneg_mask_hi));
        const int64x2_t vadjusted_product67 = vaddw_high_s32(vproduct67, vneg_mask_hi);
#else
        const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vmultiplier);
        const int64x2_t vproduct23 = vmull_s32(vget_high_s32(vacc_lo), vmultiplier);
        const int64x2_t vproduct45 = vmull_s32(vget_low_s32(vacc_hi), vmultiplier);
        const int64x2_t vproduct67 = vmull_s32(vget_high_s32(vacc_hi), vmultiplier);

        const int64x2_t vadjusted_product01 = vaddw_s32(vproduct01, vget_low_s32(vneg_mask_lo));
        const int64x2_t vadjusted_product23 = vaddw_s32(vproduct23, vget_high_s32(vneg_mask_lo));
        const int64x2_t vadjusted_product45 = vaddw_s32(vproduct45, vget_low_s32(vneg_mask_hi));
        const int64x2_t vadjusted_product67 = vaddw_s32(vproduct67, vget_high_s32(vneg_mask_hi));
#endif

        const int64x2_t vscaled_acc01 = vrshlq_s64(vadjusted_product01, vleft_shift);
        const int64x2_t vscaled_acc23 = vrshlq_s64(vadjusted_product23, vleft_shift);
        const int64x2_t vscaled_acc45 = vrshlq_s64(vadjusted_product45, vleft_shift);
        const int64x2_t vscaled_acc67 = vrshlq_s64(vadjusted_product67, vleft_shift);

#if XNN_ARCH_ARM64
        vacc_lo = vuzp1q_s32(vreinterpretq_s32_s64(vscaled_acc01), vreinterpretq_s32_s64(vscaled_acc23));
        vacc_hi = vuzp1q_s32(vreinterpretq_s32_s64(vscaled_acc45), vreinterpretq_s32_s64(vscaled_acc67));

        const int16x8_t vacc = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
#else
        vacc_lo = vcombine_s32(vmovn_s64(vscaled_acc01), vmovn_s64(vscaled_acc23));
        vacc_hi = vcombine_s32(vmovn_s64(vscaled_acc45), vmovn_s64(vscaled_acc67));

        const int16x8_t vacc = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi)), voutput_zero_point);
#endif

        uint8x8_t vout = vqmovun_s16(vacc);
        vout = vmax_u8(vout, voutput_min);
        vout = vmin_u8(vout, voutput_max);

        vst1_u8(output, vout); output += 8;

        k -= 8;
      }
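      // Handle the remaining 1-7 channels when kc is not a multiple of 8.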
      if (k != 0) {
        const uint8x8_t vi0 = vld1_u8(i0);
        const uint8x8_t vi1 = vld1_u8(i1);
        const uint8x8_t vi2 = vld1_u8(i2);
        const uint8x8_t vi3 = vld1_u8(i3);
        const uint8x8_t vi4 = vld1_u8(i4);
        const uint8x8_t vi5 = vld1_u8(i5);
        const uint8x8_t vi6 = vld1_u8(i6);
        const uint8x8_t vi7 = vld1_u8(i7);
        int32x4_t vacc_lo = vld1q_s32(acc); acc += 4;
        int32x4_t vacc_hi = vld1q_s32(acc);

        const int16x8_t vsum01 = vreinterpretq_s16_u16(vaddl_u8(vi0, vi1));
        const int16x8_t vsum23 = vreinterpretq_s16_u16(vaddl_u8(vi2, vi3));
        const int16x8_t vsum45 = vreinterpretq_s16_u16(vaddl_u8(vi4, vi5));
        const int16x8_t vsum67 = vreinterpretq_s16_u16(vaddl_u8(vi6, vi7));

        const int16x8_t vsum0123 = vaddq_s16(vsum01, vsum23);
        const int16x8_t vsum4567 = vaddq_s16(vsum45, vsum67);
        const int16x8_t vsum = vaddq_s16(vsum0123, vsum4567);

        vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum));
        vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum));

        const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));
        const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));

#if XNN_ARCH_ARM64
        const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vget_low_s32(vmultiplier));
        const int64x2_t vproduct23 = vmull_high_s32(vacc_lo, vmultiplier);
        const int64x2_t vproduct45 = vmull_s32(vget_low_s32(vacc_hi), vget_low_s32(vmultiplier));
        const int64x2_t vproduct67 = vmull_high_s32(vacc_hi, vmultiplier);

        const int64x2_t vadjusted_product01 = vaddw_s32(vproduct01, vget_low_s32(vneg_mask_lo));
        const int64x2_t vadjusted_product23 = vaddw_high_s32(vproduct23, vneg_mask_lo);
        const int64x2_t vadjusted_product45 = vaddw_s32(vproduct45, vget_low_s32(vneg_mask_hi));
        const int64x2_t vadjusted_product67 = vaddw_high_s32(vproduct67, vneg_mask_hi);
#else
        const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vmultiplier);
        const int64x2_t vproduct23 = vmull_s32(vget_high_s32(vacc_lo), vmultiplier);
        const int64x2_t vproduct45 = vmull_s32(vget_low_s32(vacc_hi), vmultiplier);
        const int64x2_t vproduct67 = vmull_s32(vget_high_s32(vacc_hi), vmultiplier);

        const int64x2_t vadjusted_product01 = vaddw_s32(vproduct01, vget_low_s32(vneg_mask_lo));
        const int64x2_t vadjusted_product23 = vaddw_s32(vproduct23, vget_high_s32(vneg_mask_lo));
        const int64x2_t vadjusted_product45 = vaddw_s32(vproduct45, vget_low_s32(vneg_mask_hi));
        const int64x2_t vadjusted_product67 = vaddw_s32(vproduct67, vget_high_s32(vneg_mask_hi));
#endif

        const int64x2_t vscaled_acc01 = vrshlq_s64(vadjusted_product01, vleft_shift);
        const int64x2_t vscaled_acc23 = vrshlq_s64(vadjusted_product23, vleft_shift);
        const int64x2_t vscaled_acc45 = vrshlq_s64(vadjusted_product45, vleft_shift);
        const int64x2_t vscaled_acc67 = vrshlq_s64(vadjusted_product67, vleft_shift);

#if XNN_ARCH_ARM64
        vacc_lo = vuzp1q_s32(vreinterpretq_s32_s64(vscaled_acc01), vreinterpretq_s32_s64(vscaled_acc23));
        vacc_hi = vuzp1q_s32(vreinterpretq_s32_s64(vscaled_acc45), vreinterpretq_s32_s64(vscaled_acc67));

        const int16x8_t vacc = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
#else
        vacc_lo = vcombine_s32(vmovn_s64(vscaled_acc01), vmovn_s64(vscaled_acc23));
        vacc_hi = vcombine_s32(vmovn_s64(vscaled_acc45), vmovn_s64(vscaled_acc67));

        const int16x8_t vacc = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi)), voutput_zero_point);
#endif

        uint8x8_t vout = vqmovun_s16(vacc);
        vout = vmax_u8(vout, voutput_min);
        vout = vmin_u8(vout, voutput_max);

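        // Store the remaining channels 4, 2, and 1 bytes at a time, rotating
        // the vector after each partial store.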
        if (k & 4) {
          vst1_lane_u32(__builtin_assume_aligned(output, 1), vreinterpret_u32_u8(vout), 0); output += 4;
          vout = vext_u8(vout, vout, 4);
        }
        if (k & 2) {
          vst1_lane_u16(__builtin_assume_aligned(output, 1), vreinterpret_u16_u8(vout), 0); output += 2;
          vout = vext_u8(vout, vout, 2);
        }
        if (k & 1) {
          vst1_lane_u8(output, vout, 0); output += 1;
        }
      }
    }
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--n != 0);
}