// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/avgpool.h>
#include <xnnpack/common.h>

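// Single-pass QU8 average-pooling micro-kernel: averages up to 9 pooling
// elements per output pixel ("9x"), 8 channels at a time ("c8"), using
// NEON fixed-point requantization.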
void xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const uint8_t** input,
    size_t input_offset,
    const uint8_t* zero,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(kernel_elements <= 9);
  assert(channels != 0);

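  // Broadcast the scalar requantization parameters: vbias is added to the
  // raw tap sum, vmultiplier/vleft_shift apply the fixed-point averaging
  // scale, and the zero point plus min/max requantize and clamp the result
  // to uint8.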
  const int32x4_t vbias = vld1q_dup_s32(&params->neon.bias);
#if XNN_ARCH_ARM64
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier);
#else
  const int32x2_t vmultiplier = vld1_dup_s32(&params->neon.multiplier);
#endif
  const int64x2_t vleft_shift = vld1q_dup_s64(&params->neon.left_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->neon.output_max);

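  // Process one output pixel per iteration: pick up its (up to 9) input row
  // pointers, then advance the pointer array by input_increment.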
  do {
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    const uint8_t* i1 = input[1];
    const uint8_t* i2 = input[2];
    const uint8_t* i3 = input[3];
    const uint8_t* i4 = input[4];
    const uint8_t* i5 = input[5];
    const uint8_t* i6 = input[6];
    const uint8_t* i7 = input[7];
    const uint8_t* i8 = input[8];
    input = (const uint8_t**) ((uintptr_t) input + input_increment);
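    // Pools with fewer than 9 elements redirect the unused taps to the
    // caller-provided zero vector, so every pixel is summed as 9 inputs.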
    if (kernel_elements < 2) {
      i1 = zero;
    }
    assert(i1 != NULL);
    if (kernel_elements <= 2) {
      i2 = zero;
    }
    assert(i2 != NULL);
    if (kernel_elements < 4) {
      i3 = zero;
    }
    assert(i3 != NULL);
    if (kernel_elements <= 4) {
      i4 = zero;
    }
    assert(i4 != NULL);
    if (kernel_elements < 6) {
      i5 = zero;
    }
    assert(i5 != NULL);
    if (kernel_elements <= 6) {
      i6 = zero;
    }
    assert(i6 != NULL);
    if (kernel_elements < 8) {
      i7 = zero;
    }
    assert(i7 != NULL);
    if (kernel_elements <= 8) {
      i8 = zero;
    }
    assert(i8 != NULL);
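    // Apply input_offset only to real input rows; the shared zero vector
    // must stay un-offset.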
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }

    size_t c = channels;
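    // Main loop: 8 channels per iteration, one full uint8x8_t load per tap.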
    while (c >= 8) {
      const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;
      const uint8x8_t vi3 = vld1_u8(i3); i3 += 8;
      const uint8x8_t vi4 = vld1_u8(i4); i4 += 8;
      const uint8x8_t vi5 = vld1_u8(i5); i5 += 8;
      const uint8x8_t vi6 = vld1_u8(i6); i6 += 8;
      const uint8x8_t vi7 = vld1_u8(i7); i7 += 8;
      const uint8x8_t vi8 = vld1_u8(i8); i8 += 8;

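      // Reduce the 9 taps with a balanced tree of widening additions; the
      // per-channel sum fits comfortably in 16 bits (at most 9 * 255 = 2295).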
      const uint16x8_t vsum018 = vaddw_u8(vaddl_u8(vi0, vi1), vi8);
      const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
      const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
      const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);

      const uint16x8_t vsum2345 = vaddq_u16(vsum23, vsum45);
      const uint16x8_t vsum01678 = vaddq_u16(vsum018, vsum67);
      const uint16x8_t vsum = vaddq_u16(vsum2345, vsum01678);

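      // Reinterpret the unsigned sums as signed (lossless, since they fit in
      // 15 bits), widen to 32 bits, and add the precomputed bias.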
      int32x4_t vacc_lo = vaddw_s16(vbias, vreinterpret_s16_u16(vget_low_u16(vsum)));
      int32x4_t vacc_hi = vaddw_s16(vbias, vreinterpret_s16_u16(vget_high_u16(vsum)));

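      // All-ones masks for negative accumulators; folded into the products
      // below so that the subsequent rounding shift ties away from zero
      // rather than toward +infinity.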
      const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));
      const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));

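      // Widen each 32-bit accumulator into a 64-bit product with the
      // fixed-point multiplier; AArch64 uses vmull_high_s32 to consume the
      // upper lanes without extraction.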
#if XNN_ARCH_ARM64
      const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vget_low_s32(vmultiplier));
      const int64x2_t vproduct23 = vmull_high_s32(vacc_lo, vmultiplier);
      const int64x2_t vproduct45 = vmull_s32(vget_low_s32(vacc_hi), vget_low_s32(vmultiplier));
      const int64x2_t vproduct67 = vmull_high_s32(vacc_hi, vmultiplier);

      const int64x2_t vadjusted_product01 = vaddw_s32(vproduct01, vget_low_s32(vneg_mask_lo));
      const int64x2_t vadjusted_product23 = vaddw_high_s32(vproduct23, vneg_mask_lo);
      const int64x2_t vadjusted_product45 = vaddw_s32(vproduct45, vget_low_s32(vneg_mask_hi));
      const int64x2_t vadjusted_product67 = vaddw_high_s32(vproduct67, vneg_mask_hi);
#else
      const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vmultiplier);
      const int64x2_t vproduct23 = vmull_s32(vget_high_s32(vacc_lo), vmultiplier);
      const int64x2_t vproduct45 = vmull_s32(vget_low_s32(vacc_hi), vmultiplier);
      const int64x2_t vproduct67 = vmull_s32(vget_high_s32(vacc_hi), vmultiplier);

      const int64x2_t vadjusted_product01 = vaddw_s32(vproduct01, vget_low_s32(vneg_mask_lo));
      const int64x2_t vadjusted_product23 = vaddw_s32(vproduct23, vget_high_s32(vneg_mask_lo));
      const int64x2_t vadjusted_product45 = vaddw_s32(vproduct45, vget_low_s32(vneg_mask_hi));
      const int64x2_t vadjusted_product67 = vaddw_s32(vproduct67, vget_high_s32(vneg_mask_hi));
#endif

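      // The shift count in vleft_shift is expected to be non-positive here,
      // making vrshlq_s64 a rounding arithmetic right shift by the
      // requantization shift amount.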
      const int64x2_t vscaled_acc01 = vrshlq_s64(vadjusted_product01, vleft_shift);
      const int64x2_t vscaled_acc23 = vrshlq_s64(vadjusted_product23, vleft_shift);
      const int64x2_t vscaled_acc45 = vrshlq_s64(vadjusted_product45, vleft_shift);
      const int64x2_t vscaled_acc67 = vrshlq_s64(vadjusted_product67, vleft_shift);

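      // After the shift the results fit in 32 bits, so only the low 32-bit
      // half of each 64-bit lane is kept (vuzp1q_s32 on AArch64, vmovn_s64
      // elsewhere), then saturated to 16 bits and offset by the zero point.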
#if XNN_ARCH_ARM64
      vacc_lo = vuzp1q_s32(vreinterpretq_s32_s64(vscaled_acc01), vreinterpretq_s32_s64(vscaled_acc23));
      vacc_hi = vuzp1q_s32(vreinterpretq_s32_s64(vscaled_acc45), vreinterpretq_s32_s64(vscaled_acc67));

      const int16x8_t vacc = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
#else
      vacc_lo = vcombine_s32(vmovn_s64(vscaled_acc01), vmovn_s64(vscaled_acc23));
      vacc_hi = vcombine_s32(vmovn_s64(vscaled_acc45), vmovn_s64(vscaled_acc67));

      const int16x8_t vacc = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi)), voutput_zero_point);
#endif

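      // Saturating-narrow to uint8, then clamp to [output_min, output_max].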
      uint8x8_t vout = vqmovun_s16(vacc);
      vout = vmax_u8(vout, voutput_min);
      vout = vmin_u8(vout, voutput_max);

      vst1_u8(output, vout); output += 8;

      c -= 8;
    }
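    // Remainder path: identical arithmetic for the final 1-7 channels. The
    // full 8-byte vector loads may read past the last channel; the kernel is
    // marked XNN_OOB_READS to allow this.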
    if (c != 0) {
      const uint8x8_t vi0 = vld1_u8(i0);
      const uint8x8_t vi1 = vld1_u8(i1);
      const uint8x8_t vi2 = vld1_u8(i2);
      const uint8x8_t vi3 = vld1_u8(i3);
      const uint8x8_t vi4 = vld1_u8(i4);
      const uint8x8_t vi5 = vld1_u8(i5);
      const uint8x8_t vi6 = vld1_u8(i6);
      const uint8x8_t vi7 = vld1_u8(i7);
      const uint8x8_t vi8 = vld1_u8(i8);

      const uint16x8_t vsum018 = vaddw_u8(vaddl_u8(vi0, vi1), vi8);
      const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
      const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
      const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);

      const uint16x8_t vsum2345 = vaddq_u16(vsum23, vsum45);
      const uint16x8_t vsum01678 = vaddq_u16(vsum018, vsum67);
      const uint16x8_t vsum = vaddq_u16(vsum2345, vsum01678);

      int32x4_t vacc_lo = vaddw_s16(vbias, vreinterpret_s16_u16(vget_low_u16(vsum)));
      int32x4_t vacc_hi = vaddw_s16(vbias, vreinterpret_s16_u16(vget_high_u16(vsum)));

      const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));
      const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));

#if XNN_ARCH_ARM64
      const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vget_low_s32(vmultiplier));
      const int64x2_t vproduct23 = vmull_high_s32(vacc_lo, vmultiplier);
      const int64x2_t vproduct45 = vmull_s32(vget_low_s32(vacc_hi), vget_low_s32(vmultiplier));
      const int64x2_t vproduct67 = vmull_high_s32(vacc_hi, vmultiplier);

      const int64x2_t vadjusted_product01 = vaddw_s32(vproduct01, vget_low_s32(vneg_mask_lo));
      const int64x2_t vadjusted_product23 = vaddw_high_s32(vproduct23, vneg_mask_lo);
      const int64x2_t vadjusted_product45 = vaddw_s32(vproduct45, vget_low_s32(vneg_mask_hi));
      const int64x2_t vadjusted_product67 = vaddw_high_s32(vproduct67, vneg_mask_hi);
#else
      const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vmultiplier);
      const int64x2_t vproduct23 = vmull_s32(vget_high_s32(vacc_lo), vmultiplier);
      const int64x2_t vproduct45 = vmull_s32(vget_low_s32(vacc_hi), vmultiplier);
      const int64x2_t vproduct67 = vmull_s32(vget_high_s32(vacc_hi), vmultiplier);

      const int64x2_t vadjusted_product01 = vaddw_s32(vproduct01, vget_low_s32(vneg_mask_lo));
      const int64x2_t vadjusted_product23 = vaddw_s32(vproduct23, vget_high_s32(vneg_mask_lo));
      const int64x2_t vadjusted_product45 = vaddw_s32(vproduct45, vget_low_s32(vneg_mask_hi));
      const int64x2_t vadjusted_product67 = vaddw_s32(vproduct67, vget_high_s32(vneg_mask_hi));
#endif

      const int64x2_t vscaled_acc01 = vrshlq_s64(vadjusted_product01, vleft_shift);
      const int64x2_t vscaled_acc23 = vrshlq_s64(vadjusted_product23, vleft_shift);
      const int64x2_t vscaled_acc45 = vrshlq_s64(vadjusted_product45, vleft_shift);
      const int64x2_t vscaled_acc67 = vrshlq_s64(vadjusted_product67, vleft_shift);

#if XNN_ARCH_ARM64
      vacc_lo = vuzp1q_s32(vreinterpretq_s32_s64(vscaled_acc01), vreinterpretq_s32_s64(vscaled_acc23));
      vacc_hi = vuzp1q_s32(vreinterpretq_s32_s64(vscaled_acc45), vreinterpretq_s32_s64(vscaled_acc67));

      const int16x8_t vacc = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
#else
      vacc_lo = vcombine_s32(vmovn_s64(vscaled_acc01), vmovn_s64(vscaled_acc23));
      vacc_hi = vcombine_s32(vmovn_s64(vscaled_acc45), vmovn_s64(vscaled_acc67));

      const int16x8_t vacc = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi)), voutput_zero_point);
#endif

      uint8x8_t vout = vqmovun_s16(vacc);
      vout = vmax_u8(vout, voutput_min);
      vout = vmin_u8(vout, voutput_max);

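      // Store the surviving channels 4, 2, then 1 byte(s) at a time,
      // rotating the vector with vext_u8 after each partial store.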
      if (c & 4) {
        vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout), 0); output += 4;
        vout = vext_u8(vout, vout, 4);
      }
      if (c & 2) {
        vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout), 0); output += 2;
        vout = vext_u8(vout, vout, 2);
      }
      if (c & 1) {
        vst1_lane_u8(output, vout, 0); output += 1;
      }
    }
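    // Advance past any gap between this pixel's last written channel and the
    // start of the next output pixel.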
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}