// Auto-generated file. Do not edit!
//   Template: src/f16-gavgpool/multipass-neonfp16arith.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


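// Multipass FP16 global average pooling micro-kernel with min/max clamping:
// "7p7x" sums 7 rows in the first pass and 7 more per middle pass into a
// scratch buffer, then a final pass adds the remaining rows, scales by the
// averaging factor, and clamps; "c8" processes channels 8 lanes at a time.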
void xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c8(
    size_t rows,
    size_t channels,
    const void* input,
    size_t input_stride,
    const void* zero,
    void* buffer,
    void* output,
    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);

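  // Pointers to the first 7 input rows; consecutive rows lie input_stride bytes apart.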
  const __fp16* i0 = input;
  const __fp16* i1 = (const __fp16*) ((uintptr_t) i0 + input_stride);
  const __fp16* i2 = (const __fp16*) ((uintptr_t) i1 + input_stride);
  const __fp16* i3 = (const __fp16*) ((uintptr_t) i2 + input_stride);
  const __fp16* i4 = (const __fp16*) ((uintptr_t) i3 + input_stride);
  const __fp16* i5 = (const __fp16*) ((uintptr_t) i4 + input_stride);
  const __fp16* i6 = (const __fp16*) ((uintptr_t) i5 + input_stride);
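  // Byte offset that advances a row pointer by 7 rows, net of the
  // round_up_po2(channels, 8) elements it already consumed during the pass.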
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(__fp16);

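  // First pass: sum rows 0-6 into the scratch buffer, 8 channels per
  // iteration (doz() saturates the subtraction at zero for a partial group).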
  __fp16* b = buffer;
  size_t c = channels;
  for (; c != 0; c = doz(c, 8)) {
    const float16x8_t vi0x01234567 = vld1q_f16(i0); i0 += 8;
    const float16x8_t vi1x01234567 = vld1q_f16(i1); i1 += 8;

    const float16x8_t vi2x01234567 = vld1q_f16(i2); i2 += 8;
    float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);

    const float16x8_t vi3x01234567 = vld1q_f16(i3); i3 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
    const float16x8_t vi4x01234567 = vld1q_f16(i4); i4 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
    const float16x8_t vi5x01234567 = vld1q_f16(i5); i5 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
    const float16x8_t vi6x01234567 = vld1q_f16(i6); i6 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
    vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);

    vst1q_f16(b, vacc01234567); b += 8;
  }

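  // Middle passes: while more than 7 rows remain, accumulate 7 further rows
  // onto the partial sums held in the scratch buffer.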
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const __fp16*) ((uintptr_t) i0 + input_increment);
    i1 = (const __fp16*) ((uintptr_t) i1 + input_increment);
    i2 = (const __fp16*) ((uintptr_t) i2 + input_increment);
    i3 = (const __fp16*) ((uintptr_t) i3 + input_increment);
    i4 = (const __fp16*) ((uintptr_t) i4 + input_increment);
    i5 = (const __fp16*) ((uintptr_t) i5 + input_increment);
    i6 = (const __fp16*) ((uintptr_t) i6 + input_increment);

    __fp16* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 8)) {
      float16x8_t vacc01234567 = vld1q_f16(b);

      const float16x8_t vi0x01234567 = vld1q_f16(i0); i0 += 8;

      const float16x8_t vi1x01234567 = vld1q_f16(i1); i1 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
      const float16x8_t vi2x01234567 = vld1q_f16(i2); i2 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
      const float16x8_t vi3x01234567 = vld1q_f16(i3); i3 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
      const float16x8_t vi4x01234567 = vld1q_f16(i4); i4 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
      const float16x8_t vi5x01234567 = vld1q_f16(i5); i5 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
      const float16x8_t vi6x01234567 = vld1q_f16(i6); i6 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
      vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);

      vst1q_f16(b, vacc01234567); b += 8;
    }
  }

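  // Last pass: 1 to 7 rows remain. Redirect the pointers of the missing rows
  // to the zero vector so they contribute nothing to the sums.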
  i0 = (const __fp16*) ((uintptr_t) i0 + input_increment);
  i1 = (const __fp16*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = (const __fp16*) zero;
  }
  i2 = (const __fp16*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = (const __fp16*) zero;
  }
  i3 = (const __fp16*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = (const __fp16*) zero;
  }
  i4 = (const __fp16*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = (const __fp16*) zero;
  }
  i5 = (const __fp16*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = (const __fp16*) zero;
  }
  i6 = (const __fp16*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = (const __fp16*) zero;
  }

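  // Broadcast the averaging scale and the output clamping bounds, which the
  // params structure stores as raw FP16 bit patterns.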
  const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.scale));
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.max));
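  // Final pass over full groups of 8 channels: add the last rows to the
  // buffered sums, then scale, clamp, and store.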
  for (; channels >= 8; channels -= 8) {
    float16x8_t vacc01234567 = vld1q_f16(buffer); buffer = (__fp16*) buffer + 8;

    const float16x8_t vi0x01234567 = vld1q_f16(i0); i0 += 8;

    const float16x8_t vi1x01234567 = vld1q_f16(i1); i1 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
    const float16x8_t vi2x01234567 = vld1q_f16(i2); i2 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
    const float16x8_t vi3x01234567 = vld1q_f16(i3); i3 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
    const float16x8_t vi4x01234567 = vld1q_f16(i4); i4 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
    const float16x8_t vi5x01234567 = vld1q_f16(i5); i5 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
    const float16x8_t vi6x01234567 = vld1q_f16(i6); i6 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
    vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);

    vacc01234567 = vmulq_f16(vacc01234567, vscale);

    vacc01234567 = vmaxq_f16(vacc01234567, vmin);

    vacc01234567 = vminq_f16(vacc01234567, vmax);

    vst1q_f16(output, vacc01234567); output = (__fp16*) output + 8;
  }
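  // Remainder of 1-7 channels: compute a full vector of 8 (out-of-bounds
  // reads are allowed per XNN_OOB_READS) and store only the valid lanes.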
  if XNN_UNLIKELY(channels != 0) {
    {
      float16x8_t vacc01234567 = vld1q_f16(buffer); buffer = (__fp16*) buffer + 8;

      const float16x8_t vi0x01234567 = vld1q_f16(i0); i0 += 8;
      const float16x8_t vi1x01234567 = vld1q_f16(i1); i1 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
      const float16x8_t vi2x01234567 = vld1q_f16(i2); i2 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
      const float16x8_t vi3x01234567 = vld1q_f16(i3); i3 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
      const float16x8_t vi4x01234567 = vld1q_f16(i4); i4 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
      const float16x8_t vi5x01234567 = vld1q_f16(i5); i5 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
      const float16x8_t vi6x01234567 = vld1q_f16(i6); i6 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
      vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);

      vacc01234567 = vmulq_f16(vacc01234567, vscale);
      vacc01234567 = vmaxq_f16(vacc01234567, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      float16x4_t vacc0123 = vget_low_f16(vacc01234567);
      if (channels & 4) {
        vst1_f16(output, vacc0123); output = (__fp16*) output + 4;
        vacc0123 = vget_high_f16(vacc01234567);
      }
      if (channels & 2) {
        vst1_lane_u32(output, vreinterpret_u32_f16(vacc0123), 0); output = (__fp16*) output + 2;
        vacc0123 = vext_f16(vacc0123, vacc0123, 2);
      }
      if (channels & 1) {
        vst1_lane_f16(output, vacc0123, 0); output = (__fp16*) output + 1;
      }
    }
  }
}