// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5s2p2-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


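// Depthwise 2D convolution micro-kernel in CHW layout: 5x5 filter, stride 2,
// and (per the kernel name) 2 pixels of implicit left/right padding. Each pass
// of the outer loop produces one output row; the inner loop produces 4 output
// pixels at a time, splitting the dot product across 3 accumulators
// ("1x4_acc3").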
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_1x4_acc3(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 1);
  assert(padding_top <= 2);

  const uint32x4_t vmask_even = vld1q_u32(params->neon.mask_even);
  const uint32x4_t vmask_odd = vld1q_u32(params->neon.mask_odd);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon.min);

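  // The weights are expected in XNNPACK's packed CHW layout: the bias first
  // (weights[0], used to seed the accumulator below), followed by the 25
  // kernel taps in row-major order (weights[1..25]).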
  const float32x4_t vw0123 = vld1q_f32(weights);
  const float32x4_t vw4567 = vld1q_f32(weights + 4);
  const float32x4_t vw89AB = vld1q_f32(weights + 8);
  const float32x4_t vwCDEF = vld1q_f32(weights + 12);
  const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
  const float32x4_t vwKLMN = vld1q_f32(weights + 20);
  const float32x2_t vwOP = vld1_f32(weights + 24);

  const uint32_t padding_top_less_1 = padding_top - 1;
  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
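  // The row pointers advance by round_up(input_width, 8 floats) while a row is
  // processed; input_decrement rewinds them so they can be rebased onto the
  // next pair of input rows (stride 2) at the bottom of the outer loop.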

  const float* i0 = zero;
  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
    i1 = zero;
  }
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);


  float* o0 = output;

  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
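  // Process one output row per iteration. padded_input_height tracks how much
  // of the (virtually padded) input remains, so rows past the bottom edge can
  // be replaced with the zero row.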
  do {
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i3 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 7) {
      i4 = zero;
    }

    float32x4_t vi0x0246 = vmovq_n_f32(0.0f);
    float32x4_t vi1x0246 = vmovq_n_f32(0.0f);
    float32x4_t vi2x0246 = vmovq_n_f32(0.0f);
    float32x4_t vi3x0246 = vmovq_n_f32(0.0f);
    float32x4_t vi4x0246 = vmovq_n_f32(0.0f);

    float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi3x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi4x1357 = vmovq_n_f32(0.0f);

    float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
    float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
    float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
    float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3); i3 += 8;
    float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4); i4 += 8;

    size_t w = input_width;
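    // Main loop: 4 output pixels (8 input columns) per iteration. vld2q_f32
    // (both the pre-loads above and the loads inside the loop) de-interleaves
    // 8 consecutive pixels into even columns (.val[0]) and odd columns
    // (.val[1]), which is the access pattern the stride-2 filter needs.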
    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
      float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);

      float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 1);

      float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE9BDF.val[0], vget_low_f32(vw89AB), 0);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi2x8ACE9BDF.val[0], vget_low_f32(vwCDEF), 1);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi3x8ACE9BDF.val[0], vget_high_f32(vwGHIJ), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi4x8ACE9BDF.val[0], vget_high_f32(vwKLMN), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi0x8ACE9BDF.val[1], vget_low_f32(vw4567), 0);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi1x8ACE9BDF.val[1], vget_low_f32(vw89AB), 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[1], vget_high_f32(vwCDEF), 0);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi3x8ACE9BDF.val[1], vget_high_f32(vwGHIJ), 1);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi4x8ACE9BDF.val[1], vwOP, 0);

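      // Shift in the columns to the left of this block: vextq_f32(prev, cur, 3)
      // keeps the last lane of the previous block's even/odd columns, yielding
      // columns 6,8,A,C (and 7,9,B,D below) for the off-center kernel taps.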
      const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE9BDF.val[0], 3);
      vi0x0246 = vi0x8ACE9BDF.val[0];
      const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE9BDF.val[0], 3);
      vi1x0246 = vi1x8ACE9BDF.val[0];
      const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE9BDF.val[0], 3);
      vi2x0246 = vi2x8ACE9BDF.val[0];
      const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE9BDF.val[0], 3);
      vi3x0246 = vi3x8ACE9BDF.val[0];
      const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE9BDF.val[0], 3);
      vi4x0246 = vi4x8ACE9BDF.val[0];

      vo0p0 = vmlaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi2x68AC, vget_high_f32(vw89AB), 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi4x68AC, vget_low_f32(vwKLMN), 1);

      const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
      vi0x1357 = vi0x8ACE9BDF.val[1];
      const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
      vi1x1357 = vi1x8ACE9BDF.val[1];
      const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
      vi2x1357 = vi2x8ACE9BDF.val[1];
      const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x8ACE9BDF.val[1], 3);
      vi3x1357 = vi3x8ACE9BDF.val[1];
      const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);
      vi4x1357 = vi4x8ACE9BDF.val[1];

      const float32x4x2_t vi0xGIKMHJLN = vld2q_f32(i0); i0 += 8;
      const float32x4x2_t vi1xGIKMHJLN = vld2q_f32(i1); i1 += 8;
      const float32x4x2_t vi2xGIKMHJLN = vld2q_f32(i2); i2 += 8;
      const float32x4x2_t vi3xGIKMHJLN = vld2q_f32(i3); i3 += 8;
      const float32x4x2_t vi4xGIKMHJLN = vld2q_f32(i4); i4 += 8;

      vo0p2 = vmlaq_lane_f32(vo0p2, vi0x79BD, vget_high_f32(vw0123), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi2x79BD, vget_low_f32(vwCDEF), 0);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi3x79BD, vget_low_f32(vwGHIJ), 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi4x79BD, vget_high_f32(vwKLMN), 0);

      const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE9BDF.val[0], vi0xGIKMHJLN.val[0], 1);
      vi0x8ACE9BDF = vi0xGIKMHJLN;
      const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE9BDF.val[0], vi1xGIKMHJLN.val[0], 1);
      vi1x8ACE9BDF = vi1xGIKMHJLN;
      const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE9BDF.val[0], vi2xGIKMHJLN.val[0], 1);
      vi2x8ACE9BDF = vi2xGIKMHJLN;
      const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE9BDF.val[0], vi3xGIKMHJLN.val[0], 1);
      vi3x8ACE9BDF = vi3xGIKMHJLN;
      const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE9BDF.val[0], vi4xGIKMHJLN.val[0], 1);
      vi4x8ACE9BDF = vi4xGIKMHJLN;

      vo0p1 = vmlaq_lane_f32(vo0p1, vi0xACEG, vget_low_f32(vw4567), 1);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi1xACEG, vget_high_f32(vw89AB), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi4xACEG, vwOP, 1);

      vo0p0 = vaddq_f32(vo0p0, vo0p1);
      vo0p0 = vaddq_f32(vo0p0, vo0p2);

      float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);

      vo0 = vminq_f32(vo0, vmax);

      vst1q_f32(o0, vo0); o0 += 4;
    }
    // Last block has 1-8 pixels to process.
    assert(w <= 8 * sizeof(float));
    assert(w >= 1 * sizeof(float));
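    // Mask off input columns that lie beyond the end of the row so the partial
    // block can reuse the same arithmetic as the full blocks above.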
    {
      float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);

      const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
      const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
      const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
      const float32x4_t vi3x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[0])));
      const float32x4_t vi4x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[0])));

      const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
      const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
      const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
      const float32x4_t vi3x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[1])));
      const float32x4_t vi4x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[1])));

      float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 1);

      float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE, vget_low_f32(vw89AB), 0);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi2x8ACE, vget_low_f32(vwCDEF), 1);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi3x8ACE, vget_high_f32(vwGHIJ), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi4x8ACE, vget_high_f32(vwKLMN), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi0x9BDF, vget_low_f32(vw4567), 0);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi1x9BDF, vget_low_f32(vw89AB), 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2x9BDF, vget_high_f32(vwCDEF), 0);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi3x9BDF, vget_high_f32(vwGHIJ), 1);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi4x9BDF, vwOP, 0);

      const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE, 3);
      const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE, 3);
      const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE, 3);
      const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE, 3);
      const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE, 3);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi2x68AC, vget_high_f32(vw89AB), 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi4x68AC, vget_low_f32(vwKLMN), 1);

      const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
      const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
      const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
      const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x9BDF, 3);
      const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x9BDF, 3);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi0x79BD, vget_high_f32(vw0123), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi2x79BD, vget_low_f32(vwCDEF), 0);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi3x79BD, vget_low_f32(vwGHIJ), 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi4x79BD, vget_high_f32(vwKLMN), 0);

      const float32x4_t vzero = vmovq_n_f32(0.0f);
      const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE, vzero, 1);
      const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE, vzero, 1);
      const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE, vzero, 1);
      const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE, vzero, 1);
      const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE, vzero, 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi0xACEG, vget_low_f32(vw4567), 1);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi1xACEG, vget_high_f32(vw89AB), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);

      vo0p2 = vmlaq_lane_f32(vo0p2, vi4xACEG, vwOP, 1);

      vo0p0 = vaddq_f32(vo0p0, vo0p1);
      vo0p0 = vaddq_f32(vo0p0, vo0p2);

      float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);

      vo0 = vminq_f32(vo0, vmax);

      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
      if XNN_LIKELY(w_tmp >= 4) {
        vst1q_f32(o0, vo0); o0 += 4;
      } else {
        float32x2_t vo0_lo = vget_low_f32(vo0);
        if (w_tmp & 2) {
          vst1_f32(o0, vo0_lo); o0 += 2;

          vo0_lo = vget_high_f32(vo0);
        }
        if (w_tmp & 1) {
          vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
        }
      }
    }

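    // Advance to the next output row: with a stride of 2, it starts two input
    // rows further down, so the old i2/i3/i4 rows become the new i0/i1/i2.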
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);


    output_height -= 1;
    padded_input_height -= 2;
  } while (output_height != 0);
}