// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-avx.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>

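// Depthwise convolution microkernel ("up8x25"): for each output pixel it reads
// one entry from each of 25 input pointers (the taps of the filter window,
// e.g. a 5x5 kernel), accumulates per-channel products with FMA3, and writes
// up to 8 channels per vector iteration, clamped to the requested min/max.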
void xnn_f32_dwconv_minmax_ukernel_up8x25__fma3(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  const __m256 vmax = _mm256_load_ps(params->avx.max);
  const __m256 vmin = _mm256_load_ps(params->avx.min);
  do {
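    // Set up the 25 per-tap input pointers for this output pixel. A pointer
    // that equals `zero` refers to the shared zero buffer (padding) and is
    // left as-is; every other pointer is shifted by `input_offset`.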
    const float* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
    }
    const float* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
    }
    const float* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
    }
    const float* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
    }
    const float* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
    }
    const float* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
    }
    const float* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
    }
    const float* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const float*) ((uintptr_t) i7 + input_offset);
    }
    const float* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const float*) ((uintptr_t) i8 + input_offset);
    }
    const float* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const float*) ((uintptr_t) i9 + input_offset);
    }
    const float* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const float*) ((uintptr_t) i10 + input_offset);
    }
    const float* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const float*) ((uintptr_t) i11 + input_offset);
    }
    const float* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const float*) ((uintptr_t) i12 + input_offset);
    }
    const float* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const float*) ((uintptr_t) i13 + input_offset);
    }
    const float* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const float*) ((uintptr_t) i14 + input_offset);
    }
    const float* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const float*) ((uintptr_t) i15 + input_offset);
    }
    const float* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const float*) ((uintptr_t) i16 + input_offset);
    }
    const float* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const float*) ((uintptr_t) i17 + input_offset);
    }
    const float* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const float*) ((uintptr_t) i18 + input_offset);
    }
    const float* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const float*) ((uintptr_t) i19 + input_offset);
    }
    const float* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const float*) ((uintptr_t) i20 + input_offset);
    }
    const float* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const float*) ((uintptr_t) i21 + input_offset);
    }
    const float* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const float*) ((uintptr_t) i22 + input_offset);
    }
    const float* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const float*) ((uintptr_t) i23 + input_offset);
    }
    const float* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const float*) ((uintptr_t) i24 + input_offset);
    }
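    // Step the pointer array forward to the pointer set for the next output pixel.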
    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
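    // Main loop: 8 channels at a time. The packed weights hold 8 initial (bias)
    // values followed by 25 groups of 8 per-tap weights, i.e. 208 floats per
    // block, matching the w + 8 ... w + 200 loads and the final w += 208.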
    for (; c >= 8; c -= 8) {
      __m256 vacc01234567p0 = _mm256_load_ps(w);


      const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
      i0 += 8;

      const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
      vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);

      const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
      i1 += 8;

      const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
      vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);

      const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
      i2 += 8;

      const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
      vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);

      const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
      i3 += 8;

      const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
      vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);

      const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
      i4 += 8;

      const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
      vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);

      const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
      i5 += 8;

      const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
      vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);

      const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
      i6 += 8;

      const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
      vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);

      const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
      i7 += 8;

      const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
      vacc01234567p0 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p0);

      const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
      i8 += 8;

      const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
      vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);

      const __m256 vi9x01234567 = _mm256_loadu_ps(i9);
      i9 += 8;

      const __m256 vk9x01234567 = _mm256_load_ps(w + 80);
      vacc01234567p0 = _mm256_fmadd_ps(vi9x01234567, vk9x01234567, vacc01234567p0);

      const __m256 vi10x01234567 = _mm256_loadu_ps(i10);
      i10 += 8;

      const __m256 vk10x01234567 = _mm256_load_ps(w + 88);
      vacc01234567p0 = _mm256_fmadd_ps(vi10x01234567, vk10x01234567, vacc01234567p0);

      const __m256 vi11x01234567 = _mm256_loadu_ps(i11);
      i11 += 8;

      const __m256 vk11x01234567 = _mm256_load_ps(w + 96);
      vacc01234567p0 = _mm256_fmadd_ps(vi11x01234567, vk11x01234567, vacc01234567p0);

      const __m256 vi12x01234567 = _mm256_loadu_ps(i12);
      i12 += 8;

      const __m256 vk12x01234567 = _mm256_load_ps(w + 104);
      vacc01234567p0 = _mm256_fmadd_ps(vi12x01234567, vk12x01234567, vacc01234567p0);

      const __m256 vi13x01234567 = _mm256_loadu_ps(i13);
      i13 += 8;

      const __m256 vk13x01234567 = _mm256_load_ps(w + 112);
      vacc01234567p0 = _mm256_fmadd_ps(vi13x01234567, vk13x01234567, vacc01234567p0);

      const __m256 vi14x01234567 = _mm256_loadu_ps(i14);
      i14 += 8;

      const __m256 vk14x01234567 = _mm256_load_ps(w + 120);
      vacc01234567p0 = _mm256_fmadd_ps(vi14x01234567, vk14x01234567, vacc01234567p0);

      const __m256 vi15x01234567 = _mm256_loadu_ps(i15);
      i15 += 8;

      const __m256 vk15x01234567 = _mm256_load_ps(w + 128);
      vacc01234567p0 = _mm256_fmadd_ps(vi15x01234567, vk15x01234567, vacc01234567p0);

      const __m256 vi16x01234567 = _mm256_loadu_ps(i16);
      i16 += 8;

      const __m256 vk16x01234567 = _mm256_load_ps(w + 136);
      vacc01234567p0 = _mm256_fmadd_ps(vi16x01234567, vk16x01234567, vacc01234567p0);

      const __m256 vi17x01234567 = _mm256_loadu_ps(i17);
      i17 += 8;

      const __m256 vk17x01234567 = _mm256_load_ps(w + 144);
      vacc01234567p0 = _mm256_fmadd_ps(vi17x01234567, vk17x01234567, vacc01234567p0);

      const __m256 vi18x01234567 = _mm256_loadu_ps(i18);
      i18 += 8;

      const __m256 vk18x01234567 = _mm256_load_ps(w + 152);
      vacc01234567p0 = _mm256_fmadd_ps(vi18x01234567, vk18x01234567, vacc01234567p0);

      const __m256 vi19x01234567 = _mm256_loadu_ps(i19);
      i19 += 8;

      const __m256 vk19x01234567 = _mm256_load_ps(w + 160);
      vacc01234567p0 = _mm256_fmadd_ps(vi19x01234567, vk19x01234567, vacc01234567p0);

      const __m256 vi20x01234567 = _mm256_loadu_ps(i20);
      i20 += 8;

      const __m256 vk20x01234567 = _mm256_load_ps(w + 168);
      vacc01234567p0 = _mm256_fmadd_ps(vi20x01234567, vk20x01234567, vacc01234567p0);

      const __m256 vi21x01234567 = _mm256_loadu_ps(i21);
      i21 += 8;

      const __m256 vk21x01234567 = _mm256_load_ps(w + 176);
      vacc01234567p0 = _mm256_fmadd_ps(vi21x01234567, vk21x01234567, vacc01234567p0);

      const __m256 vi22x01234567 = _mm256_loadu_ps(i22);
      i22 += 8;

      const __m256 vk22x01234567 = _mm256_load_ps(w + 184);
      vacc01234567p0 = _mm256_fmadd_ps(vi22x01234567, vk22x01234567, vacc01234567p0);

      const __m256 vi23x01234567 = _mm256_loadu_ps(i23);
      i23 += 8;

      const __m256 vk23x01234567 = _mm256_load_ps(w + 192);
      vacc01234567p0 = _mm256_fmadd_ps(vi23x01234567, vk23x01234567, vacc01234567p0);

      const __m256 vi24x01234567 = _mm256_loadu_ps(i24);
      i24 += 8;

      const __m256 vk24x01234567 = _mm256_load_ps(w + 200);
      vacc01234567p0 = _mm256_fmadd_ps(vi24x01234567, vk24x01234567, vacc01234567p0);

      w += 208;

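      // Clamp the accumulator to the [min, max] output range before storing.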
      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);

      _mm256_storeu_ps(output, vacc01234567);
      output += 8;
    }
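    // Remainder: 1-7 channels are left over. Masked loads zero the lanes past
    // the remainder, using a mask taken from the shared mask table.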
    if XNN_UNLIKELY(c != 0) {
      assert(c >= 1);
      assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);

      __m256 vacc01234567p0 = _mm256_load_ps(w);

      const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
      const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
      vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);

      const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
      const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
      vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);

      const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
      const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
      vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);

      const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
      const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
      vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);

      const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
      const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
      vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);

      const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
      const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
      vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);

      const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
      const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
      vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);

      const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
      const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
      vacc01234567p0 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p0);

      const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
      const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
      vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);

      const __m256 vi9x01234567 = _mm256_maskload_ps(i9, vmask);
      const __m256 vk9x01234567 = _mm256_load_ps(w + 80);
      vacc01234567p0 = _mm256_fmadd_ps(vi9x01234567, vk9x01234567, vacc01234567p0);

      const __m256 vi10x01234567 = _mm256_maskload_ps(i10, vmask);
      const __m256 vk10x01234567 = _mm256_load_ps(w + 88);
      vacc01234567p0 = _mm256_fmadd_ps(vi10x01234567, vk10x01234567, vacc01234567p0);

      const __m256 vi11x01234567 = _mm256_maskload_ps(i11, vmask);
      const __m256 vk11x01234567 = _mm256_load_ps(w + 96);
      vacc01234567p0 = _mm256_fmadd_ps(vi11x01234567, vk11x01234567, vacc01234567p0);

      const __m256 vi12x01234567 = _mm256_maskload_ps(i12, vmask);
      const __m256 vk12x01234567 = _mm256_load_ps(w + 104);
      vacc01234567p0 = _mm256_fmadd_ps(vi12x01234567, vk12x01234567, vacc01234567p0);

      const __m256 vi13x01234567 = _mm256_maskload_ps(i13, vmask);
      const __m256 vk13x01234567 = _mm256_load_ps(w + 112);
      vacc01234567p0 = _mm256_fmadd_ps(vi13x01234567, vk13x01234567, vacc01234567p0);

      const __m256 vi14x01234567 = _mm256_maskload_ps(i14, vmask);
      const __m256 vk14x01234567 = _mm256_load_ps(w + 120);
      vacc01234567p0 = _mm256_fmadd_ps(vi14x01234567, vk14x01234567, vacc01234567p0);

      const __m256 vi15x01234567 = _mm256_maskload_ps(i15, vmask);
      const __m256 vk15x01234567 = _mm256_load_ps(w + 128);
      vacc01234567p0 = _mm256_fmadd_ps(vi15x01234567, vk15x01234567, vacc01234567p0);

      const __m256 vi16x01234567 = _mm256_maskload_ps(i16, vmask);
      const __m256 vk16x01234567 = _mm256_load_ps(w + 136);
      vacc01234567p0 = _mm256_fmadd_ps(vi16x01234567, vk16x01234567, vacc01234567p0);

      const __m256 vi17x01234567 = _mm256_maskload_ps(i17, vmask);
      const __m256 vk17x01234567 = _mm256_load_ps(w + 144);
      vacc01234567p0 = _mm256_fmadd_ps(vi17x01234567, vk17x01234567, vacc01234567p0);

      const __m256 vi18x01234567 = _mm256_maskload_ps(i18, vmask);
      const __m256 vk18x01234567 = _mm256_load_ps(w + 152);
      vacc01234567p0 = _mm256_fmadd_ps(vi18x01234567, vk18x01234567, vacc01234567p0);

      const __m256 vi19x01234567 = _mm256_maskload_ps(i19, vmask);
      const __m256 vk19x01234567 = _mm256_load_ps(w + 160);
      vacc01234567p0 = _mm256_fmadd_ps(vi19x01234567, vk19x01234567, vacc01234567p0);

      const __m256 vi20x01234567 = _mm256_maskload_ps(i20, vmask);
      const __m256 vk20x01234567 = _mm256_load_ps(w + 168);
      vacc01234567p0 = _mm256_fmadd_ps(vi20x01234567, vk20x01234567, vacc01234567p0);

      const __m256 vi21x01234567 = _mm256_maskload_ps(i21, vmask);
      const __m256 vk21x01234567 = _mm256_load_ps(w + 176);
      vacc01234567p0 = _mm256_fmadd_ps(vi21x01234567, vk21x01234567, vacc01234567p0);

      const __m256 vi22x01234567 = _mm256_maskload_ps(i22, vmask);
      const __m256 vk22x01234567 = _mm256_load_ps(w + 184);
      vacc01234567p0 = _mm256_fmadd_ps(vi22x01234567, vk22x01234567, vacc01234567p0);

      const __m256 vi23x01234567 = _mm256_maskload_ps(i23, vmask);
      const __m256 vk23x01234567 = _mm256_load_ps(w + 192);
      vacc01234567p0 = _mm256_fmadd_ps(vi23x01234567, vk23x01234567, vacc01234567p0);

      const __m256 vi24x01234567 = _mm256_maskload_ps(i24, vmask);
      const __m256 vk24x01234567 = _mm256_load_ps(w + 200);
      vacc01234567p0 = _mm256_fmadd_ps(vi24x01234567, vk24x01234567, vacc01234567p0);


      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);

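      // Store the clamped remainder 4, 2, then 1 lane at a time, shifting the
      // upper lanes down as they are consumed.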
      __m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
      if (c & 4) {
        _mm_storeu_ps(output, vacc0123);
        vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
        output += 4;
      }
      if (c & 2) {
        _mm_storel_pi((__m64*) output, vacc0123);
        vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
        output += 2;
      }
      if (c & 1) {
        _mm_store_ss(output, vacc0123);
        output += 1;
      }
    }

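    // output_increment moves the pointer past any remaining gap to the start
    // of the next output pixel.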
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}