// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-avx512.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>


void xnn_f32_dwconv_ukernel_up16x9__avx512f_acc2(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(channels != 0);
  assert(output_width != 0);

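  // Broadcast the output clamping bounds to all 16 lanes of a 512-bit register.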
  const __m512 vmax = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));
  const __m512 vmin = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
  do {
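    // input is an array of row pointers, one per kernel tap; this 9-tap
    // kernel reads 9 pointers for each output pixel.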
    const float* i0 = input[0];
    assert(i0 != NULL);
    const float* i1 = input[1];
    assert(i1 != NULL);
    const float* i2 = input[2];
    assert(i2 != NULL);
    const float* i3 = input[3];
    assert(i3 != NULL);
    const float* i4 = input[4];
    assert(i4 != NULL);
    const float* i5 = input[5];
    assert(i5 != NULL);
    const float* i6 = input[6];
    assert(i6 != NULL);
    const float* i7 = input[7];
    assert(i7 != NULL);
    const float* i8 = input[8];
    assert(i8 != NULL);
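    // Step the pointer array forward to the pointer set for the next output pixel.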
    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
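    // Main loop: process 16 channels per iteration. Weights are packed per
    // group as 16 bias values followed by 9 blocks of 16 per-tap multipliers
    // (160 floats in total), matching the w offsets used below.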
    for (; c >= 16; c -= 16) {
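      // Seed the first accumulator with the per-channel bias.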
      __m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);


      const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
      i0 += 16;

      const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 16);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);

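      // The _acc2 variant alternates taps between two accumulators (p0 and p1)
      // to shorten the FMA dependency chain; they are summed at the end.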
      const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
      i1 += 16;

      const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 32);
      __m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);

      const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
      i2 += 16;

      const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 48);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
      i3 += 16;

      const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 64);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi4x0123456789ABCDEF = _mm512_loadu_ps(i4);
      i4 += 16;

      const __m512 vk4x0123456789ABCDEF = _mm512_load_ps(w + 80);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi5x0123456789ABCDEF = _mm512_loadu_ps(i5);
      i5 += 16;

      const __m512 vk5x0123456789ABCDEF = _mm512_load_ps(w + 96);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi6x0123456789ABCDEF = _mm512_loadu_ps(i6);
      i6 += 16;

      const __m512 vk6x0123456789ABCDEF = _mm512_load_ps(w + 112);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi7x0123456789ABCDEF = _mm512_loadu_ps(i7);
      i7 += 16;

      const __m512 vk7x0123456789ABCDEF = _mm512_load_ps(w + 128);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi8x0123456789ABCDEF = _mm512_loadu_ps(i8);
      i8 += 16;

      const __m512 vk8x0123456789ABCDEF = _mm512_load_ps(w + 144);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);

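      // Advance past this group's weights: 16 bias + 9*16 kernel values.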
      w += 160;

      // Add up all accumulators to vacc0123456789ABCDEFp0
      vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);

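      // Clamp the results to the [min, max] output range.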
      __m512 vacc0123456789ABCDEF = _mm512_max_ps(vacc0123456789ABCDEFp0, vmin);
      vacc0123456789ABCDEF = _mm512_min_ps(vacc0123456789ABCDEF, vmax);

      _mm512_storeu_ps(output, vacc0123456789ABCDEF);
      output += 16;
    }
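    // Handle the remaining 1-15 channels with masked loads and stores.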
    if XNN_UNLIKELY(c != 0) {
      assert(c >= 1);
      assert(c <= 16);
      // Prepare a mask with the low c bits set to select the valid 32-bit elements.
      const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
      __m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);

      const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
      const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 16);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
      const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
      __m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);

      const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
      const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 48);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
      const __m512 vk3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i4);
      const __m512 vk4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 80);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i5);
      const __m512 vk5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 96);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i6);
      const __m512 vk6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 112);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i7);
      const __m512 vk7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 128);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i8);
      const __m512 vk8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 144);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      // Add up all accumulators to vacc0123456789ABCDEFp0
      vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);

      __m512 vacc0123456789ABCDEF = _mm512_max_ps(vacc0123456789ABCDEFp0, vmin);
      vacc0123456789ABCDEF = _mm512_min_ps(vacc0123456789ABCDEF, vmax);

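      // Write back only the c valid lanes.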
      _mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
      output += c;
    }

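    // output_increment covers any gap between the channels just written and
    // the start of the next output pixel.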
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}