// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

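// Depthwise 3x3 convolution over f32 data in CHW layout with 1-pixel padding
// on every edge ("3x3p1"), producing 2 output rows x 4 output pixels per
// iteration with 2 accumulators per output row ("2x4_acc2").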
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 1);

  const __m128 vmask = _mm_load_ps((const float*) params->sse.mask);
  const __m128 vmax = _mm_load_ps(params->sse.max);
  const __m128 vmin = _mm_load_ps(params->sse.min);

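  // Weights layout: one bias value followed by the 3x3 kernel taps in
  // row-major order (k00 k01 k02 / k10 k11 k12 / k20 k21 k22), each
  // broadcast to all four SSE lanes.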
  const __m128 vbias = _mm_load1_ps(weights);
  const __m128 vk00 = _mm_load1_ps(weights + 1);
  const __m128 vk01 = _mm_load1_ps(weights + 2);
  const __m128 vk02 = _mm_load1_ps(weights + 3);
  const __m128 vk10 = _mm_load1_ps(weights + 4);
  const __m128 vk11 = _mm_load1_ps(weights + 5);
  const __m128 vk12 = _mm_load1_ps(weights + 6);
  const __m128 vk20 = _mm_load1_ps(weights + 7);
  const __m128 vk21 = _mm_load1_ps(weights + 8);
  const __m128 vk22 = _mm_load1_ps(weights + 9);

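  // input_width is given in bytes. Over a full row pass, each input pointer
  // advances by input_width rounded up to the 16-byte block size (the initial
  // 16-byte load plus one 16-byte step per main-loop iteration), so
  // subtracting input_decrement rewinds a pointer to the start of the row it
  // just consumed.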
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

  const float* i0 = zero;
  const float* i1 = input;
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);

  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + input_width);

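  // Process two output rows per iteration: output row 0 reads input rows
  // i0..i2 and output row 1 reads i1..i3. i0 starts at the zero row because
  // of the 1-pixel top padding; near the bottom edge, out-of-range input rows
  // are likewise redirected to the zero row.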
  size_t output_height = input_height;
  do {
    if XNN_UNPREDICTABLE(output_height < 2) {
      i2 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i3 = zero;
    }

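    // The inner loop keeps a sliding window of input columns in registers:
    // viNx3012 carries the last column of the previous block in lane 0, and
    // is zero-initialized here to realize the implicit left padding.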
    // vi0x3012 = ( vi02, vi01, vi00, vi03 )
    __m128 vi0x3012 = _mm_setzero_ps();
    // vi1x3012 = ( vi12, vi11, vi10, vi13 )
    __m128 vi1x3012 = _mm_setzero_ps();
    // vi2x3012 = ( vi22, vi21, vi20, vi23 )
    __m128 vi2x3012 = _mm_setzero_ps();
    // vi3x3012 = ( vi32, vi31, vi30, vi33 )
    __m128 vi3x3012 = _mm_setzero_ps();

    __m128 vi0x4567 = _mm_loadu_ps(i0);
    i0 += 4;
    __m128 vi1x4567 = _mm_loadu_ps(i1);
    i1 += 4;
    __m128 vi2x4567 = _mm_loadu_ps(i2);
    i2 += 4;
    __m128 vi3x4567 = _mm_loadu_ps(i3);
    i3 += 4;

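    // Main loop: while more than one full 4-pixel block remains, load the
    // next block and compute 4 output pixels for both rows, using shuffled
    // copies of the current block for the left (x3456) and right (x5678)
    // kernel taps.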
    size_t w = input_width;
    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
      // vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
      const __m128 vi0x89AB = _mm_loadu_ps(i0);
      i0 += 4;
      // vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
      const __m128 vi1x89AB = _mm_loadu_ps(i1);
      i1 += 4;
      // vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
      const __m128 vi2x89AB = _mm_loadu_ps(i2);
      i2 += 4;
      // vi3x89AB = ( vi3B, vi3A, vi39, vi38 )
      const __m128 vi3x89AB = _mm_loadu_ps(i3);
      i3 += 4;

      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));

      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
      __m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
      __m128 vo1p1 = _mm_mul_ps(vi2x4567, vk11);
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));

      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);

      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
      vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi1x3456, vk00));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
      vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi3x3456, vk20));

      vi0x3012 = vi0x7456;
      vi1x3012 = vi1x7456;
      vi2x3012 = vi2x7456;
      vi3x3012 = vi3x7456;

      // vi0x8567 = ( vi07, vi06, vi05, vi08 )
      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
      // vi1x8567 = ( vi17, vi16, vi15, vi18 )
      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
      // vi2x8567 = ( vi27, vi26, vi25, vi28 )
      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
      // vi3x8567 = ( vi37, vi36, vi35, vi38 )
      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);

      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
      vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi2x5678, vk12));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));

      vi0x4567 = vi0x89AB;
      vi1x4567 = vi1x89AB;
      vi2x4567 = vi2x89AB;
      vi3x4567 = vi3x89AB;

      vo0p0 = _mm_add_ps(vo0p0, vo0p1);
      vo1p0 = _mm_add_ps(vo1p0, vo1p1);

      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
      __m128 vo1 = _mm_max_ps(vo1p0, vmin);

      vo0 = _mm_min_ps(vo0, vmax);
      vo1 = _mm_min_ps(vo1, vmax);

      _mm_storeu_ps(o1, vo1);
      o1 += 4;
      _mm_storeu_ps(o0, vo0);
      o0 += 4;
    }
    // Always process the last block of 1..4 pixels.
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
    {
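      // Zero the lanes that lie beyond the end of the row so that
      // out-of-bounds columns contribute nothing to the convolution.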
      vi0x4567 = _mm_and_ps(vmask, vi0x4567);
      vi1x4567 = _mm_and_ps(vmask, vi1x4567);
      vi2x4567 = _mm_and_ps(vmask, vi2x4567);
      vi3x4567 = _mm_and_ps(vmask, vi3x4567);

      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
      // vi3x7456 = ( vi36, vi35, vi34, vi37 )
      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));

      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
      __m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
      __m128 vo1p1 = _mm_mul_ps(vi2x4567, vk11);
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));

      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
      // vi3x3456 = ( vi36, vi35, vi34, vi33 )
      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);

      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
      vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi1x3456, vk00));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
      vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi3x3456, vk20));

      const __m128 vzero = _mm_setzero_ps();
      // vi0x8567 = ( vi07, vi06, vi05, 0.0 )
      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
      // vi1x8567 = ( vi17, vi16, vi15, 0.0 )
      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
      // vi2x8567 = ( vi27, vi26, vi25, 0.0 )
      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
      // vi3x8567 = ( vi37, vi36, vi35, 0.0 )
      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);

      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
      // vi3x5678 = ( vi38, vi37, vi36, vi35 )
      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
      vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi2x5678, vk12));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));

      vo0p0 = _mm_add_ps(vo0p0, vo0p1);
      vo1p0 = _mm_add_ps(vo1p0, vo1p1);

      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
      __m128 vo1 = _mm_max_ps(vo1p0, vmin);

      vo0 = _mm_min_ps(vo0, vmax);
      vo1 = _mm_min_ps(vo1, vmax);

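      // Store the remaining 1..4 output pixels per row: a full 16-byte store
      // in the common case, otherwise a 2-pixel low half followed by an
      // optional single pixel.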
      if XNN_LIKELY(w == 4 * sizeof(float)) {
        _mm_storeu_ps(o1, vo1);
        o1 += 4;
        _mm_storeu_ps(o0, vo0);
        o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          _mm_storel_pi((__m64*) o1, vo1);
          o1 += 2;
          _mm_storel_pi((__m64*) o0, vo0);
          o0 += 2;

          vo0 = _mm_movehl_ps(vo0, vo0);
          vo1 = _mm_movehl_ps(vo1, vo1);
        }
        if (w & (1 * sizeof(float))) {
          _mm_store_ss(o1, vo1);
          o1 += 1;
          _mm_store_ss(o0, vo0);
          o0 += 1;
        }
      }
    }

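    // Advance to the next pair of output rows: the input window shifts down
    // by two rows (the rows i2/i3 just read become the new i0/i1), and the
    // output pointers move past the two rows just written.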
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);

    o0 = o1;
    o1 = (float*) ((uintptr_t) o0 + input_width);

    output_height = doz(output_height, 2);
  } while (output_height != 0);
}