• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/dwconv.h>

xnn_f32_dwconv_ukernel_up4x25__sse_acc2(size_t channels,size_t output_width,const float ** input,const float * weights,float * output,size_t input_stride,size_t output_increment,const union xnn_f32_output_params params[restrict static1])17 void xnn_f32_dwconv_ukernel_up4x25__sse_acc2(
18     size_t channels,
19     size_t output_width,
20     const float** input,
21     const float* weights,
22     float* output,
23     size_t input_stride,
24     size_t output_increment,
25     const union xnn_f32_output_params params[restrict static 1])
26 {
27   assert(channels != 0);
28   assert(output_width != 0);
29 
30   const __m128 vmax = _mm_load_ps(params->sse.max);
31   const __m128 vmin = _mm_load_ps(params->sse.min);
32   do {
33     const float* i0 = input[0];
34     assert(i0 != NULL);
35     const float* i1 = input[1];
36     assert(i1 != NULL);
37     const float* i2 = input[2];
38     assert(i2 != NULL);
39     const float* i3 = input[3];
40     assert(i3 != NULL);
41     const float* i4 = input[4];
42     assert(i4 != NULL);
43     const float* i5 = input[5];
44     assert(i5 != NULL);
45     const float* i6 = input[6];
46     assert(i6 != NULL);
47     const float* i7 = input[7];
48     assert(i7 != NULL);
49     const float* i8 = input[8];
50     assert(i8 != NULL);
51     const float* i9 = input[9];
52     assert(i9 != NULL);
53     const float* i10 = input[10];
54     assert(i10 != NULL);
55     const float* i11 = input[11];
56     assert(i11 != NULL);
57     const float* i12 = input[12];
58     assert(i12 != NULL);
59     const float* i13 = input[13];
60     assert(i13 != NULL);
61     const float* i14 = input[14];
62     assert(i14 != NULL);
63     const float* i15 = input[15];
64     assert(i15 != NULL);
65     const float* i16 = input[16];
66     assert(i16 != NULL);
67     const float* i17 = input[17];
68     assert(i17 != NULL);
69     const float* i18 = input[18];
70     assert(i18 != NULL);
71     const float* i19 = input[19];
72     assert(i19 != NULL);
73     const float* i20 = input[20];
74     assert(i20 != NULL);
75     const float* i21 = input[21];
76     assert(i21 != NULL);
77     const float* i22 = input[22];
78     assert(i22 != NULL);
79     const float* i23 = input[23];
80     assert(i23 != NULL);
81     const float* i24 = input[24];
82     assert(i24 != NULL);
83     input = (const float**) ((uintptr_t) input + input_stride);
84 
85     size_t c = channels;
86     const float* w = weights;
87     for (; c >= 4; c -= 4) {
88       __m128 vacc0123p0 = _mm_load_ps(w);
89 
90 
91       const __m128 vi0x0123 = _mm_loadu_ps(i0);
92       i0 += 4;
93 
94       const __m128 vk0x0123 = _mm_load_ps(w + 4);
95       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
96 
97       const __m128 vi1x0123 = _mm_loadu_ps(i1);
98       i1 += 4;
99 
100       const __m128 vk1x0123 = _mm_load_ps(w + 8);
101       __m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
102 
103       const __m128 vi2x0123 = _mm_loadu_ps(i2);
104       i2 += 4;
105 
106       const __m128 vk2x0123 = _mm_load_ps(w + 12);
107       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
108 
109       const __m128 vi3x0123 = _mm_loadu_ps(i3);
110       i3 += 4;
111 
112       const __m128 vk3x0123 = _mm_load_ps(w + 16);
113       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
114 
115       const __m128 vi4x0123 = _mm_loadu_ps(i4);
116       i4 += 4;
117 
118       const __m128 vk4x0123 = _mm_load_ps(w + 20);
119       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
120 
121       const __m128 vi5x0123 = _mm_loadu_ps(i5);
122       i5 += 4;
123 
124       const __m128 vk5x0123 = _mm_load_ps(w + 24);
125       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
126 
127       const __m128 vi6x0123 = _mm_loadu_ps(i6);
128       i6 += 4;
129 
130       const __m128 vk6x0123 = _mm_load_ps(w + 28);
131       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
132 
133       const __m128 vi7x0123 = _mm_loadu_ps(i7);
134       i7 += 4;
135 
136       const __m128 vk7x0123 = _mm_load_ps(w + 32);
137       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
138 
139       const __m128 vi8x0123 = _mm_loadu_ps(i8);
140       i8 += 4;
141 
142       const __m128 vk8x0123 = _mm_load_ps(w + 36);
143       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
144 
145       const __m128 vi9x0123 = _mm_loadu_ps(i9);
146       i9 += 4;
147 
148       const __m128 vk9x0123 = _mm_load_ps(w + 40);
149       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi9x0123, vk9x0123));
150 
151       const __m128 vi10x0123 = _mm_loadu_ps(i10);
152       i10 += 4;
153 
154       const __m128 vk10x0123 = _mm_load_ps(w + 44);
155       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi10x0123, vk10x0123));
156 
157       const __m128 vi11x0123 = _mm_loadu_ps(i11);
158       i11 += 4;
159 
160       const __m128 vk11x0123 = _mm_load_ps(w + 48);
161       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi11x0123, vk11x0123));
162 
163       const __m128 vi12x0123 = _mm_loadu_ps(i12);
164       i12 += 4;
165 
166       const __m128 vk12x0123 = _mm_load_ps(w + 52);
167       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi12x0123, vk12x0123));
168 
169       const __m128 vi13x0123 = _mm_loadu_ps(i13);
170       i13 += 4;
171 
172       const __m128 vk13x0123 = _mm_load_ps(w + 56);
173       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi13x0123, vk13x0123));
174 
175       const __m128 vi14x0123 = _mm_loadu_ps(i14);
176       i14 += 4;
177 
178       const __m128 vk14x0123 = _mm_load_ps(w + 60);
179       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi14x0123, vk14x0123));
180 
181       const __m128 vi15x0123 = _mm_loadu_ps(i15);
182       i15 += 4;
183 
184       const __m128 vk15x0123 = _mm_load_ps(w + 64);
185       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi15x0123, vk15x0123));
186 
187       const __m128 vi16x0123 = _mm_loadu_ps(i16);
188       i16 += 4;
189 
190       const __m128 vk16x0123 = _mm_load_ps(w + 68);
191       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi16x0123, vk16x0123));
192 
193       const __m128 vi17x0123 = _mm_loadu_ps(i17);
194       i17 += 4;
195 
196       const __m128 vk17x0123 = _mm_load_ps(w + 72);
197       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi17x0123, vk17x0123));
198 
199       const __m128 vi18x0123 = _mm_loadu_ps(i18);
200       i18 += 4;
201 
202       const __m128 vk18x0123 = _mm_load_ps(w + 76);
203       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi18x0123, vk18x0123));
204 
205       const __m128 vi19x0123 = _mm_loadu_ps(i19);
206       i19 += 4;
207 
208       const __m128 vk19x0123 = _mm_load_ps(w + 80);
209       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi19x0123, vk19x0123));
210 
211       const __m128 vi20x0123 = _mm_loadu_ps(i20);
212       i20 += 4;
213 
214       const __m128 vk20x0123 = _mm_load_ps(w + 84);
215       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi20x0123, vk20x0123));
216 
217       const __m128 vi21x0123 = _mm_loadu_ps(i21);
218       i21 += 4;
219 
220       const __m128 vk21x0123 = _mm_load_ps(w + 88);
221       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi21x0123, vk21x0123));
222 
223       const __m128 vi22x0123 = _mm_loadu_ps(i22);
224       i22 += 4;
225 
226       const __m128 vk22x0123 = _mm_load_ps(w + 92);
227       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi22x0123, vk22x0123));
228 
229       const __m128 vi23x0123 = _mm_loadu_ps(i23);
230       i23 += 4;
231 
232       const __m128 vk23x0123 = _mm_load_ps(w + 96);
233       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi23x0123, vk23x0123));
234 
235       const __m128 vi24x0123 = _mm_loadu_ps(i24);
236       i24 += 4;
237 
238       const __m128 vk24x0123 = _mm_load_ps(w + 100);
239       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi24x0123, vk24x0123));
240 
241       w += 104;
242 
243       // Add up all accumulators to vacc0123p0
244       vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
245 
246       __m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
247       vacc0123 = _mm_min_ps(vacc0123, vmax);
248 
249       _mm_storeu_ps(output, vacc0123);
250       output += 4;
251     }
252     if XNN_UNLIKELY(c != 0) {
253       __m128 vacc0123p0 = _mm_load_ps(w);
254 
255       const __m128 vi0x0123 = _mm_loadu_ps(i0);
256       const __m128 vk0x0123 = _mm_load_ps(w + 4);
257       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
258 
259       const __m128 vi1x0123 = _mm_loadu_ps(i1);
260       const __m128 vk1x0123 = _mm_load_ps(w + 8);
261       __m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
262 
263       const __m128 vi2x0123 = _mm_loadu_ps(i2);
264       const __m128 vk2x0123 = _mm_load_ps(w + 12);
265       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
266 
267       const __m128 vi3x0123 = _mm_loadu_ps(i3);
268       const __m128 vk3x0123 = _mm_load_ps(w + 16);
269       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
270 
271       const __m128 vi4x0123 = _mm_loadu_ps(i4);
272       const __m128 vk4x0123 = _mm_load_ps(w + 20);
273       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
274 
275       const __m128 vi5x0123 = _mm_loadu_ps(i5);
276       const __m128 vk5x0123 = _mm_load_ps(w + 24);
277       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
278 
279       const __m128 vi6x0123 = _mm_loadu_ps(i6);
280       const __m128 vk6x0123 = _mm_load_ps(w + 28);
281       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
282 
283       const __m128 vi7x0123 = _mm_loadu_ps(i7);
284       const __m128 vk7x0123 = _mm_load_ps(w + 32);
285       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
286 
287       const __m128 vi8x0123 = _mm_loadu_ps(i8);
288       const __m128 vk8x0123 = _mm_load_ps(w + 36);
289       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
290 
291       const __m128 vi9x0123 = _mm_loadu_ps(i9);
292       const __m128 vk9x0123 = _mm_load_ps(w + 40);
293       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi9x0123, vk9x0123));
294 
295       const __m128 vi10x0123 = _mm_loadu_ps(i10);
296       const __m128 vk10x0123 = _mm_load_ps(w + 44);
297       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi10x0123, vk10x0123));
298 
299       const __m128 vi11x0123 = _mm_loadu_ps(i11);
300       const __m128 vk11x0123 = _mm_load_ps(w + 48);
301       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi11x0123, vk11x0123));
302 
303       const __m128 vi12x0123 = _mm_loadu_ps(i12);
304       const __m128 vk12x0123 = _mm_load_ps(w + 52);
305       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi12x0123, vk12x0123));
306 
307       const __m128 vi13x0123 = _mm_loadu_ps(i13);
308       const __m128 vk13x0123 = _mm_load_ps(w + 56);
309       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi13x0123, vk13x0123));
310 
311       const __m128 vi14x0123 = _mm_loadu_ps(i14);
312       const __m128 vk14x0123 = _mm_load_ps(w + 60);
313       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi14x0123, vk14x0123));
314 
315       const __m128 vi15x0123 = _mm_loadu_ps(i15);
316       const __m128 vk15x0123 = _mm_load_ps(w + 64);
317       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi15x0123, vk15x0123));
318 
319       const __m128 vi16x0123 = _mm_loadu_ps(i16);
320       const __m128 vk16x0123 = _mm_load_ps(w + 68);
321       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi16x0123, vk16x0123));
322 
323       const __m128 vi17x0123 = _mm_loadu_ps(i17);
324       const __m128 vk17x0123 = _mm_load_ps(w + 72);
325       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi17x0123, vk17x0123));
326 
327       const __m128 vi18x0123 = _mm_loadu_ps(i18);
328       const __m128 vk18x0123 = _mm_load_ps(w + 76);
329       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi18x0123, vk18x0123));
330 
331       const __m128 vi19x0123 = _mm_loadu_ps(i19);
332       const __m128 vk19x0123 = _mm_load_ps(w + 80);
333       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi19x0123, vk19x0123));
334 
335       const __m128 vi20x0123 = _mm_loadu_ps(i20);
336       const __m128 vk20x0123 = _mm_load_ps(w + 84);
337       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi20x0123, vk20x0123));
338 
339       const __m128 vi21x0123 = _mm_loadu_ps(i21);
340       const __m128 vk21x0123 = _mm_load_ps(w + 88);
341       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi21x0123, vk21x0123));
342 
343       const __m128 vi22x0123 = _mm_loadu_ps(i22);
344       const __m128 vk22x0123 = _mm_load_ps(w + 92);
345       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi22x0123, vk22x0123));
346 
347       const __m128 vi23x0123 = _mm_loadu_ps(i23);
348       const __m128 vk23x0123 = _mm_load_ps(w + 96);
349       vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi23x0123, vk23x0123));
350 
351       const __m128 vi24x0123 = _mm_loadu_ps(i24);
352       const __m128 vk24x0123 = _mm_load_ps(w + 100);
353       vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi24x0123, vk24x0123));
354 
355       // Add up all accumulators to vacc0123p0
356       vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
357 
358       __m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
359       vacc0123 = _mm_min_ps(vacc0123, vmax);
360 
361       if (c & 2) {
362         _mm_storel_pi((__m64*) output, vacc0123);
363         vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
364         output += 2;
365       }
366       if (c & 1) {
367         _mm_store_ss(output, vacc0123);
368         output += 1;
369       }
370     }
371 
372     output = (float*) ((uintptr_t) output + output_increment);
373   } while (--output_width != 0);
374 }
375