// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

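// 3x3 depthwise convolution microkernel operating on one 2D plane of m rows by n
// pixels, with unit stride and one pixel of implicit zero padding on the left and
// right (so each output row is also n pixels wide). The caller handles vertical
// padding: output row y reads input rows y, y + 1, and y + 2 directly.
//
// As a sketch of the computation, for output pixel x of output row y:
//   output[y][x] = clamp(bias + sum over ky, kx in 0..2 of
//                        k[ky][kx] * input[y + ky][x + kx - 1], min, max)
// where input[.][-1] and input[.][n] are treated as zero.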
void xnn_f32_dwconv_spchw_ukernel_3x3p1__sse(
    size_t m,
    size_t n,
    const float* input,
    const float* weights,
    float* output,
    size_t input_tuple_stride,
    size_t output_tuple_stride,
    size_t input_width_stride,
    size_t output_width_stride,
    const union xnn_f32_spchw_params params[restrict static 1])
{
  assert(n != 0);

  const __m128 vmask = _mm_load_ps((const float*) params->sse.mask);
  const __m128 vmax = _mm_load_ps(params->sse.max);
  const __m128 vmin = _mm_load_ps(params->sse.min);

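  // Over one row, the input pointers advance by input_tuple_stride round_up_po2(n, 4) / 4
  // times and the output pointer advances by output_tuple_stride (n - 1) / 4 times; these
  // increments undo that and step each pointer down to the next row.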
  const size_t input_width_increment = input_width_stride - round_up_po2(n, 4) / 4 * input_tuple_stride;
  const size_t output_width_increment = output_width_stride - (n - 1) / 4 * output_tuple_stride;

  // No vertical padding.
  const float* i0 = input;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width_stride);
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width_stride);

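  // Weights layout: weights[0] is the bias, weights[1..9] are the 3x3 kernel taps in
  // row-major order; each value is broadcast to all four lanes.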
  const __m128 vbias = _mm_load1_ps(weights);
  const __m128 vk00 = _mm_load1_ps(weights + 1);
  const __m128 vk01 = _mm_load1_ps(weights + 2);
  const __m128 vk02 = _mm_load1_ps(weights + 3);
  const __m128 vk10 = _mm_load1_ps(weights + 4);
  const __m128 vk11 = _mm_load1_ps(weights + 5);
  const __m128 vk12 = _mm_load1_ps(weights + 6);
  const __m128 vk20 = _mm_load1_ps(weights + 7);
  const __m128 vk21 = _mm_load1_ps(weights + 8);
  const __m128 vk22 = _mm_load1_ps(weights + 9);

  do {
    // vi0x3012 = ( vi02, vi01, vi00, vi03 )
    __m128 vi0x3012 = _mm_setzero_ps();
    // vi1x3012 = ( vi12, vi11, vi10, vi13 )
    __m128 vi1x3012 = _mm_setzero_ps();
    // vi2x3012 = ( vi22, vi21, vi20, vi23 )
    __m128 vi2x3012 = _mm_setzero_ps();
    // vi0x4567 = ( vi07, vi06, vi05, vi04 )
    __m128 vi0x4567 = _mm_loadu_ps(i0);
    i0 = (const float*) ((uintptr_t) i0 + input_tuple_stride);
    // vi1x4567 = ( vi17, vi16, vi15, vi14 )
    __m128 vi1x4567 = _mm_loadu_ps(i1);
    i1 = (const float*) ((uintptr_t) i1 + input_tuple_stride);
    // vi2x4567 = ( vi27, vi26, vi25, vi24 )
    __m128 vi2x4567 = _mm_loadu_ps(i2);
    i2 = (const float*) ((uintptr_t) i2 + input_tuple_stride);

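    // Main loop: compute 4 output pixels per iteration while more than 4 pixels remain;
    // the final 1..4 pixels are handled by the masked tail block below.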
    size_t k = n;
    for (; k > 4; k -= 4) {
      __m128 vo4567p0 = vbias;

      // vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
      const __m128 vi0x89AB = _mm_loadu_ps(i0);
      i0 = (const float*) ((uintptr_t) i0 + input_tuple_stride);
      // vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
      const __m128 vi1x89AB = _mm_loadu_ps(i1);
      i1 = (const float*) ((uintptr_t) i1 + input_tuple_stride);
      // vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
      const __m128 vi2x89AB = _mm_loadu_ps(i2);
      i2 = (const float*) ((uintptr_t) i2 + input_tuple_stride);

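      // Rotate each row vector by one lane (lane 0 takes the old lane 3); the
      // _mm_move_ss below then replaces lane 0 with pixel 3 kept from the previous
      // block, forming the pixels 3..6 window used by the left kernel column.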
      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));

      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x4567, vk01));
      __m128 vo4567p1 = _mm_mul_ps(vi1x4567, vk11);
      __m128 vo4567p2 = _mm_mul_ps(vi2x4567, vk21);

      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);

      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x3456, vk00));
      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x3456, vk10));
      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x3456, vk20));

      vi0x3012 = vi0x7456;
      vi1x3012 = vi1x7456;
      vi2x3012 = vi2x7456;

      // vi0x8567 = ( vi07, vi06, vi05, vi08 )
      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
      // vi1x8567 = ( vi17, vi16, vi15, vi18 )
      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
      // vi2x8567 = ( vi27, vi26, vi25, vi28 )
      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);

      // vi0x5678 = ( vi08, vi07, vi06, vi05 )
      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
      // vi1x5678 = ( vi18, vi17, vi16, vi15 )
      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
      // vi2x5678 = ( vi28, vi27, vi26, vi25 )
      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));

      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x5678, vk02));
      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x5678, vk12));
      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x5678, vk22));

      vi0x4567 = vi0x89AB;
      vi1x4567 = vi1x89AB;
      vi2x4567 = vi2x89AB;

      __m128 vo = _mm_add_ps(vo4567p0, vo4567p1);
      vo = _mm_add_ps(vo, vo4567p2);

      vo = _mm_max_ps(vo, vmin);
      vo = _mm_min_ps(vo, vmax);

      _mm_storeu_ps(output, vo);
      output = (float*) ((uintptr_t) output + output_tuple_stride);
    }
    // Always process the last block of 1..4 pixels.
    assert(k >= 1);
    assert(k <= 4);
    {
      __m128 vo4567p0 = vbias;

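      // Keep only the k valid input lanes of this final block; lanes past the end of
      // the row are zeroed so the implicit right padding contributes nothing.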
      vi0x4567 = _mm_and_ps(vmask, vi0x4567);
      vi1x4567 = _mm_and_ps(vmask, vi1x4567);
      vi2x4567 = _mm_and_ps(vmask, vi2x4567);

      // vi0x7456 = ( vi06, vi05, vi04, vi07 )
      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
      // vi1x7456 = ( vi16, vi15, vi14, vi17 )
      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
      // vi2x7456 = ( vi26, vi25, vi24, vi27 )
      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));

      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x4567, vk01));
      __m128 vo4567p1 = _mm_mul_ps(vi1x4567, vk11);
      __m128 vo4567p2 = _mm_mul_ps(vi2x4567, vk21);

      // vi0x3456 = ( vi06, vi05, vi04, vi03 )
      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
      // vi1x3456 = ( vi16, vi15, vi14, vi13 )
      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
      // vi2x3456 = ( vi26, vi25, vi24, vi23 )
      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);

      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x3456, vk00));
      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x3456, vk10));
      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x3456, vk20));

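      // There is no block to the right of this one: the pixel after the last valid
      // pixel is the implicit zero padding, so splice in zero instead of loading input.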
      const __m128 vzero = _mm_setzero_ps();
      // vi0x8567 = ( vi07, vi06, vi05, 0.0 )
      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
      // vi1x8567 = ( vi17, vi16, vi15, 0.0 )
      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
      // vi2x8567 = ( vi27, vi26, vi25, 0.0 )
      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);

      // vi0x5678 = ( 0.0, vi07, vi06, vi05 )
      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
      // vi1x5678 = ( 0.0, vi17, vi16, vi15 )
      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
      // vi2x5678 = ( 0.0, vi27, vi26, vi25 )
      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));

      vo4567p0 = _mm_add_ps(vo4567p0, _mm_mul_ps(vi0x5678, vk02));
      vo4567p1 = _mm_add_ps(vo4567p1, _mm_mul_ps(vi1x5678, vk12));
      vo4567p2 = _mm_add_ps(vo4567p2, _mm_mul_ps(vi2x5678, vk22));

      __m128 vo = _mm_add_ps(vo4567p0, vo4567p1);
      vo = _mm_add_ps(vo, vo4567p2);

      vo = _mm_max_ps(vo, vmin);
      vo = _mm_min_ps(vo, vmax);

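      // Store all 4 outputs when the row width is a multiple of 4; otherwise write the
      // remaining 1..3 outputs with 64-bit and 32-bit stores.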
      if XNN_LIKELY(k == 4) {
        _mm_storeu_ps(output, vo);
      } else {
        float* output_lo = output;
        if (k & 2) {
          _mm_storel_pi((__m64*) output_lo, vo);
          output_lo += 2;
          vo = _mm_movehl_ps(vo, vo);
        }
        if (k & 1) {
          _mm_store_ss(output_lo, vo);
        }
      }
    }

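    // Step the input row pointers and the output pointer to the start of the next row.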
    i0 = (const float*) ((uintptr_t) i0 + input_width_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_width_increment);
    i2 = (const float*) ((uintptr_t) i2 + input_width_increment);
    output = (float*) ((uintptr_t) output + output_width_increment);
  } while (--m != 0);
}