// Auto-generated file. Do not edit!
//   Template: src/f32-ppmm/sse.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/ppmm.h>

void xnn_f32_ppmm_minmax_ukernel_4x8__sse(
  size_t mr,
  size_t nc,
  size_t kc,
  const float*restrict a,
  const float*restrict w,
  float*restrict c,
  size_t cm_stride,
  size_t cn_stride,
  const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);

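  // Set up the output row pointers. When mr < 4, the pointers for the unused
  // rows are clamped to the previous row, so their stores harmlessly duplicate
  // a valid row instead of writing out of bounds.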
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
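    // Initialize the accumulators from the first 8 floats of the packed
    // weights, which hold the bias values for this column tile in XNNPACK's
    // packed-weight layout.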
    __m128 vacc0x0123 = _mm_load_ps(w);
    __m128 vacc0x4567 = _mm_load_ps(w + 4);
    __m128 vacc1x0123 = vacc0x0123;
    __m128 vacc1x4567 = vacc0x4567;
    __m128 vacc2x0123 = vacc0x0123;
    __m128 vacc2x4567 = vacc0x4567;
    __m128 vacc3x0123 = vacc0x0123;
    __m128 vacc3x4567 = vacc0x4567;
    w += 8;

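    // Loop over K. A is pre-packed into panels of 4 rows, so each iteration
    // loads one float per row; k counts bytes, sizeof(float) per iteration.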
    size_t k = kc;
    do {
      const __m128 va0123 = _mm_load_ps(a);
      a += 4;

      const __m128 vb0123 = _mm_load_ps(w);
      const __m128 vb4567 = _mm_load_ps(w + 4);
      w += 8;

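      // Broadcast each row's A value across all four lanes of a register.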
      const __m128 va0000 = _mm_shuffle_ps(va0123, va0123, _MM_SHUFFLE(0, 0, 0, 0));
      const __m128 va1111 = _mm_shuffle_ps(va0123, va0123, _MM_SHUFFLE(1, 1, 1, 1));
      const __m128 va2222 = _mm_shuffle_ps(va0123, va0123, _MM_SHUFFLE(2, 2, 2, 2));
      const __m128 va3333 = _mm_shuffle_ps(va0123, va0123, _MM_SHUFFLE(3, 3, 3, 3));

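      // Multiply-accumulate into the 4x8 tile. SSE has no fused multiply-add,
      // so each update is a separate multiply followed by an add.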
      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0000, vb0123));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1111, vb0123));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2222, vb0123));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3333, vb0123));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0000, vb4567));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1111, vb4567));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2222, vb4567));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3333, vb4567));

      k -= sizeof(float);
    } while (k != 0);

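    // Clamp the accumulators to the [min, max] output range from params.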
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);

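    // Main path: store a full 4x8 output tile.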
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_ps(c3, vacc3x0123);
      _mm_storeu_ps(c3 + 4, vacc3x4567);
      _mm_storeu_ps(c2, vacc2x0123);
      _mm_storeu_ps(c2 + 4, vacc2x4567);
      _mm_storeu_ps(c1, vacc1x0123);
      _mm_storeu_ps(c1 + 4, vacc1x4567);
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);

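      // Rewind the packed A pointer: the K loop advanced it by kc floats
      // (kc * 4 bytes), so the same panel is reused for the next column tile.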
      a = (const float*) ((uintptr_t) a - kc * 4);

      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      nc -= 8;
    } else {
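      // Remainder path: store the final nc (< 8) columns in chunks of 4, 2,
      // and 1, shifting the remaining lanes down after each partial store.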
      if (nc & 4) {
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

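        // Move the upper two lanes down so the next unstored element is in
        // lane 0 for the nc & 1 case.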
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}