// Auto-generated file. Do not edit!
//   Template: src/f32-prelu/sse.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/math.h>
#include <xnnpack/prelu.h>

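// PReLU microkernel: out = in for non-negative inputs, out = in * weight
// (one weight per channel) for negative inputs. This variant processes
// 2 rows at a time, 8 channels per main-loop iteration, using SSE2.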
void xnn_f32_prelu_ukernel__sse2_2x8(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,
    const float*restrict weights,
    float*restrict output,
    size_t output_stride) XNN_DISABLE_TSAN
{
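  // channels is measured in bytes, as the assertion and the
  // c -= N * sizeof(float) loop steps below rely on.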
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
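  // With a single row left, alias row 1 onto row 0: its loads and stores
  // then just duplicate row 0's work instead of touching invalid memory.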
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = i0;
    o1 = o0;
  }
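
  // Each outer iteration consumes two rows: skip two strides forward,
  // minus the channel bytes the inner loops below already advanced.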
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;

  do {
    const float* w = weights;
    size_t c = channels;
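    // Main loop: two rows x 8 channels (two 4-float vectors) per pass.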
    for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
      const __m128 vw0123 = _mm_load_ps(w);
      const __m128 vw4567 = _mm_load_ps(w + 4);
      w += 8;

      const __m128 vi0x0123 = _mm_loadu_ps(i0);
      const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
      i0 += 8;
      const __m128 vi1x0123 = _mm_loadu_ps(i1);
      const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
      i1 += 8;

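      // Sign mask via integer compare: 0 > bits(x) is true exactly when
      // the sign bit of x is set, i.e. x is negative (or -0.0f), so each
      // negative lane becomes all-ones and each non-negative lane all-zeros.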
      const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
      const __m128 vmask0x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi0x0123)));
      const __m128 vprod0x4567 = _mm_mul_ps(vi0x4567, vw4567);
      const __m128 vmask0x4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi0x4567)));
      const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
      const __m128 vmask1x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi1x0123)));
      const __m128 vprod1x4567 = _mm_mul_ps(vi1x4567, vw4567);
      const __m128 vmask1x4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi1x4567)));

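      // Branchless select: (prod & mask) | (input & ~mask) picks w*x for
      // negative lanes and x itself for non-negative lanes.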
      const __m128 vacc0x0123 = _mm_or_ps(_mm_and_ps(vprod0x0123, vmask0x0123), _mm_andnot_ps(vmask0x0123, vi0x0123));
      const __m128 vacc0x4567 = _mm_or_ps(_mm_and_ps(vprod0x4567, vmask0x4567), _mm_andnot_ps(vmask0x4567, vi0x4567));
      const __m128 vacc1x0123 = _mm_or_ps(_mm_and_ps(vprod1x0123, vmask1x0123), _mm_andnot_ps(vmask1x0123, vi1x0123));
      const __m128 vacc1x4567 = _mm_or_ps(_mm_and_ps(vprod1x4567, vmask1x4567), _mm_andnot_ps(vmask1x4567, vi1x4567));

      _mm_storeu_ps(o0, vacc0x0123);
      _mm_storeu_ps(o0 + 4, vacc0x4567);
      o0 += 8;
      _mm_storeu_ps(o1, vacc1x0123);
      _mm_storeu_ps(o1 + 4, vacc1x4567);
      o1 += 8;
    }
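    // Handle a remaining group of 4 channels with the same masked select.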
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const __m128 vw0123 = _mm_load_ps(w);
      w += 4;

      const __m128 vi0x0123 = _mm_loadu_ps(i0);
      i0 += 4;
      const __m128 vi1x0123 = _mm_loadu_ps(i1);
      i1 += 4;

      const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
      const __m128 vmask0x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi0x0123)));
      const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
      const __m128 vmask1x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi1x0123)));

      __m128 vacc0x0123 = _mm_or_ps(_mm_and_ps(vprod0x0123, vmask0x0123), _mm_andnot_ps(vmask0x0123, vi0x0123));
      __m128 vacc1x0123 = _mm_or_ps(_mm_and_ps(vprod1x0123, vmask1x0123), _mm_andnot_ps(vmask1x0123, vi1x0123));

      _mm_storeu_ps(o0, vacc0x0123);
      o0 += 4;
      _mm_storeu_ps(o1, vacc1x0123);
      o1 += 4;
    }
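    // Remainder of 1-3 channels: full vectors are loaded, but only the
    // valid lanes are stored below. The over-read of w assumes the
    // padded weight buffer XNNPACK packs for this kernel.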
    if XNN_UNLIKELY(c != 0) {
      const __m128 vw0123 = _mm_load_ps(w);
      w = (const float*) ((uintptr_t) w + c);

      const __m128 vi0x0123 = _mm_loadu_ps(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      const __m128 vi1x0123 = _mm_loadu_ps(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);

      const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
      const __m128 vmask0x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi0x0123)));
      const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
      const __m128 vmask1x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi1x0123)));

      __m128 vacc0x0123 = _mm_or_ps(_mm_and_ps(vprod0x0123, vmask0x0123), _mm_andnot_ps(vmask0x0123, vi0x0123));
      __m128 vacc1x0123 = _mm_or_ps(_mm_and_ps(vprod1x0123, vmask1x0123), _mm_andnot_ps(vmask1x0123, vi1x0123));

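      // Write the low two lanes with MOVLPS, shift the high pair down
      // with MOVHLPS, then a possible final float is stored from lane 0.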
      if (c & (2 * sizeof(float))) {
        _mm_storel_pi((__m64*) o0, vacc0x0123);
        _mm_storel_pi((__m64*) o1, vacc1x0123);

        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);

        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        _mm_store_ss(o0, vacc0x0123);
        _mm_store_ss(o1, vacc1x0123);

        o0 += 1;
        o1 += 1;
      }
    }
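    // Advance both rows past the pair just processed. rows has not been
    // decremented yet, so rows < 4 means at most one valid row remains
    // for the next pair; alias row 1 onto row 0 again in that case.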
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    if XNN_UNPREDICTABLE(rows < 4) {
      i1 = i0;
      o1 = o0;
    }
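    // doz() is difference-or-zero, i.e. saturating subtraction, from
    // <xnnpack/math.h>.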
    rows = doz(rows, 2);
  } while (rows != 0);
}
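
// The block below is a minimal, hypothetical caller for this microkernel;
// the shapes, values, and dense strides are illustrative assumptions. In
// practice XNNPACK's prelu operator selects and invokes the kernel, so
// the sketch is kept disabled.
#if 0
#include <stdalign.h>

int main(void) {
  // 3 rows x 8 channels, row-major and densely packed, so each row's
  // stride equals the channel count in bytes. The weights are 16-byte
  // aligned because the kernel loads them with _mm_load_ps().
  alignas(16) static const float weights[8] = {
    0.25f, 0.25f, 0.25f, 0.25f, 0.1f, 0.1f, 0.1f, 0.1f,
  };
  float input[3 * 8];
  float output[3 * 8];
  for (int i = 0; i < 3 * 8; i++) {
    input[i] = (float) (i - 12);  // mix of negative and positive values
  }
  xnn_f32_prelu_ukernel__sse2_2x8(
    /*rows=*/3,
    /*channels=*/8 * sizeof(float),  // note: channels is in bytes
    input, /*input_stride=*/8 * sizeof(float),
    weights,
    output, /*output_stride=*/8 * sizeof(float));
  return 0;
}
#endif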