// Auto-generated file. Do not edit!
//   Template: src/f32-prelu/sse.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/math.h>
#include <xnnpack/prelu.h>

void xnn_f32_prelu_ukernel__sse2_2x4(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,
    const float*restrict weights,
    float*restrict output,
    size_t output_stride) XNN_DISABLE_TSAN
{
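  // Note: "channels" and both strides are measured in bytes, not in floats;
  // the asserts below and the "c -= 4 * sizeof(float)" loop step rely on this.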
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
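  // With a single row left, alias row 1 onto row 0 so the two-row path runs
  // unchanged: the duplicate computation stores the same values to the same
  // addresses.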
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = i0;
    o1 = o0;
  }

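  // Per-iteration pointer bumps, in bytes: skip ahead two full rows, minus
  // the "channels" bytes the inner loops have already advanced past.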
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;

  do {
    const float* w = weights;
    size_t c = channels;
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
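      // The weight load is aligned (_mm_load_ps), which assumes the caller
      // provides a 16-byte-aligned slope buffer; input rows can start at any
      // address, so their loads are unaligned.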
      const __m128 vw0123 = _mm_load_ps(w);
      w += 4;

      const __m128 vi0x0123 = _mm_loadu_ps(i0);
      i0 += 4;
      const __m128 vi1x0123 = _mm_loadu_ps(i1);
      i1 += 4;

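      // Detect negative lanes with a signed integer compare against zero:
      // 0 > bits(x) exactly when the sign bit of x is set, i.e. x < 0
      // (-0.0f also takes the product branch, which still yields a zero).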
      const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
      const __m128 vmask0x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi0x0123)));
      const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
      const __m128 vmask1x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi1x0123)));

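      // SSE2 has no blend instruction, so select with AND/ANDNOT/OR:
      // acc = x < 0 ? x * w : x, which is exactly PReLU.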
      const __m128 vacc0x0123 = _mm_or_ps(_mm_and_ps(vprod0x0123, vmask0x0123), _mm_andnot_ps(vmask0x0123, vi0x0123));
      const __m128 vacc1x0123 = _mm_or_ps(_mm_and_ps(vprod1x0123, vmask1x0123), _mm_andnot_ps(vmask1x0123, vi1x0123));

      _mm_storeu_ps(o0, vacc0x0123);
      o0 += 4;
      _mm_storeu_ps(o1, vacc1x0123);
      o1 += 4;
    }
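    // Remainder path for 1-3 trailing channels. The loads still fetch a full
    // 4-float vector, which assumes XNNPACK's usual padding guarantee that a
    // few extra bytes past the end of each buffer are readable.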
    if XNN_UNLIKELY(c != 0) {
      const __m128 vw0123 = _mm_load_ps(w);
      w = (const float*) ((uintptr_t) w + c);

      const __m128 vi0x0123 = _mm_loadu_ps(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      const __m128 vi1x0123 = _mm_loadu_ps(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);

      const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
      const __m128 vmask0x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi0x0123)));
      const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
      const __m128 vmask1x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi1x0123)));

      __m128 vacc0x0123 = _mm_or_ps(_mm_and_ps(vprod0x0123, vmask0x0123), _mm_andnot_ps(vmask0x0123, vi0x0123));
      __m128 vacc1x0123 = _mm_or_ps(_mm_and_ps(vprod1x0123, vmask1x0123), _mm_andnot_ps(vmask1x0123, vi1x0123));

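      // Partial store of the 1-3 valid lanes: write the low pair if at least
      // two floats remain, shift the high half down with MOVHLPS, then write
      // a single float if one is left.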
      if (c & (2 * sizeof(float))) {
        _mm_storel_pi((__m64*) o0, vacc0x0123);
        _mm_storel_pi((__m64*) o1, vacc1x0123);

        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);

        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        _mm_store_ss(o0, vacc0x0123);
        _mm_store_ss(o1, vacc1x0123);

        o0 += 1;
        o1 += 1;
      }
    }
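    // Advance to the next pair of rows; the increments are in bytes and
    // already discount the channel bytes consumed above.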
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
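    // "rows" still holds the pre-decrement count, so fewer than 4 means at
    // most one row remains for the next iteration: alias row 1 onto row 0
    // again.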
    if XNN_UNPREDICTABLE(rows < 4) {
      i1 = i0;
      o1 = o0;
    }
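    // doz() is "decrement or zero" (saturating subtraction) from
    // <xnnpack/math.h>, so an odd trailing row does not underflow the counter.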
    rows = doz(rows, 2);
  } while (rows != 0);
}