// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gavgpool.h>

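// Global average pooling microkernel (SSE, 4 channels per iteration):
// accumulates up to 7 input rows per channel, scales each per-channel sum
// by the caller-provided scale (typically the reciprocal of the pooled
// element count), and clamps the result to [min, max].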
void xnn_f32_gavgpool_minmax_ukernel_7x__sse_c4(
    size_t rows,
    size_t channels,
    const float* input,
    size_t input_stride,
    const float* zero,
    float* output,
    const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

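  // Set up one pointer per input row. Rows beyond `rows` are redirected to
  // the `zero` buffer so they contribute nothing to the per-channel sums.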
  const float* i0 = input;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  if (rows < 2) {
    i1 = zero;
  }
  const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
  if (rows <= 2) {
    i2 = zero;
  }
  const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
  if (rows < 4) {
    i3 = zero;
  }
  const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
  if (rows <= 4) {
    i4 = zero;
  }
  const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
  if (rows < 6) {
    i5 = zero;
  }
  const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
  if (rows <= 6) {
    i6 = zero;
  }
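  // Load the scaling factor and the output clamping bounds from params.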
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128 vmin = _mm_load_ps(params->sse2.min);
  const __m128 vmax = _mm_load_ps(params->sse2.max);

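  // Main loop: process 4 channels per iteration with full 4-float vectors,
  // summing the 7 rows in a tree of additions before scaling and clamping.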
  while (channels >= 4) {
    const __m128 vi0 = _mm_loadu_ps(i0);
    i0 += 4;
    const __m128 vi1 = _mm_loadu_ps(i1);
    i1 += 4;
    const __m128 vi2 = _mm_loadu_ps(i2);
    i2 += 4;
    const __m128 vi3 = _mm_loadu_ps(i3);
    i3 += 4;
    const __m128 vi4 = _mm_loadu_ps(i4);
    i4 += 4;
    const __m128 vi5 = _mm_loadu_ps(i5);
    i5 += 4;
    const __m128 vi6 = _mm_loadu_ps(i6);
    i6 += 4;

    const __m128 vsum01 = _mm_add_ps(vi0, vi1);
    const __m128 vsum23 = _mm_add_ps(vi2, vi3);
    const __m128 vsum45 = _mm_add_ps(vi4, vi5);

    const __m128 vsum016 = _mm_add_ps(vsum01, vi6);
    const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);

    const __m128 vsum = _mm_add_ps(vsum016, vsum2345);

    __m128 vout = _mm_mul_ps(vsum, vscale);
    vout = _mm_max_ps(vout, vmin);
    vout = _mm_min_ps(vout, vmax);

    _mm_storeu_ps(output, vout);
    output += 4;

    channels -= 4;
  }
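  // Remainder: 1-3 channels are left. The loads still read a full vector of
  // 4 floats, so the input rows are expected to remain readable past the
  // last channel; only the valid lanes are stored below.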
  if (channels != 0) {
    const __m128 vi0 = _mm_loadu_ps(i0);
    const __m128 vi1 = _mm_loadu_ps(i1);
    const __m128 vi2 = _mm_loadu_ps(i2);
    const __m128 vi3 = _mm_loadu_ps(i3);
    const __m128 vi4 = _mm_loadu_ps(i4);
    const __m128 vi5 = _mm_loadu_ps(i5);
    const __m128 vi6 = _mm_loadu_ps(i6);

    const __m128 vsum01 = _mm_add_ps(vi0, vi1);
    const __m128 vsum23 = _mm_add_ps(vi2, vi3);
    const __m128 vsum45 = _mm_add_ps(vi4, vi5);

    const __m128 vsum016 = _mm_add_ps(vsum01, vi6);
    const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);

    const __m128 vsum = _mm_add_ps(vsum016, vsum2345);

    __m128 vout = _mm_mul_ps(vsum, vscale);
    vout = _mm_max_ps(vout, vmin);
    vout = _mm_min_ps(vout, vmax);

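    // Store only the valid lanes: the low 2 lanes if bit 1 of `channels` is
    // set (shifting the high half down afterwards), then one more lane if
    // bit 0 is set.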
    if (channels & 2) {
      _mm_storel_pi((__m64*) output, vout);
      vout = _mm_movehl_ps(vout, vout);
      output += 2;
    }
    if (channels & 1) {
      _mm_store_ss(output, vout);
    }
  }
}