// Auto-generated file. Do not edit!
//   Template: src/f16-gavgpool/unipass-f16c.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gavgpool.h>

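// Single-pass ("unipass") global average pooling microkernel for f16 data:
// sums up to 7 rows per channel, scales the sum by params->avx.scale
// (typically 1/rows), and clamps to [min, max], using F16C conversions to
// do the arithmetic in f32, 8 channels at a time.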
void xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8(
    size_t rows,
    size_t channels,
    const void* input,
    size_t input_stride,
    const void* zero,
    void* output,
    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

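  // Set up one input pointer per row. Pointers for rows beyond `rows` are
  // redirected to the zero buffer, so those rows add nothing to the sums.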
  const uint16_t* i0 = input;
  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = (const uint16_t*) zero;
  }
  const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = (const uint16_t*) zero;
  }
  const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = (const uint16_t*) zero;
  }
  const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = (const uint16_t*) zero;
  }
  const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = (const uint16_t*) zero;
  }
  const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = (const uint16_t*) zero;
  }

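  // Scale and output clamping bounds, loaded pre-broadcast across all 8 lanes.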
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  const __m256 vmin = _mm256_load_ps(params->avx.min);
  const __m256 vmax = _mm256_load_ps(params->avx.max);
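  // Main loop: 8 channels per iteration. Each addition is performed in f32,
  // and the accumulator is rounded back to f16 after every add, matching the
  // numerics of a native f16 accumulator.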
  for (; channels >= 8; channels -= 8) {
    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
    i0 += 8;
    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
    i1 += 8;

    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
    __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
    i2 += 8;

    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
    i3 += 8;
    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
    i4 += 8;
    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
    i5 += 8;
    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
    i6 += 8;
    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);

    vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);

    __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);

    vout01234567 = _mm256_min_ps(vout01234567, vmax);

    _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
    output = (uint16_t*) output + 8;
  }
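  // Remainder: 1-7 trailing channels. A full 8-element vector is still loaded
  // and reduced (XNN_OOB_READS marks the out-of-bounds reads as intentional);
  // only the valid lanes are stored below.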
  if XNN_UNLIKELY(channels != 0) {
    {
      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));

      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
      __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);

      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);

      vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
      __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
      vout01234567 = _mm256_min_ps(vout01234567, vmax);

      __m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC);
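      // Store the valid lanes: 4, then 2, then 1 half-precision value, keyed
      // off the low bits of the remaining channel count.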
      if (channels & 4) {
        _mm_storel_epi64((__m128i*) output, vh01234567);
        output = (uint16_t*) output + 4;
        vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
      }
      if (channels & 2) {
        *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh01234567);
        output = (uint16_t*) output + 2;
        vh01234567 = _mm_srli_epi64(vh01234567, 32);
      }
      if (channels & 1) {
        *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh01234567, 0);
      }
    }
  }
}