// Auto-generated file. Do not edit!
//   Template: src/f16-dwconv/up-fma3.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>


void xnn_f16_dwconv_minmax_ukernel_up8x9__fma3(
    size_t channels,
    size_t output_width,
    const void** input,
    const void* weights,
    void* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  const __m256 vmax = _mm256_load_ps(params->avx.max);
  const __m256 vmin = _mm256_load_ps(params->avx.min);

  uint16_t* o = (uint16_t*) output;
  do {
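    // Set up the 9 input row pointers for this output pixel; input_offset is
    // applied unless a row points at the shared zero (padding) buffer.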
    const uint16_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint16_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint16_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint16_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint16_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint16_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint16_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint16_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint16_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const void**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const uint16_t* w = weights;
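    // Main loop: process 8 channels per iteration across all 9 taps. The packed
    // weights hold 8 bias values followed by 9 groups of 8 per-tap filter values
    // (80 fp16 values per step).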
    for (; c >= 8; c -= 8) {
      __m256 vacc01234567p0 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));


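      // Accumulate the 9 taps. Each FMA result is rounded back to fp16 and
      // re-widened, so the accumulator behaves like native half-precision math.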
      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      i0 += 8;

      const __m256 vk0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 8)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      i1 += 8;

      const __m256 vk1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 16)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
      i2 += 8;

      const __m256 vk2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 24)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
      i3 += 8;

      const __m256 vk3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 32)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
      i4 += 8;

      const __m256 vk4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 40)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
      i5 += 8;

      const __m256 vk5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 48)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
      i6 += 8;

      const __m256 vk6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 56)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi7x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
      i7 += 8;

      const __m256 vk7x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 64)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi8x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i8));
      i8 += 8;

      const __m256 vk8x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 72)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      w += 80;


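      // Clamp the accumulator to [vmin, vmax] and store 8 fp16 outputs.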
      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);

      _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc01234567, _MM_FROUND_NO_EXC));
      o += 8;
    }
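    // Remainder of 1-7 channels: full 8-lane vectors are still loaded (the
    // kernel is declared XNN_OOB_READS), but only the live c lanes are stored.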
    if XNN_UNLIKELY(c != 0) {
      assert(c >= 1);
      assert(c <= 7);

      __m256 vacc01234567p0 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));

      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));

      const __m256 vk0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 8)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));

      const __m256 vk1x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 16)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));

      const __m256 vk2x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 24)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));

      const __m256 vk3x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 32)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));

      const __m256 vk4x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 40)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));

      const __m256 vk5x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 48)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));

      const __m256 vk6x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 56)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi7x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));

      const __m256 vk7x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 64)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi8x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i8));

      const __m256 vk8x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 72)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));


      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);

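      // Narrow to fp16 and store only the live lanes: 4, then 2, then the final 1.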
      __m128i vh01234567 = _mm256_cvtps_ph(vacc01234567, _MM_FROUND_NO_EXC);
      if (c & 4) {
        _mm_storel_epi64((__m128i*) o, vh01234567);
        vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
        o += 4;
      }
      if (c & 2) {
        *((uint32_t*) o) = (uint32_t) _mm_cvtsi128_si32(vh01234567);
        vh01234567 = _mm_srli_epi64(vh01234567, 32);
        o += 2;
      }
      if (c & 1) {
        *((uint16_t*) o) = (uint16_t) _mm_extract_epi16(vh01234567, 0);
        o += 1;
      }
    }

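    // Advance the output pointer by the caller-provided increment to the next output pixel.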
    o = (uint16_t*) ((uintptr_t) o + output_increment);
  } while (--output_width != 0);
}