// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/argmaxpool.h>


void xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4(
    size_t output_pixels,
    size_t pooling_elements,
    size_t channels,
    const float** input,
    size_t input_offset,
    float* accumulation_buffer,
    uint32_t* index_buffer,
    float* output,
    uint32_t* index,
    size_t input_increment,
    size_t output_increment,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(output_pixels != 0);
  assert(pooling_elements != 0);
  assert(pooling_elements > 9);
  assert(channels != 0);

  const __m128 voutput_max = _mm_load_ps(params->sse.max);
  const __m128 voutput_min = _mm_load_ps(params->sse.min);
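  // Each iteration of the outer loop computes one output pixel. The first
  // pass below scans pooling elements 0-8 and seeds the accumulation/index
  // buffers with the running maximum and its element index.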
  do {
    {
      float* ab = accumulation_buffer;
      uint32_t* ib = index_buffer;

      const float* i0 = *input++;
      const float* i1 = *input++;
      const float* i2 = *input++;
      const float* i3 = *input++;
      const float* i4 = *input++;
      const float* i5 = *input++;
      const float* i6 = *input++;
      const float* i7 = *input++;
      const float* i8 = *input++;
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
      i7 = (const float*) ((uintptr_t) i7 + input_offset);
      i8 = (const float*) ((uintptr_t) i8 + input_offset);

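      // Channels are processed 4 at a time; the accumulation and index
      // buffers are assumed to be padded to a multiple of 4 channels.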
      for (size_t c = 0; c < channels; c += 4) {
        const __m128 vi0 = _mm_loadu_ps(i0);
        i0 += 4;
        const __m128 vi1 = _mm_loadu_ps(i1);
        i1 += 4;
        const __m128 vi2 = _mm_loadu_ps(i2);
        i2 += 4;
        const __m128 vi3 = _mm_loadu_ps(i3);
        i3 += 4;
        const __m128 vi4 = _mm_loadu_ps(i4);
        i4 += 4;
        const __m128 vi5 = _mm_loadu_ps(i5);
        i5 += 4;
        const __m128 vi6 = _mm_loadu_ps(i6);
        i6 += 4;
        const __m128 vi7 = _mm_loadu_ps(i7);
        i7 += 4;
        const __m128 vi8 = _mm_loadu_ps(i8);
        i8 += 4;

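        // The running maximum starts at element 0, so every lane's index is 0.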
        __m128 vmax = vi0;
        __m128i vidx = _mm_setzero_si128();

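        // SSE2 has no variable blend, so select(mask, new, old) is emulated
        // with andnot/and/or. The strict > comparison keeps the earliest
        // index on ties.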
        const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
        vmax = _mm_max_ps(vi1, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1)));

        const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
        vmax = _mm_max_ps(vi2, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2)));

        const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
        vmax = _mm_max_ps(vi3, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3)));

        const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
        vmax = _mm_max_ps(vi4, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, _mm_set1_epi32(4)));

        const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
        vmax = _mm_max_ps(vi5, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, _mm_set1_epi32(5)));

        const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
        vmax = _mm_max_ps(vi6, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, _mm_set1_epi32(6)));

        const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
        vmax = _mm_max_ps(vi7, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, _mm_set1_epi32(7)));

        const __m128i vm8 = _mm_castps_si128(_mm_cmpgt_ps(vi8, vmax));
        vmax = _mm_max_ps(vi8, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm8, vidx), _mm_and_si128(vm8, _mm_set1_epi32(8)));

        _mm_store_ps(ab, vmax);
        ab += 4;
        _mm_store_si128((__m128i*) ib, vidx);
        ib += 4;
      }
    }
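    // Pooling elements 0-8 are done; indices for the next pass start at 9.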
    const __m128i v1 = _mm_set1_epi32(1);
    const __m128i v8 = _mm_set1_epi32(8);
    __m128i vidx0 = _mm_add_epi32(v1, v8);

    size_t k = pooling_elements;
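    // Intermediate passes: merge 8 more pooling elements per iteration into
    // the running max/index kept in the accumulation and index buffers.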
    for (k -= 9; k > 8; k -= 8) {
      const float* i0 = *input++;
      const float* i1 = *input++;
      const float* i2 = *input++;
      const float* i3 = *input++;
      const float* i4 = *input++;
      const float* i5 = *input++;
      const float* i6 = *input++;
      const float* i7 = *input++;
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
      i7 = (const float*) ((uintptr_t) i7 + input_offset);

      float* ab = accumulation_buffer;
      uint32_t* ib = index_buffer;

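      // Reload the running max/index for each group of 4 channels and fold
      // in this pass's 8 inputs.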
      for (size_t c = 0; c < channels; c += 4) {
        const __m128 vi0 = _mm_loadu_ps(i0);
        i0 += 4;
        const __m128 vi1 = _mm_loadu_ps(i1);
        i1 += 4;
        const __m128 vi2 = _mm_loadu_ps(i2);
        i2 += 4;
        const __m128 vi3 = _mm_loadu_ps(i3);
        i3 += 4;
        const __m128 vi4 = _mm_loadu_ps(i4);
        i4 += 4;
        const __m128 vi5 = _mm_loadu_ps(i5);
        i5 += 4;
        const __m128 vi6 = _mm_loadu_ps(i6);
        i6 += 4;
        const __m128 vi7 = _mm_loadu_ps(i7);
        i7 += 4;

        __m128 vmax = _mm_load_ps(ab);
        __m128i vidx = _mm_load_si128((const __m128i*) ib);

        const __m128i vm0 = _mm_castps_si128(_mm_cmpgt_ps(vi0, vmax));
        vmax = _mm_max_ps(vi0, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm0, vidx), _mm_and_si128(vm0, vidx0));

        const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
        const __m128i vidx1 = _mm_add_epi32(vidx0, v1);
        vmax = _mm_max_ps(vi1, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, vidx1));

        const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
        const __m128i vidx2 = _mm_add_epi32(vidx1, v1);
        vmax = _mm_max_ps(vi2, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, vidx2));

        const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
        const __m128i vidx3 = _mm_add_epi32(vidx2, v1);
        vmax = _mm_max_ps(vi3, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, vidx3));

        const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
        const __m128i vidx4 = _mm_add_epi32(vidx3, v1);
        vmax = _mm_max_ps(vi4, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, vidx4));

        const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
        const __m128i vidx5 = _mm_add_epi32(vidx4, v1);
        vmax = _mm_max_ps(vi5, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, vidx5));

        const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
        const __m128i vidx6 = _mm_add_epi32(vidx5, v1);
        vmax = _mm_max_ps(vi6, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, vidx6));

        const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
        const __m128i vidx7 = _mm_add_epi32(vidx6, v1);
        vmax = _mm_max_ps(vi7, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, vidx7));

        _mm_store_ps(ab, vmax);
        ab += 4;
        _mm_store_si128((__m128i*) ib, vidx);
        ib += 4;
      }
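      // The next pass covers the following 8 pooling elements.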
      vidx0 = _mm_add_epi32(vidx0, v8);
    }

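    // Final pass: fold in the remaining 1-8 pooling elements, clamp, and
    // write the pooled values and indices to the output arrays.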
    float* o = output;
    uint32_t* i = index;
    {
      const float* i0 = input[0];
      const float* i1 = input[1];
      const float* i2 = input[2];
      const float* i3 = input[3];
      const float* i4 = input[4];
      const float* i5 = input[5];
      const float* i6 = input[6];
      const float* i7 = input[7];
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
      i7 = (const float*) ((uintptr_t) i7 + input_offset);
      input = (const float**) ((uintptr_t) input + input_increment);
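      // When fewer than 8 elements remain, alias the unused pointers to i0.
      // Re-reading i0 is harmless: the comparison is strict, so a duplicate
      // value can never displace the index already recorded for it.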
      if (k < 2) {
        i1 = i0;
      }
      if (k <= 2) {
        i2 = i0;
      }
      if (k < 4) {
        i3 = i0;
      }
      if (k <= 4) {
        i4 = i0;
      }
      if (k < 6) {
        i5 = i0;
      }
      if (k <= 6) {
        i6 = i0;
      }
      if (k != 8) {
        i7 = i0;
      }

      size_t c = channels;
      float* ab = accumulation_buffer;
      uint32_t* ib = index_buffer;
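      // Main channel loop: finish 4 channels per iteration and store the
      // clamped maxima and their indices.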
      for (; c >= 4; c -= 4) {
        const __m128 vi0 = _mm_loadu_ps(i0);
        i0 += 4;
        const __m128 vi1 = _mm_loadu_ps(i1);
        i1 += 4;
        const __m128 vi2 = _mm_loadu_ps(i2);
        i2 += 4;
        const __m128 vi3 = _mm_loadu_ps(i3);
        i3 += 4;
        const __m128 vi4 = _mm_loadu_ps(i4);
        i4 += 4;
        const __m128 vi5 = _mm_loadu_ps(i5);
        i5 += 4;
        const __m128 vi6 = _mm_loadu_ps(i6);
        i6 += 4;
        const __m128 vi7 = _mm_loadu_ps(i7);
        i7 += 4;

        __m128 vmax = _mm_load_ps(ab);
        ab += 4;
        __m128i vidx = _mm_load_si128((const __m128i*) ib);
        ib += 4;

        const __m128i vm0 = _mm_castps_si128(_mm_cmpgt_ps(vi0, vmax));
        vmax = _mm_max_ps(vi0, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm0, vidx), _mm_and_si128(vm0, vidx0));

        const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
        const __m128i vidx1 = _mm_add_epi32(vidx0, v1);
        vmax = _mm_max_ps(vi1, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, vidx1));

        const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
        const __m128i vidx2 = _mm_add_epi32(vidx1, v1);
        vmax = _mm_max_ps(vi2, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, vidx2));

        const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
        const __m128i vidx3 = _mm_add_epi32(vidx2, v1);
        vmax = _mm_max_ps(vi3, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, vidx3));

        const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
        const __m128i vidx4 = _mm_add_epi32(vidx3, v1);
        vmax = _mm_max_ps(vi4, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, vidx4));

        const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
        const __m128i vidx5 = _mm_add_epi32(vidx4, v1);
        vmax = _mm_max_ps(vi5, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, vidx5));

        const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
        const __m128i vidx6 = _mm_add_epi32(vidx5, v1);
        vmax = _mm_max_ps(vi6, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, vidx6));

        const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
        const __m128i vidx7 = _mm_add_epi32(vidx6, v1);
        vmax = _mm_max_ps(vi7, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, vidx7));

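        // Clamp the pooled value to [voutput_min, voutput_max] from params.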
        __m128 vout = _mm_max_ps(_mm_min_ps(vmax, voutput_max), voutput_min);

        _mm_storeu_ps(o, vout);
        o += 4;
        _mm_storeu_si128((__m128i*) i, vidx);
        i += 4;
      }
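      // Remainder: 1-3 channels are left. Compute a full vector as above,
      // then store it in 2- and 1-element pieces.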
      if (c != 0) {
        const __m128 vi0 = _mm_loadu_ps(i0);
        const __m128 vi1 = _mm_loadu_ps(i1);
        const __m128 vi2 = _mm_loadu_ps(i2);
        const __m128 vi3 = _mm_loadu_ps(i3);
        const __m128 vi4 = _mm_loadu_ps(i4);
        const __m128 vi5 = _mm_loadu_ps(i5);
        const __m128 vi6 = _mm_loadu_ps(i6);
        const __m128 vi7 = _mm_loadu_ps(i7);

        __m128 vmax = _mm_load_ps(ab);
        __m128i vidx = _mm_load_si128((const __m128i*) ib);

        const __m128i vm0 = _mm_castps_si128(_mm_cmpgt_ps(vi0, vmax));
        vmax = _mm_max_ps(vi0, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm0, vidx), _mm_and_si128(vm0, vidx0));

        const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
        const __m128i vidx1 = _mm_add_epi32(vidx0, v1);
        vmax = _mm_max_ps(vi1, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, vidx1));

        const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
        const __m128i vidx2 = _mm_add_epi32(vidx1, v1);
        vmax = _mm_max_ps(vi2, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, vidx2));

        const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
        const __m128i vidx3 = _mm_add_epi32(vidx2, v1);
        vmax = _mm_max_ps(vi3, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, vidx3));

        const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
        const __m128i vidx4 = _mm_add_epi32(vidx3, v1);
        vmax = _mm_max_ps(vi4, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, vidx4));

        const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
        const __m128i vidx5 = _mm_add_epi32(vidx4, v1);
        vmax = _mm_max_ps(vi5, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, vidx5));

        const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
        const __m128i vidx6 = _mm_add_epi32(vidx5, v1);
        vmax = _mm_max_ps(vi6, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, vidx6));

        const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
        const __m128i vidx7 = _mm_add_epi32(vidx6, v1);
        vmax = _mm_max_ps(vi7, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, vidx7));

        __m128 vout = _mm_max_ps(_mm_min_ps(vmax, voutput_max), voutput_min);

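        // Store the low 2 lanes, then shift the high lanes down for the
        // final scalar store.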
        if (c & 2) {
          _mm_store_sd((double*) o, _mm_castps_pd(vout));
          _mm_storel_epi64((__m128i*) i, vidx);
          vout = _mm_movehl_ps(vout, vout);
          vidx = _mm_unpackhi_epi64(vidx, vidx);
          o += 2;
          i += 2;
        }
        if (c & 1) {
          _mm_store_ss(o, vout);
          *i = (uint32_t) _mm_cvtsi128_si32(vidx);
          o += 1;
          i += 1;
        }
      }
    }

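    // Advance to the next output pixel; output_increment accounts for any
    // gap after the channels written for this pixel.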
    output = (float*) ((uintptr_t) o + output_increment);
    index = (uint32_t*) i;
  } while (--output_pixels != 0);
}