// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/argmaxpool.h>

void xnn_f32_argmaxpool_ukernel_4x__neon_c4(
    size_t output_pixels,
    size_t pooling_elements,
    size_t channels,
    const float** input,
    size_t input_offset,
    float* output,
    uint32_t* index,
    size_t input_increment,
    size_t output_increment) XNN_DISABLE_TSAN
{
  assert(output_pixels != 0);
  assert(pooling_elements != 0);
  assert(pooling_elements <= 4);
  assert(channels != 0);

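  // For each output pixel, scan up to four pooling elements per channel and
  // record both the maximum value and the index of the pooling element that
  // produced it.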
  do {
    const float* i0 = input[0];
    const float* i1 = input[1];
    const float* i2 = input[2];
    const float* i3 = input[3];
    i0 = (const float*) ((uintptr_t) i0 + input_offset);
    i1 = (const float*) ((uintptr_t) i1 + input_offset);
    i2 = (const float*) ((uintptr_t) i2 + input_offset);
    i3 = (const float*) ((uintptr_t) i3 + input_offset);
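    // With fewer than four pooling elements, point the unused rows at row 0.
    // A duplicated row 0 can never be strictly greater than the running
    // maximum seeded from row 0, so it changes neither the max nor the index.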
    if (pooling_elements < 2) {
      i1 = i0;
    }
    if (pooling_elements <= 2) {
      i2 = i0;
    }
    if (pooling_elements != 4) {
      i3 = i0;
    }

    size_t c = channels;
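    // Main loop: process channels in groups of four with full NEON vectors.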
    for (; c >= 4; c -= 4) {
      const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;

      float32x4_t vmax = vi0;
      uint32x4_t vidx = vmovq_n_u32(0);

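      // Compare each remaining pooling row against the running maximum; where
      // a row is strictly greater, blend in its values and record its index.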
      const uint32x4_t vm1 = vcgtq_f32(vi1, vmax);
      vmax = vbslq_f32(vm1, vi1, vmax);
      vidx = vbslq_u32(vm1, vmovq_n_u32(1), vidx);

      const uint32x4_t vm2 = vcgtq_f32(vi2, vmax);
      vmax = vbslq_f32(vm2, vi2, vmax);
      vidx = vbslq_u32(vm2, vmovq_n_u32(2), vidx);

      const uint32x4_t vm3 = vcgtq_f32(vi3, vmax);
      vmax = vbslq_f32(vm3, vi3, vmax);
      vidx = vbslq_u32(vm3, vmovq_n_u32(3), vidx);

      vst1q_f32(output, vmax); output += 4;
      vst1q_u32(index, vidx); index += 4;
    }
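    // Remainder: the last 1-3 channels are computed with a full vector; only
    // the valid lanes are stored below.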
    if (c != 0) {
      const float32x4_t vi0 = vld1q_f32(i0);
      const float32x4_t vi1 = vld1q_f32(i1);
      const float32x4_t vi2 = vld1q_f32(i2);
      const float32x4_t vi3 = vld1q_f32(i3);

      float32x4_t vmax = vi0;
      uint32x4_t vidx = vmovq_n_u32(0);

      const uint32x4_t vm1 = vcgtq_f32(vi1, vmax);
      vmax = vbslq_f32(vm1, vi1, vmax);
      vidx = vbslq_u32(vm1, vmovq_n_u32(1), vidx);

      const uint32x4_t vm2 = vcgtq_f32(vi2, vmax);
      vmax = vbslq_f32(vm2, vi2, vmax);
      vidx = vbslq_u32(vm2, vmovq_n_u32(2), vidx);

      const uint32x4_t vm3 = vcgtq_f32(vi3, vmax);
      vmax = vbslq_f32(vm3, vi3, vmax);
      vidx = vbslq_u32(vm3, vmovq_n_u32(3), vidx);

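      // Store only the valid lanes: the low pair if c & 2 is set, then one
      // more lane if c & 1 is set.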
      float32x2_t vmax_lo = vget_low_f32(vmax);
      uint32x2_t vidx_lo = vget_low_u32(vidx);
      if (c & 2) {
        vst1_f32(output, vmax_lo); output += 2;
        vst1_u32(index, vidx_lo); index += 2;
        vmax_lo = vget_high_f32(vmax);
        vidx_lo = vget_high_u32(vidx);
      }
      if (c & 1) {
        vst1_lane_f32(output, vmax_lo, 0); output += 1;
        vst1_lane_u32(index, vidx_lo, 0); index += 1;
      }
    }
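    // Advance to the next output pixel: step the input pointer array and the
    // output pointer by the caller-provided byte increments.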
    input = (const float**) ((uintptr_t) input + input_increment);
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}