// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

void xnn_f32_dwconv_spchw_ukernel_3x3s2p1__neonfma(
    size_t m,
    size_t n,
    const float* input,
    const float* weights,
    float* output,
    size_t input_tuple_stride,
    size_t output_tuple_stride,
    size_t input_width_stride,
    size_t output_width_stride,
    const union xnn_f32_spchw_params params[restrict static 1])
{
  assert(n != 0);

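  // Masks to zero out the lanes of a partial final block, so that pixels past
  // the end of the row do not contribute to the outputs.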
  const uint32x4_t vmask_even = vld1q_u32(params->neon.mask_even);
  const uint32x4_t vmask_odd = vld1q_u32(params->neon.mask_odd);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon.min);

  const size_t input_width_increment = input_width_stride * 2 - n / 8 * input_tuple_stride * 2;
  const size_t output_width_increment = output_width_stride - n / 8 * output_tuple_stride;

  // No vertical padding.
  const float* i0 = input;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width_stride);
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width_stride);

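  // Weights layout: the bias is in lane 0 of vw0123, followed by the 9 filter
  // taps in row-major order (top row in vw0123 lanes 1-3, middle row in
  // vw4567 lanes 0-2, bottom row in vw4567 lane 3 and vw89).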
  const float32x4_t vw0123 = vld1q_f32(weights);
  const float32x4_t vw4567 = vld1q_f32(weights + 4);
  const float32x2_t vw89 = vld1_f32(weights + 8);

  do {
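    // The vi*x0123 vectors start at zero to supply the single pixel of left
    // padding (p1); the first vext below reads their last element as the left
    // neighbor of the first input pixel. On later iterations they hold the
    // previous block (vi*x89AB).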
    float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
    float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
    float32x4_t vi2x0123 = vmovq_n_f32(0.0f);

    size_t k = n;
    for (; k >= 8; k -= 8) {
      float32x4_t vo468Ap0 = vdupq_laneq_f32(vw0123, 0);

      const float32x4_t vi0x4567 = vld1q_f32(i0); i0 = (const float*) ((uintptr_t) i0 + input_tuple_stride);
      const float32x4_t vi1x4567 = vld1q_f32(i1); i1 = (const float*) ((uintptr_t) i1 + input_tuple_stride);
      const float32x4_t vi2x4567 = vld1q_f32(i2); i2 = (const float*) ((uintptr_t) i2 + input_tuple_stride);

      const float32x4_t vi0x89AB = vld1q_f32(i0); i0 = (const float*) ((uintptr_t) i0 + input_tuple_stride);
      const float32x4_t vi1x89AB = vld1q_f32(i1); i1 = (const float*) ((uintptr_t) i1 + input_tuple_stride);
      const float32x4_t vi2x89AB = vld1q_f32(i2); i2 = (const float*) ((uintptr_t) i2 + input_tuple_stride);

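      // De-interleave the 8 input pixels into even (x4, x6, x8, xA) and odd
      // (x5, x7, x9, xB) halves: with stride 2, the even pixels are the
      // filter centers and the odd pixels are their right neighbors.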
      const float32x4_t vi0x468A = vuzp1q_f32(vi0x4567, vi0x89AB);
      const float32x4_t vi0x579B = vuzp2q_f32(vi0x4567, vi0x89AB);
      const float32x4_t vi1x468A = vuzp1q_f32(vi1x4567, vi1x89AB);
      const float32x4_t vi1x579B = vuzp2q_f32(vi1x4567, vi1x89AB);
      const float32x4_t vi2x468A = vuzp1q_f32(vi2x4567, vi2x89AB);
      const float32x4_t vi2x579B = vuzp2q_f32(vi2x4567, vi2x89AB);
      // Add the bias only to the first row; it carries through to the final sum.
      // Multiply each row by the corresponding row of the filter's center column.
      vo468Ap0 = vfmaq_laneq_f32(vo468Ap0, vi0x468A, vw0123, 2);
      float32x4_t vo468Ap1 = vmulq_laneq_f32(vi1x468A, vw4567, 1);
      float32x4_t vo468Ap2 = vmulq_lane_f32(vi2x468A, vw89, 0);

      // Grab the values corresponding to the left filter tap.
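      // vextq_f32(a, b, 3) concatenates the last element of a with the first
      // three of b, yielding {x3, x5, x7, x9}: the left neighbors of the even
      // (center) pixels.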
      const float32x4_t vi0x3579 = vextq_f32(vi0x0123, vi0x579B, 3);
      const float32x4_t vi1x3579 = vextq_f32(vi1x0123, vi1x579B, 3);
      const float32x4_t vi2x3579 = vextq_f32(vi2x0123, vi2x579B, 3);

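      // Carry this block forward so the next iteration's vext can read its
      // last pixel (xB) as the left neighbor of xC.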
      vi0x0123 = vi0x89AB;
      vi1x0123 = vi1x89AB;
      vi2x0123 = vi2x89AB;

      vo468Ap0 = vfmaq_laneq_f32(vo468Ap0, vi0x3579, vw0123, 1);
      vo468Ap1 = vfmaq_laneq_f32(vo468Ap1, vi1x3579, vw4567, 0);
      vo468Ap2 = vfmaq_laneq_f32(vo468Ap2, vi2x3579, vw4567, 3);

      // Multiply by the right filter tap.
      vo468Ap0 = vfmaq_laneq_f32(vo468Ap0, vi0x579B, vw0123, 3);
      vo468Ap1 = vfmaq_laneq_f32(vo468Ap1, vi1x579B, vw4567, 2);
      vo468Ap2 = vfmaq_lane_f32 (vo468Ap2, vi2x579B, vw89, 1);

      // Add up across rows to get the final outputs.
      float32x4_t vo = vaddq_f32(vo468Ap0, vo468Ap1);
      vo = vaddq_f32(vo, vo468Ap2);

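      // Clamp the outputs to [min, max].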
      vo = vmaxq_f32(vo, vmin);
      vo = vminq_f32(vo, vmax);

      vst1q_f32(output, vo); output = (float*) ((uintptr_t) output + output_tuple_stride);
    }
    // Last block has 0-7 pixels to process.
    assert(k < 8);
    if XNN_LIKELY(k != 0) {
      float32x4_t vo468Ap0 = vdupq_laneq_f32(vw0123, 0);

      const float32x4_t vi0x4567 = vld1q_f32(i0);
      const float32x4_t vi1x4567 = vld1q_f32(i1);
      const float32x4_t vi2x4567 = vld1q_f32(i2);

      const float32x4_t vi0x89AB = vld1q_f32((const float*) ((uintptr_t) i0 + input_tuple_stride));
      const float32x4_t vi1x89AB = vld1q_f32((const float*) ((uintptr_t) i1 + input_tuple_stride));
      const float32x4_t vi2x89AB = vld1q_f32((const float*) ((uintptr_t) i2 + input_tuple_stride));

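      // De-interleave as in the main loop, then mask out the lanes past the
      // last valid input pixel so they contribute zeros to the products.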
      const float32x4_t vi0x468A = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vuzp1q_f32(vi0x4567, vi0x89AB))));
      const float32x4_t vi0x579B = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vuzp2q_f32(vi0x4567, vi0x89AB))));
      const float32x4_t vi1x468A = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vuzp1q_f32(vi1x4567, vi1x89AB))));
      const float32x4_t vi1x579B = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vuzp2q_f32(vi1x4567, vi1x89AB))));
      const float32x4_t vi2x468A = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vuzp1q_f32(vi2x4567, vi2x89AB))));
      const float32x4_t vi2x579B = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vuzp2q_f32(vi2x4567, vi2x89AB))));
      // Add the bias only to the first row; it carries through to the final sum.
      // Multiply each row by the corresponding row of the filter's center column.
      vo468Ap0 = vfmaq_laneq_f32(vo468Ap0, vi0x468A, vw0123, 2);
      float32x4_t vo468Ap1 = vmulq_laneq_f32(vi1x468A, vw4567, 1);
      float32x4_t vo468Ap2 = vmulq_lane_f32(vi2x468A, vw89, 0);

      // Grab the values corresponding to the left filter tap.
      const float32x4_t vi0x3579 = vextq_f32(vi0x0123, vi0x579B, 3);
      const float32x4_t vi1x3579 = vextq_f32(vi1x0123, vi1x579B, 3);
      const float32x4_t vi2x3579 = vextq_f32(vi2x0123, vi2x579B, 3);

      vo468Ap0 = vfmaq_laneq_f32(vo468Ap0, vi0x3579, vw0123, 1);
      vo468Ap1 = vfmaq_laneq_f32(vo468Ap1, vi1x3579, vw4567, 0);
      vo468Ap2 = vfmaq_laneq_f32(vo468Ap2, vi2x3579, vw4567, 3);

      // Multiply by the right filter tap.
      vo468Ap0 = vfmaq_laneq_f32(vo468Ap0, vi0x579B, vw0123, 3);
      vo468Ap1 = vfmaq_laneq_f32(vo468Ap1, vi1x579B, vw4567, 2);
      vo468Ap2 = vfmaq_lane_f32 (vo468Ap2, vi2x579B, vw89, 1);

      // Add up across rows to get the final outputs.
      float32x4_t vo = vaddq_f32(vo468Ap0, vo468Ap1);
      vo = vaddq_f32(vo, vo468Ap2);

      vo = vmaxq_f32(vo, vmin);
      vo = vminq_f32(vo, vmax);

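      // k remaining input pixels produce ceil(k / 2) outputs; after k += 1,
      // bit 3 of k selects a full 4-lane store, and bits 2 and 1 select the
      // 2- and 1-lane partial stores.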
      k += 1;
      if (k & 8) {
        vst1q_f32(output, vo);
      } else {
        float* output_lo = output;
        float32x2_t vo_lo = vget_low_f32(vo);
        if (k & 4) {
          vst1_f32(output_lo, vo_lo); output_lo += 2;
          vo_lo = vget_high_f32(vo);
        }
        if (k & 2) {
          vst1_lane_f32(output_lo, vo_lo, 0);
        }
      }
    }

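    // Advance the input pointers to the next pair of rows (vertical stride 2)
    // and the output pointer to the next output row, rewinding past the
    // horizontal advance made during this pass.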
    i0 = (const float*) ((uintptr_t) i0 + input_width_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_width_increment);
    i2 = (const float*) ((uintptr_t) i2 + input_width_increment);
    output = (float*) ((uintptr_t) output + output_width_increment);
  } while (--m != 0);
}