// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

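// 3x3 depthwise convolution in CHW layout with one pixel of implicit padding
// on each side ("3x3p1"). This variant computes 1 output row at a time and
// 4 output pixels per inner-loop iteration, splitting the dot product across
// 2 accumulators ("1x4_acc2").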
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc2(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 1);

  const uint32x4_t vmask = vld1q_u32(params->neon.mask);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon.min);

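  // Packed weights, as consumed by the arithmetic below: weights[0] is the
  // per-channel bias, weights[1..3] the top filter row, weights[4..6] the
  // middle row, and weights[7..9] the bottom row.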
  const float32x4_t vw0123 = vld1q_f32(weights);
  const float32x4_t vw4567 = vld1q_f32(weights + 4);
  const float32x2_t vw89 = vld1_f32(weights + 8);

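  // Each row is consumed in blocks of 4 floats, so rounding the row width up
  // to a multiple of 4 floats gives the total pointer advance per row, i.e.
  // the amount by which a row pointer must be rewound between output rows.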
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

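  // Row pointers for the first output row: the row above the image is the
  // caller-provided zero row (padding_top == 1), followed by the first two
  // input rows.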
  const float* i0 = zero;
  const float* i1 = input;
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);

  float* o0 = output;

  size_t output_height = input_height;
  do {
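    // For the last output row, the row below the image does not exist; read
    // the zero row instead (implicit bottom padding).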
    if XNN_UNPREDICTABLE(output_height < 2) {
      i2 = zero;
    }

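    // Columns to the left of the current block start out as zero; only the
    // rightmost lane is ever used, providing the implicit left padding for
    // the first block of each row.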
    float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
    float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
    float32x4_t vi2x0123 = vmovq_n_f32(0.0f);

    float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
    float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
    float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;

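    // Main loop: compute 4 output pixels per iteration while more than 4
    // input pixels remain in the row.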
    size_t w = input_width;
    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
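      // Seed the first accumulator with the bias; a second accumulator
      // (vo0p1) takes half of the multiply-accumulates to shorten the
      // dependency chain, and the two are summed at the end.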
      float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);

      const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
      const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;

      vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);

      float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);

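      // Shift in the last pixel of the previous block to form the
      // left-neighbor column (pixels 3..6).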
      const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
      const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
      const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_low_f32(vw0123), 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);

      vi0x0123 = vi0x4567;
      vi1x0123 = vi1x4567;
      vi2x0123 = vi2x4567;

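      // Combine with the next block to form the right-neighbor column
      // (pixels 5..8).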
      const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
      const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
      const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);

      vi0x4567 = vi0x89AB;
      vi1x4567 = vi1x89AB;
      vi2x4567 = vi2x89AB;

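      // Merge the two accumulators and clamp to the [min, max] output range.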
      vo0p0 = vaddq_f32(vo0p0, vo0p1);

      float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);

      vo0 = vminq_f32(vo0, vmax);

      vst1q_f32(o0, vo0); o0 += 4;
    }
    // Always process the last block of 1..4 pixels.
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
    {
      float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);

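      // Zero out the lanes that lie beyond the end of the row so they do not
      // contribute to the remaining output pixels.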
      vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
      vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
      vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));

      vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);

      float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);

      const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
      const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
      const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_low_f32(vw0123), 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);

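      // The column to the right of the last (masked) pixel is zero, which
      // provides the implicit right padding.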
      const float32x4_t vzero = vmovq_n_f32(0.0f);
      const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
      const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
      const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);

      vo0p1 = vmlaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);

      vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);

      vo0p0 = vaddq_f32(vo0p0, vo0p1);

      float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);

      vo0 = vminq_f32(vo0, vmax);

      if XNN_LIKELY(w == 4 * sizeof(float)) {
        vst1q_f32(o0, vo0); o0 += 4;
      } else {
        float32x2_t vo0_lo = vget_low_f32(vo0);
        if (w & (2 * sizeof(float))) {
          vst1_f32(o0, vo0_lo); o0 += 2;

          vo0_lo = vget_high_f32(vo0);
        }
        if (w & (1 * sizeof(float))) {
          vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
        }
      }
    }

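    // Advance to the next output row: rewind i1/i2 to the start of the rows
    // just consumed (they become the new i0/i1) and point i2 at the next
    // input row.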
    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);


  } while (--output_height != 0);
}