// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


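// Depthwise convolution ("dwconv") micro-kernel: 9 taps per output pixel
// (up8x9), processed 8 channels at a time with NEON FMA. For each output
// pixel the kernel reads one input pointer per tap, accumulates
// bias + 9 multiply-adds per channel, and clamps the result to
// [params->scalar.min, params->scalar.max].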
void xnn_f32_dwconv_ukernel_up8x9__neonfma(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(channels != 0);
  assert(output_width != 0);

  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
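  // Outer loop: one iteration per output pixel. `input` holds the 9 per-tap
  // row pointers for the current pixel and is advanced by `input_stride`
  // bytes once they have been captured.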
  do {
    const float* i0 = input[0];
    assert(i0 != NULL);
    const float* i1 = input[1];
    assert(i1 != NULL);
    const float* i2 = input[2];
    assert(i2 != NULL);
    const float* i3 = input[3];
    assert(i3 != NULL);
    const float* i4 = input[4];
    assert(i4 != NULL);
    const float* i5 = input[5];
    assert(i5 != NULL);
    const float* i6 = input[6];
    assert(i6 != NULL);
    const float* i7 = input[7];
    assert(i7 != NULL);
    const float* i8 = input[8];
    assert(i8 != NULL);
    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
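    // Main loop: 8 channels per iteration. The packed weights interleave
    // 8 bias values followed by 8 values for each of the 9 taps.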
    for (; c >= 8; c -= 8) {
      float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
      float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;


      const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
      vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);

      const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
      vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);

      const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
      vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);

      const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
      const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
      const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
      vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);

      const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
      const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
      const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
      vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);

      const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
      const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
      const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
      vacc4567p0 = vfmaq_f32(vacc4567p0, vi5x4567, vk5x4567);

      const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
      const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
      const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
      vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);

      const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
      const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
      const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
      vacc4567p0 = vfmaq_f32(vacc4567p0, vi7x4567, vk7x4567);

      const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
      const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
      const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
      vacc4567p0 = vfmaq_f32(vacc4567p0, vi8x4567, vk8x4567);


      float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
      float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
      vacc0123 = vminq_f32(vacc0123, vmax);
      vacc4567 = vminq_f32(vacc4567, vmax);

      vst1q_f32(output, vacc0123); output += 4;
      vst1q_f32(output, vacc4567); output += 4;
    }
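    // Remainder of 4..7 channels: handle 4 of them here. The weights stay
    // packed in groups of 8, so each tap is read at a fixed offset from w
    // (w + 4 reaches tap 0 once 4 of the 8 bias values have been consumed).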
    for (; c >= 4; c -= 4) {
      float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;


      const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vk0x0123 = vld1q_f32(w + 4);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);

      const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vk1x0123 = vld1q_f32(w + 12);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);

      const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vk2x0123 = vld1q_f32(w + 20);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);

      const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
      const float32x4_t vk3x0123 = vld1q_f32(w + 28);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);

      const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
      const float32x4_t vk4x0123 = vld1q_f32(w + 36);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);

      const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
      const float32x4_t vk5x0123 = vld1q_f32(w + 44);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);

      const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
      const float32x4_t vk6x0123 = vld1q_f32(w + 52);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);

      const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
      const float32x4_t vk7x0123 = vld1q_f32(w + 60);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);

      const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
      const float32x4_t vk8x0123 = vld1q_f32(w + 68);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);


      float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
      vacc0123 = vminq_f32(vacc0123, vmax);

      vst1q_f32(output, vacc0123); output += 4;
    }
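    // Final 1..3 channels: compute a full 4-lane result and store only the
    // valid lanes (a pair if bit 1 of c is set, then one lane if bit 0 is set).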
    if XNN_UNLIKELY(c != 0) {
      float32x4_t vacc0123p0 = vld1q_f32(w);


      const float32x4_t vi0x0123 = vld1q_f32(i0);
      const float32x4_t vk0x0123 = vld1q_f32(w + 8);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);

      const float32x4_t vi1x0123 = vld1q_f32(i1);
      const float32x4_t vk1x0123 = vld1q_f32(w + 16);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);

      const float32x4_t vi2x0123 = vld1q_f32(i2);
      const float32x4_t vk2x0123 = vld1q_f32(w + 24);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);

      const float32x4_t vi3x0123 = vld1q_f32(i3);
      const float32x4_t vk3x0123 = vld1q_f32(w + 32);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);

      const float32x4_t vi4x0123 = vld1q_f32(i4);
      const float32x4_t vk4x0123 = vld1q_f32(w + 40);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);

      const float32x4_t vi5x0123 = vld1q_f32(i5);
      const float32x4_t vk5x0123 = vld1q_f32(w + 48);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);

      const float32x4_t vi6x0123 = vld1q_f32(i6);
      const float32x4_t vk6x0123 = vld1q_f32(w + 56);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);

      const float32x4_t vi7x0123 = vld1q_f32(i7);
      const float32x4_t vk7x0123 = vld1q_f32(w + 64);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);

      const float32x4_t vi8x0123 = vld1q_f32(i8);
      const float32x4_t vk8x0123 = vld1q_f32(w + 72);
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);


      float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
      vacc0123 = vminq_f32(vacc0123, vmax);

      float32x2_t vacc01 = vget_low_f32(vacc0123);
      if (c & 2) {
        vst1_f32(output, vacc01); output += 2;
        vacc01 = vget_high_f32(vacc0123);
      }
      if (c & 1) {
        vst1_lane_f32(output, vacc01, 0); output += 1;
      }
    }

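    // `output_increment` is a byte offset applied between output pixels.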
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}