1 // Auto-generated file. Do not edit!
2 // Template: src/f32-spmm/neon.c.in
3 // Generator: tools/xngen
4 //
5 // Copyright 2019 Google LLC
6 //
7 // This source code is licensed under the BSD-style license found in the
8 // LICENSE file in the root directory of this source tree.
9
10 #include <assert.h>
11
12 #include <arm_neon.h>
13
14 #include <xnnpack/spmm.h>
15
16
// SpMM (sparse-weights times dense-input matrix multiplication) micro-kernel
// with min/max clamping, processing up to 32 elements of the M dimension per
// outer iteration (8 NEON q-registers of 4 floats each), 1 output channel at
// a time.
//
//   mc            M dimension size in BYTES; must be non-zero and a multiple
//                 of sizeof(float).
//   nc            number of output channels (N dimension); must be non-zero.
//   input         dense input matrix.
//   weights       packed weights: for each output channel, a bias value
//                 followed by that channel's non-zero weights.
//   widx_dmap     byte-offset deltas applied to the input pointer after each
//                 non-zero weight (compressed sparse row index map).
//   nidx_nnzmap   number of non-zero weights per output channel.
//   output        output matrix.
//   output_stride byte stride between consecutive output channels.
//   params        clamping parameters (scalar.min / scalar.max).
void xnn_f32_spmm_minmax_ukernel_32x1__neon(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  // Broadcast the clamping bounds once; reused by every output vector.
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  // After walking all nc output channels, rewind the output pointer back to
  // the start and advance it by one 32-float tile.
  size_t output_decrement = output_stride * nc - 32 * sizeof(float);
  // Main loop: full tiles of 32 M-dimension elements.
  while XNN_LIKELY(mc >= 32 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      // Initialize all 8 accumulators with the channel bias.
      float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
      float32x4_t vacc4567 = vacc0123;
      float32x4_t vacc89AB = vacc0123;
      float32x4_t vaccCDEF = vacc0123;
      float32x4_t vaccGHIJ = vacc0123;
      float32x4_t vaccKLMN = vacc0123;
      float32x4_t vaccOPQR = vacc0123;
      float32x4_t vaccSTUV = vacc0123;
      if XNN_LIKELY(nnz != 0) {
        do {
          const intptr_t diff = *dmap++;
          const float32x4_t vi0123 = vld1q_f32(input);
          const float32x4_t vi4567 = vld1q_f32(input + 4);
          const float32x4_t vi89AB = vld1q_f32(input + 8);
          const float32x4_t viCDEF = vld1q_f32(input + 12);
          const float32x4_t viGHIJ = vld1q_f32(input + 16);
          const float32x4_t viKLMN = vld1q_f32(input + 20);
          const float32x4_t viOPQR = vld1q_f32(input + 24);
          const float32x4_t viSTUV = vld1q_f32(input + 28);
          // Advance to the input row of the next non-zero weight.
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          __builtin_prefetch(input + 16);
          __builtin_prefetch(input + 32);
          // One scalar weight broadcast across all 32 lanes.
          const float32x4_t vw = vld1q_dup_f32(w); w += 1;
          __builtin_prefetch(w + 32);
          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
          vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
          vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
          vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
          vaccGHIJ = vmlaq_f32(vaccGHIJ, viGHIJ, vw);
          vaccKLMN = vmlaq_f32(vaccKLMN, viKLMN, vw);
          vaccOPQR = vmlaq_f32(vaccOPQR, viOPQR, vw);
          vaccSTUV = vmlaq_f32(vaccSTUV, viSTUV, vw);
        } while (--nnz != 0);
      }
      // Clamp to [min, max] and store.
      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
      float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
      float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
      float32x4_t voutGHIJ = vminq_f32(vaccGHIJ, vmax);
      float32x4_t voutKLMN = vminq_f32(vaccKLMN, vmax);
      float32x4_t voutOPQR = vminq_f32(vaccOPQR, vmax);
      float32x4_t voutSTUV = vminq_f32(vaccSTUV, vmax);
      vout0123 = vmaxq_f32(vout0123, vmin);
      vout4567 = vmaxq_f32(vout4567, vmin);
      vout89AB = vmaxq_f32(vout89AB, vmin);
      voutCDEF = vmaxq_f32(voutCDEF, vmin);
      voutGHIJ = vmaxq_f32(voutGHIJ, vmin);
      voutKLMN = vmaxq_f32(voutKLMN, vmin);
      voutOPQR = vmaxq_f32(voutOPQR, vmin);
      voutSTUV = vmaxq_f32(voutSTUV, vmin);
      vst1q_f32(output, vout0123);
      vst1q_f32(output + 4, vout4567);
      vst1q_f32(output + 8, vout89AB);
      vst1q_f32(output + 12, voutCDEF);
      vst1q_f32(output + 16, voutGHIJ);
      vst1q_f32(output + 20, voutKLMN);
      vst1q_f32(output + 24, voutOPQR);
      vst1q_f32(output + 28, voutSTUV);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 32;
    mc -= 32 * sizeof(float);
  }
  // Remainder: mc < 32 floats; handled as a sum of power-of-two tiles
  // (16 + 8 + 4 + 2 + 1), testing one bit of mc per branch.
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 16 * sizeof(float);
    if (mc & (16 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
        float32x4_t vacc4567 = vacc0123;
        float32x4_t vacc89AB = vacc0123;
        float32x4_t vaccCDEF = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x4_t vi0123 = vld1q_f32(input);
            const float32x4_t vi4567 = vld1q_f32(input + 4);
            const float32x4_t vi89AB = vld1q_f32(input + 8);
            const float32x4_t viCDEF = vld1q_f32(input + 12);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
            vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
            vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
            vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
          } while (--nnz != 0);
        }
        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
        float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
        float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
        vout0123 = vmaxq_f32(vout0123, vmin);
        vout4567 = vmaxq_f32(vout4567, vmin);
        vout89AB = vmaxq_f32(vout89AB, vmin);
        voutCDEF = vmaxq_f32(voutCDEF, vmin);
        vst1q_f32(output, vout0123);
        vst1q_f32(output + 4, vout4567);
        vst1q_f32(output + 8, vout89AB);
        vst1q_f32(output + 12, voutCDEF);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 16;
    }
    output_decrement += 8 * sizeof(float);
    if (mc & (8 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
        float32x4_t vacc4567 = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x4_t vi0123 = vld1q_f32(input);
            const float32x4_t vi4567 = vld1q_f32(input + 4);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
            vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
          } while (--nnz != 0);
        }
        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
        vout0123 = vmaxq_f32(vout0123, vmin);
        vout4567 = vmaxq_f32(vout4567, vmin);
        vst1q_f32(output, vout0123);
        vst1q_f32(output + 4, vout4567);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 8;
    }
    output_decrement += 4 * sizeof(float);
    if (mc & (4 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x4_t vi0123 = vld1q_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
          } while (--nnz != 0);
        }
        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
        vout0123 = vmaxq_f32(vout0123, vmin);
        vst1q_f32(output, vout0123);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
    output_decrement += 2 * sizeof(float);
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        // 2-element tile uses 64-bit d-registers.
        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi01 = vld1_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x2_t vw = vld1_dup_f32(w); w += 1;
            vacc01 = vmla_f32(vacc01, vi01, vw);
          } while (--nnz != 0);
        }
        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
        vst1_f32(output, vout01);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
    output_decrement += 1 * sizeof(float);
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi0 = vld1_dup_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x2_t vw = vld1_dup_f32(w); w += 1;
            vacc0 = vmla_f32(vacc0, vi0, vw);
          } while (--nnz != 0);
        }
        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
        // Only lane 0 holds a valid result for the final single element.
        vst1_lane_f32(output, vout0, 0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}
263