// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/spmm.h>


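// SpMM (sparse weights times dense input) microkernel with min/max clamping:
// for each of the nc output channels, the accumulator is seeded with an initial
// value (bias) read from `weights`, then each of the channel's non-zero weights
// (count taken from `nidx_nnzmap`) is multiplied against a block of input values,
// with `widx_dmap` supplying the byte offset used to advance the input pointer
// after each non-zero. The main loop processes 16 M-dimension elements per
// iteration; smaller tails are handled at the end.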
void xnn_f32_spmm_minmax_ukernel_16x1__neonfma_x2(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  size_t output_decrement = output_stride * nc - 16 * sizeof(float);
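  // Main loop: process 16 M-dimension elements (64 bytes of input/output) per iteration.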
  while XNN_LIKELY(mc >= 16 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
      float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
      float32x4_t vacc4567x0 = vacc0123x0;
      float32x4_t vacc4567x1 = vmovq_n_f32(0.0f);
      float32x4_t vacc89ABx0 = vacc0123x0;
      float32x4_t vacc89ABx1 = vmovq_n_f32(0.0f);
      float32x4_t vaccCDEFx0 = vacc0123x0;
      float32x4_t vaccCDEFx1 = vmovq_n_f32(0.0f);
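      // Unrolled by 2: process non-zero weights in pairs, accumulating into
      // separate x0/x1 register sets that are summed after the loop.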
      for (; nnz >= 2; nnz -= 2) {
        const intptr_t diff0 = dmap[0];
        const intptr_t diff1 = dmap[1];
        dmap += 2;
        const float32x4_t vi0123x0 = vld1q_f32(input);
        const float32x4_t vi4567x0 = vld1q_f32(input + 4);
        const float32x4_t vi89ABx0 = vld1q_f32(input + 8);
        const float32x4_t viCDEFx0 = vld1q_f32(input + 12);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
        __builtin_prefetch(input + 16);
        const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
        __builtin_prefetch(w + 32);
        vacc0123x0 = vfmaq_f32(vacc0123x0, vi0123x0, vw0);
        vacc4567x0 = vfmaq_f32(vacc4567x0, vi4567x0, vw0);
        vacc89ABx0 = vfmaq_f32(vacc89ABx0, vi89ABx0, vw0);
        vaccCDEFx0 = vfmaq_f32(vaccCDEFx0, viCDEFx0, vw0);
        const float32x4_t vi0123x1 = vld1q_f32(input);
        const float32x4_t vi4567x1 = vld1q_f32(input + 4);
        const float32x4_t vi89ABx1 = vld1q_f32(input + 8);
        const float32x4_t viCDEFx1 = vld1q_f32(input + 12);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
        __builtin_prefetch(input + 16);
        const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
        __builtin_prefetch(w + 32);
        vacc0123x1 = vfmaq_f32(vacc0123x1, vi0123x1, vw1);
        vacc4567x1 = vfmaq_f32(vacc4567x1, vi4567x1, vw1);
        vacc89ABx1 = vfmaq_f32(vacc89ABx1, vi89ABx1, vw1);
        vaccCDEFx1 = vfmaq_f32(vaccCDEFx1, viCDEFx1, vw1);
      }
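      // Combine the two partial accumulator sets from the unrolled loop.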
      float32x4_t vacc0123 = vacc0123x0;
      float32x4_t vacc4567 = vacc4567x0;
      float32x4_t vacc89AB = vacc89ABx0;
      float32x4_t vaccCDEF = vaccCDEFx0;
      vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
      vacc4567 = vaddq_f32(vacc4567, vacc4567x1);
      vacc89AB = vaddq_f32(vacc89AB, vacc89ABx1);
      vaccCDEF = vaddq_f32(vaccCDEF, vaccCDEFx1);
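      // Process the last non-zero weight if the count was odd.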
      if XNN_LIKELY(nnz != 0) {
        do {
          const intptr_t diff = *dmap++;
          const float32x4_t vi0123 = vld1q_f32(input);
          const float32x4_t vi4567 = vld1q_f32(input + 4);
          const float32x4_t vi89AB = vld1q_f32(input + 8);
          const float32x4_t viCDEF = vld1q_f32(input + 12);
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          __builtin_prefetch(input + 16);
          const float32x4_t vw = vld1q_dup_f32(w); w += 1;
          __builtin_prefetch(w + 32);
          vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
          vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
          vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
          vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vw);
        } while (--nnz != 0);
      }
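      // Clamp the accumulators to [min, max] and store 16 output elements.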
      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
      float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
      float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
      vout0123 = vmaxq_f32(vout0123, vmin);
      vout4567 = vmaxq_f32(vout4567, vmin);
      vout89AB = vmaxq_f32(vout89AB, vmin);
      voutCDEF = vmaxq_f32(voutCDEF, vmin);
      vst1q_f32(output, vout0123);
      vst1q_f32(output + 4, vout4567);
      vst1q_f32(output + 8, vout89AB);
      vst1q_f32(output + 12, voutCDEF);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 16;
    mc -= 16 * sizeof(float);
  }
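  // Handle an M-dimension remainder of fewer than 16 elements.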
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 8 * sizeof(float);
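    // 8-element tail.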
    if (mc & (8 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
        float32x4_t vacc4567 = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x4_t vi0123 = vld1q_f32(input);
            const float32x4_t vi4567 = vld1q_f32(input + 4);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
            vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
            vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
          } while (--nnz != 0);
        }
        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
        vout0123 = vmaxq_f32(vout0123, vmin);
        vout4567 = vmaxq_f32(vout4567, vmin);
        vst1q_f32(output, vout0123);
        vst1q_f32(output + 4, vout4567);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 8;
    }
    output_decrement += 4 * sizeof(float);
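    // 4-element tail.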
    if (mc & (4 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x4_t vi0123 = vld1q_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
            vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
          } while (--nnz != 0);
        }
        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
        vout0123 = vmaxq_f32(vout0123, vmin);
        vst1q_f32(output, vout0123);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
    output_decrement += 2 * sizeof(float);
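    // 2-element tail.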
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi01 = vld1_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x2_t vw = vld1_dup_f32(w); w += 1;
            vacc01 = vfma_f32(vacc01, vi01, vw);
          } while (--nnz != 0);
        }
        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
        vst1_f32(output, vout01);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
    output_decrement += 1 * sizeof(float);
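    // 1-element tail.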
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi0 = vld1_dup_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x2_t vw = vld1_dup_f32(w); w += 1;
            vacc0 = vfma_f32(vacc0, vi0, vw);
          } while (--nnz != 0);
        }
        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
        vst1_lane_f32(output, vout0, 0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}