// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/wasmsimd-pipelined.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/spmm.h>

xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_arm_pipelined_x2(size_t mc,size_t nc,const float * restrict input,const float * restrict weights,const int32_t * restrict widx_dmap,const uint32_t * restrict nidx_nnzmap,float * restrict output,size_t output_stride,const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS (1)])17 void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_arm_pipelined_x2(
18 size_t mc,
19 size_t nc,
20 const float*restrict input,
21 const float*restrict weights,
22 const int32_t*restrict widx_dmap,
23 const uint32_t*restrict nidx_nnzmap,
24 float*restrict output,
25 size_t output_stride,
26 const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
27 {
28 assert(mc != 0);
29 assert(mc % sizeof(float) == 0);
30 assert(nc != 0);
31
32 const v128_t vmin = wasm_v32x4_load_splat(¶ms->scalar.min);
33 const v128_t vmax = wasm_v32x4_load_splat(¶ms->scalar.max);
34 size_t output_decrement = output_stride * nc - 32 * sizeof(float);
35 while XNN_LIKELY(mc >= 32 * sizeof(float)) {
36 const float*restrict w = weights;
37 const int32_t* dmap = widx_dmap;
38 const uint32_t* nnzmap = nidx_nnzmap;
39 v128_t vw = wasm_v32x4_load_splat(w); w += 1;
40 intptr_t diff = *dmap++;
41 v128_t vi0123 = wasm_v128_load(input + 0);
42 v128_t vi4567 = wasm_v128_load(input + 4);
43 v128_t vi89AB = wasm_v128_load(input + 8);
44 v128_t viCDEF = wasm_v128_load(input + 12);
45 v128_t viGHIJ = wasm_v128_load(input + 16);
46 v128_t viKLMN = wasm_v128_load(input + 20);
47 v128_t viOPQR = wasm_v128_load(input + 24);
48 v128_t viSTUV = wasm_v128_load(input + 28);
49 size_t n = nc;
50 do {
51 uint32_t nnz = *nnzmap++;
52 v128_t vacc0123 = vw;
53 v128_t vacc4567 = vw;
54 v128_t vacc89AB = vw;
55 v128_t vaccCDEF = vw;
56 v128_t vaccGHIJ = vw;
57 v128_t vaccKLMN = vw;
58 v128_t vaccOPQR = vw;
59 v128_t vaccSTUV = vw;
60 vw = wasm_v32x4_load_splat(w); w += 1;
61
62 for (; nnz >= 2; nnz -= 2) {
63 vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
64 vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
65 vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
66 vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
67 vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
68 vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
69 vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
70 vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
71 input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
72 diff = *dmap++;
73 vw = wasm_v32x4_load_splat(w); w += 1;
74 vi0123 = wasm_v128_load(input + 0);
75 vi4567 = wasm_v128_load(input + 4);
76 vi89AB = wasm_v128_load(input + 8);
77 viCDEF = wasm_v128_load(input + 12);
78 viGHIJ = wasm_v128_load(input + 16);
79 viKLMN = wasm_v128_load(input + 20);
80 viOPQR = wasm_v128_load(input + 24);
81 viSTUV = wasm_v128_load(input + 28);
82 vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
83 vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
84 vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
85 vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
86 vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
87 vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
88 vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
89 vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
90 input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
91 diff = *dmap++;
92 vw = wasm_v32x4_load_splat(w); w += 1;
93 vi0123 = wasm_v128_load(input + 0);
94 vi4567 = wasm_v128_load(input + 4);
95 vi89AB = wasm_v128_load(input + 8);
96 viCDEF = wasm_v128_load(input + 12);
97 viGHIJ = wasm_v128_load(input + 16);
98 viKLMN = wasm_v128_load(input + 20);
99 viOPQR = wasm_v128_load(input + 24);
100 viSTUV = wasm_v128_load(input + 28);
101 }
102
103 if XNN_LIKELY(nnz != 0) {
104 do {
105 vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
106 vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
107 vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
108 vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
109 vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
110 vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
111 vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
112 vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
113 input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
114
115 diff = *dmap++;
116 vw = wasm_v32x4_load_splat(w); w += 1;
117 vi0123 = wasm_v128_load(input + 0);
118 vi4567 = wasm_v128_load(input + 4);
119 vi89AB = wasm_v128_load(input + 8);
120 viCDEF = wasm_v128_load(input + 12);
121 viGHIJ = wasm_v128_load(input + 16);
122 viKLMN = wasm_v128_load(input + 20);
123 viOPQR = wasm_v128_load(input + 24);
124 viSTUV = wasm_v128_load(input + 28);
125 } while (--nnz != 0);
126 }
127 v128_t vout0123 = wasm_f32x4_min(vacc0123, vmax);
128 v128_t vout4567 = wasm_f32x4_min(vacc4567, vmax);
129 v128_t vout89AB = wasm_f32x4_min(vacc89AB, vmax);
130 v128_t voutCDEF = wasm_f32x4_min(vaccCDEF, vmax);
131 v128_t voutGHIJ = wasm_f32x4_min(vaccGHIJ, vmax);
132 v128_t voutKLMN = wasm_f32x4_min(vaccKLMN, vmax);
133 v128_t voutOPQR = wasm_f32x4_min(vaccOPQR, vmax);
134 v128_t voutSTUV = wasm_f32x4_min(vaccSTUV, vmax);
135 vout0123 = wasm_f32x4_max(vout0123, vmin);
136 vout4567 = wasm_f32x4_max(vout4567, vmin);
137 vout89AB = wasm_f32x4_max(vout89AB, vmin);
138 voutCDEF = wasm_f32x4_max(voutCDEF, vmin);
139 voutGHIJ = wasm_f32x4_max(voutGHIJ, vmin);
140 voutKLMN = wasm_f32x4_max(voutKLMN, vmin);
141 voutOPQR = wasm_f32x4_max(voutOPQR, vmin);
142 voutSTUV = wasm_f32x4_max(voutSTUV, vmin);
143 wasm_v128_store(output, vout0123);
144 wasm_v128_store(output + 4, vout4567);
145 wasm_v128_store(output + 8, vout89AB);
146 wasm_v128_store(output + 12, voutCDEF);
147 wasm_v128_store(output + 16, voutGHIJ);
148 wasm_v128_store(output + 20, voutKLMN);
149 wasm_v128_store(output + 24, voutOPQR);
150 wasm_v128_store(output + 28, voutSTUV);
151 output = (float*restrict) ((uintptr_t) output + output_stride);
152 } while (--n != 0);
153 output = (float*restrict) ((uintptr_t) output - output_decrement);
154 input += 32;
155 mc -= 32 * sizeof(float);
156 }
157 if XNN_UNLIKELY(mc != 0) {
158 output_decrement += 16 * sizeof(float);
159 if (mc & (16 * sizeof(float))) {
160 const float*restrict w = weights;
161 const int32_t* dmap = widx_dmap;
162 const uint32_t* nnzmap = nidx_nnzmap;
163 size_t n = nc;
164 do {
165 uint32_t nnz = *nnzmap++;
166 v128_t vacc0123 = wasm_v32x4_load_splat(w); w += 1;
167 v128_t vacc4567 = vacc0123;
168 v128_t vacc89AB = vacc0123;
169 v128_t vaccCDEF = vacc0123;
170 if XNN_LIKELY(nnz != 0) {
171 do {
172 const intptr_t diff = *dmap++;
173 const v128_t vi0123 = wasm_v128_load(input);
174 const v128_t vi4567 = wasm_v128_load(input + 4);
175 const v128_t vi89AB = wasm_v128_load(input + 8);
176 const v128_t viCDEF = wasm_v128_load(input + 12);
177 input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
178 const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
179 vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
180 vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
181 vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
182 vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
183 } while (--nnz != 0);
184 }
185 v128_t vout0123 = wasm_f32x4_min(vacc0123, vmax);
186 v128_t vout4567 = wasm_f32x4_min(vacc4567, vmax);
187 v128_t vout89AB = wasm_f32x4_min(vacc89AB, vmax);
188 v128_t voutCDEF = wasm_f32x4_min(vaccCDEF, vmax);
189 vout0123 = wasm_f32x4_max(vout0123, vmin);
190 vout4567 = wasm_f32x4_max(vout4567, vmin);
191 vout89AB = wasm_f32x4_max(vout89AB, vmin);
192 voutCDEF = wasm_f32x4_max(voutCDEF, vmin);
193 wasm_v128_store(output, vout0123);
194
195 wasm_v128_store(output + 4, vout4567);
196 wasm_v128_store(output + 8, vout89AB);
197 wasm_v128_store(output + 12, voutCDEF);
198 output = (float*restrict) ((uintptr_t) output + output_stride);
199 } while (--n != 0);
200 output = (float*restrict) ((uintptr_t) output - output_decrement);
201 input += 16;
202 }
203 output_decrement += 8 * sizeof(float);
204 if (mc & (8 * sizeof(float))) {
205 const float*restrict w = weights;
206 const int32_t* dmap = widx_dmap;
207 const uint32_t* nnzmap = nidx_nnzmap;
208 size_t n = nc;
209 do {
210 uint32_t nnz = *nnzmap++;
211 v128_t vacc0123 = wasm_v32x4_load_splat(w); w += 1;
212 v128_t vacc4567 = vacc0123;
213 if XNN_LIKELY(nnz != 0) {
214 do {
215 const intptr_t diff = *dmap++;
216 const v128_t vi0123 = wasm_v128_load(input);
217 const v128_t vi4567 = wasm_v128_load(input + 4);
218 input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
219 const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
220 vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
221 vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
222 } while (--nnz != 0);
223 }
224 v128_t vout0123 = wasm_f32x4_min(vacc0123, vmax);
225 v128_t vout4567 = wasm_f32x4_min(vacc4567, vmax);
226 vout0123 = wasm_f32x4_max(vout0123, vmin);
227 vout4567 = wasm_f32x4_max(vout4567, vmin);
228 wasm_v128_store(output, vout0123);
229
230 wasm_v128_store(output + 4, vout4567);
231 output = (float*restrict) ((uintptr_t) output + output_stride);
232 } while (--n != 0);
233 output = (float*restrict) ((uintptr_t) output - output_decrement);
234 input += 8;
235 }
236 output_decrement += 4 * sizeof(float);
237 if (mc & (4 * sizeof(float))) {
238 const float*restrict w = weights;
239 const int32_t* dmap = widx_dmap;
240 const uint32_t* nnzmap = nidx_nnzmap;
241 size_t n = nc;
242 do {
243 uint32_t nnz = *nnzmap++;
244 v128_t vacc0123 = wasm_v32x4_load_splat(w); w += 1;
245 if XNN_LIKELY(nnz != 0) {
246 do {
247 const intptr_t diff = *dmap++;
248 const v128_t vi0123 = wasm_v128_load(input);
249 input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
250 const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
251 vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
252 } while (--nnz != 0);
253 }
254 v128_t vout0123 = wasm_f32x4_min(vacc0123, vmax);
255 vout0123 = wasm_f32x4_max(vout0123, vmin);
256 wasm_v128_store(output, vout0123);
257
258 output = (float*restrict) ((uintptr_t) output + output_stride);
259 } while (--n != 0);
260 output = (float*restrict) ((uintptr_t) output - output_decrement);
261 input += 4;
262 }
263 output_decrement += 2 * sizeof(float);
264 if (mc & (2 * sizeof(float))) {
265 const float*restrict w = weights;
266 const int32_t* dmap = widx_dmap;
267 const uint32_t* nnzmap = nidx_nnzmap;
268 size_t n = nc;
269 do {
270 uint32_t nnz = *nnzmap++;
271 v128_t vacc01 = wasm_v32x4_load_splat(w); w += 1;
272 if XNN_LIKELY(nnz != 0) {
273 do {
274 const intptr_t diff = *dmap++;
275 const v128_t vi01 = wasm_v64x2_load_splat(input);
276 input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
277 const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
278 vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
279 } while (--nnz != 0);
280 }
281 v128_t vout01 = wasm_f32x4_min(vacc01, vmax);
282 vout01 = wasm_f32x4_max(vout01, vmin);
283 *((double*) output) = wasm_f64x2_extract_lane(vout01, 0);
284
285 output = (float*restrict) ((uintptr_t) output + output_stride);
286 } while (--n != 0);
287 output = (float*restrict) ((uintptr_t) output - output_decrement);
288 input += 2;
289 }
290 output_decrement += 1 * sizeof(float);
291 if (mc & (1 * sizeof(float))) {
292 const float*restrict w = weights;
293 const int32_t* dmap = widx_dmap;
294 const uint32_t* nnzmap = nidx_nnzmap;
295 size_t n = nc;
296 do {
297 uint32_t nnz = *nnzmap++;
298 v128_t vacc0 = wasm_v32x4_load_splat(w); w += 1;
299 if XNN_LIKELY(nnz != 0) {
300 do {
301 const intptr_t diff = *dmap++;
302 const v128_t vi0 = wasm_v32x4_load_splat(input);
303 input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
304 const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
305 vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
306 } while (--nnz != 0);
307 }
308 v128_t vout0 = wasm_f32x4_min(vacc0, vmax);
309 vout0 = wasm_f32x4_max(vout0, vmin);
310 *output = wasm_f32x4_extract_lane(vout0, 0);
311
312 output = (float*restrict) ((uintptr_t) output + output_stride);
313 } while (--n != 0);
314 output = (float*restrict) ((uintptr_t) output - output_decrement);
315 input += 1;
316 }
317 }
318 }
319