// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/spmm.h>

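// Sparse-matrix * dense-matrix multiplication (SpMM) microkernel with min/max
// output clamping. mc is the size of the dense dimension in bytes and nc the
// number of output channels; for each channel, nidx_nnzmap gives the number of
// nonzero weights and widx_dmap the byte offsets between the input rows they
// multiply. The first value read from w for each channel seeds the
// accumulators (bias).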
void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86_x4(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vzero = wasm_f64x2_splat(0.0);
  size_t output_decrement = output_stride * nc - 32 * sizeof(float);
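  // Main loop: produce 32 consecutive output elements (8 vectors of 4 lanes,
  // tagged 0123...STUV) per channel, iterating over all nc channels.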
  while XNN_LIKELY(mc >= 32 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
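      // Accumulator set x0 is seeded with the per-channel value from w (the
      // bias); sets x1-x3 start at zero and exist only to break dependency
      // chains in the 4x-unrolled loop below.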
      v128_t vacc0123x0 = wasm_v32x4_load_splat(w);
      w += 1;
      v128_t vacc0123x1 = vzero;
      v128_t vacc0123x2 = vzero;
      v128_t vacc0123x3 = vzero;
      v128_t vacc4567x0 = vacc0123x0;
      v128_t vacc4567x1 = vzero;
      v128_t vacc4567x2 = vzero;
      v128_t vacc4567x3 = vzero;
      v128_t vacc89ABx0 = vacc0123x0;
      v128_t vacc89ABx1 = vzero;
      v128_t vacc89ABx2 = vzero;
      v128_t vacc89ABx3 = vzero;
      v128_t vaccCDEFx0 = vacc0123x0;
      v128_t vaccCDEFx1 = vzero;
      v128_t vaccCDEFx2 = vzero;
      v128_t vaccCDEFx3 = vzero;
      v128_t vaccGHIJx0 = vacc0123x0;
      v128_t vaccGHIJx1 = vzero;
      v128_t vaccGHIJx2 = vzero;
      v128_t vaccGHIJx3 = vzero;
      v128_t vaccKLMNx0 = vacc0123x0;
      v128_t vaccKLMNx1 = vzero;
      v128_t vaccKLMNx2 = vzero;
      v128_t vaccKLMNx3 = vzero;
      v128_t vaccOPQRx0 = vacc0123x0;
      v128_t vaccOPQRx1 = vzero;
      v128_t vaccOPQRx2 = vzero;
      v128_t vaccOPQRx3 = vzero;
      v128_t vaccSTUVx0 = vacc0123x0;
      v128_t vaccSTUVx1 = vzero;
      v128_t vaccSTUVx2 = vzero;
      v128_t vaccSTUVx3 = vzero;
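      // Unrolled by 4: each iteration consumes 4 nonzero weights, multiplying
      // each against 32 input floats; dmap supplies the byte step from one
      // nonzero's input row to the next.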
      for (; nnz >= 4; nnz -= 4) {
        const intptr_t diff0 = dmap[0];
        const intptr_t diff1 = dmap[1];
        const intptr_t diff2 = dmap[2];
        const intptr_t diff3 = dmap[3];
        dmap += 4;
        const v128_t vi0123x0 = wasm_v128_load(input);
        const v128_t vi4567x0 = wasm_v128_load(input + 4);
        const v128_t vi89ABx0 = wasm_v128_load(input + 8);
        const v128_t viCDEFx0 = wasm_v128_load(input + 12);
        const v128_t viGHIJx0 = wasm_v128_load(input + 16);
        const v128_t viKLMNx0 = wasm_v128_load(input + 20);
        const v128_t viOPQRx0 = wasm_v128_load(input + 24);
        const v128_t viSTUVx0 = wasm_v128_load(input + 28);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
        const v128_t vw0 = wasm_v32x4_load_splat(w);
        w += 1;
        vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
        vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
        vacc89ABx0 = wasm_f32x4_add(vacc89ABx0, wasm_f32x4_mul(vi89ABx0, vw0));
        vaccCDEFx0 = wasm_f32x4_add(vaccCDEFx0, wasm_f32x4_mul(viCDEFx0, vw0));
        vaccGHIJx0 = wasm_f32x4_add(vaccGHIJx0, wasm_f32x4_mul(viGHIJx0, vw0));
        vaccKLMNx0 = wasm_f32x4_add(vaccKLMNx0, wasm_f32x4_mul(viKLMNx0, vw0));
        vaccOPQRx0 = wasm_f32x4_add(vaccOPQRx0, wasm_f32x4_mul(viOPQRx0, vw0));
        vaccSTUVx0 = wasm_f32x4_add(vaccSTUVx0, wasm_f32x4_mul(viSTUVx0, vw0));
        const v128_t vi0123x1 = wasm_v128_load(input);
        const v128_t vi4567x1 = wasm_v128_load(input + 4);
        const v128_t vi89ABx1 = wasm_v128_load(input + 8);
        const v128_t viCDEFx1 = wasm_v128_load(input + 12);
        const v128_t viGHIJx1 = wasm_v128_load(input + 16);
        const v128_t viKLMNx1 = wasm_v128_load(input + 20);
        const v128_t viOPQRx1 = wasm_v128_load(input + 24);
        const v128_t viSTUVx1 = wasm_v128_load(input + 28);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
        const v128_t vw1 = wasm_v32x4_load_splat(w);
        w += 1;
        vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
        vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
        vacc89ABx1 = wasm_f32x4_add(vacc89ABx1, wasm_f32x4_mul(vi89ABx1, vw1));
        vaccCDEFx1 = wasm_f32x4_add(vaccCDEFx1, wasm_f32x4_mul(viCDEFx1, vw1));
        vaccGHIJx1 = wasm_f32x4_add(vaccGHIJx1, wasm_f32x4_mul(viGHIJx1, vw1));
        vaccKLMNx1 = wasm_f32x4_add(vaccKLMNx1, wasm_f32x4_mul(viKLMNx1, vw1));
        vaccOPQRx1 = wasm_f32x4_add(vaccOPQRx1, wasm_f32x4_mul(viOPQRx1, vw1));
        vaccSTUVx1 = wasm_f32x4_add(vaccSTUVx1, wasm_f32x4_mul(viSTUVx1, vw1));
        const v128_t vi0123x2 = wasm_v128_load(input);
        const v128_t vi4567x2 = wasm_v128_load(input + 4);
        const v128_t vi89ABx2 = wasm_v128_load(input + 8);
        const v128_t viCDEFx2 = wasm_v128_load(input + 12);
        const v128_t viGHIJx2 = wasm_v128_load(input + 16);
        const v128_t viKLMNx2 = wasm_v128_load(input + 20);
        const v128_t viOPQRx2 = wasm_v128_load(input + 24);
        const v128_t viSTUVx2 = wasm_v128_load(input + 28);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff2);
        const v128_t vw2 = wasm_v32x4_load_splat(w);
        w += 1;
        vacc0123x2 = wasm_f32x4_add(vacc0123x2, wasm_f32x4_mul(vi0123x2, vw2));
        vacc4567x2 = wasm_f32x4_add(vacc4567x2, wasm_f32x4_mul(vi4567x2, vw2));
        vacc89ABx2 = wasm_f32x4_add(vacc89ABx2, wasm_f32x4_mul(vi89ABx2, vw2));
        vaccCDEFx2 = wasm_f32x4_add(vaccCDEFx2, wasm_f32x4_mul(viCDEFx2, vw2));
        vaccGHIJx2 = wasm_f32x4_add(vaccGHIJx2, wasm_f32x4_mul(viGHIJx2, vw2));
        vaccKLMNx2 = wasm_f32x4_add(vaccKLMNx2, wasm_f32x4_mul(viKLMNx2, vw2));
        vaccOPQRx2 = wasm_f32x4_add(vaccOPQRx2, wasm_f32x4_mul(viOPQRx2, vw2));
        vaccSTUVx2 = wasm_f32x4_add(vaccSTUVx2, wasm_f32x4_mul(viSTUVx2, vw2));
        const v128_t vi0123x3 = wasm_v128_load(input);
        const v128_t vi4567x3 = wasm_v128_load(input + 4);
        const v128_t vi89ABx3 = wasm_v128_load(input + 8);
        const v128_t viCDEFx3 = wasm_v128_load(input + 12);
        const v128_t viGHIJx3 = wasm_v128_load(input + 16);
        const v128_t viKLMNx3 = wasm_v128_load(input + 20);
        const v128_t viOPQRx3 = wasm_v128_load(input + 24);
        const v128_t viSTUVx3 = wasm_v128_load(input + 28);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff3);
        const v128_t vw3 = wasm_v32x4_load_splat(w);
        w += 1;
        vacc0123x3 = wasm_f32x4_add(vacc0123x3, wasm_f32x4_mul(vi0123x3, vw3));
        vacc4567x3 = wasm_f32x4_add(vacc4567x3, wasm_f32x4_mul(vi4567x3, vw3));
        vacc89ABx3 = wasm_f32x4_add(vacc89ABx3, wasm_f32x4_mul(vi89ABx3, vw3));
        vaccCDEFx3 = wasm_f32x4_add(vaccCDEFx3, wasm_f32x4_mul(viCDEFx3, vw3));
        vaccGHIJx3 = wasm_f32x4_add(vaccGHIJx3, wasm_f32x4_mul(viGHIJx3, vw3));
        vaccKLMNx3 = wasm_f32x4_add(vaccKLMNx3, wasm_f32x4_mul(viKLMNx3, vw3));
        vaccOPQRx3 = wasm_f32x4_add(vaccOPQRx3, wasm_f32x4_mul(viOPQRx3, vw3));
        vaccSTUVx3 = wasm_f32x4_add(vaccSTUVx3, wasm_f32x4_mul(viSTUVx3, vw3));
      }
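      // Reduce the four partial accumulator sets into a single set.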
      v128_t vacc0123 = vacc0123x0;
      v128_t vacc4567 = vacc4567x0;
      v128_t vacc89AB = vacc89ABx0;
      v128_t vaccCDEF = vaccCDEFx0;
      v128_t vaccGHIJ = vaccGHIJx0;
      v128_t vaccKLMN = vaccKLMNx0;
      v128_t vaccOPQR = vaccOPQRx0;
      v128_t vaccSTUV = vaccSTUVx0;
      vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
      vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
      vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx1);
      vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx1);
      vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx1);
      vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx1);
      vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx1);
      vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx1);
      vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x2);
      vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x2);
      vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx2);
      vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx2);
      vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx2);
      vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx2);
      vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx2);
      vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx2);
      vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x3);
      vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x3);
      vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx3);
      vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx3);
      vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx3);
      vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx3);
      vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx3);
      vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx3);
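      // Handle the remaining nnz % 4 nonzeros one at a time.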
      if XNN_LIKELY(nnz != 0) {
        do {
          const intptr_t diff = *dmap++;
          const v128_t vi0123 = wasm_v128_load(input);
          const v128_t vi4567 = wasm_v128_load(input + 4);
          const v128_t vi89AB = wasm_v128_load(input + 8);
          const v128_t viCDEF = wasm_v128_load(input + 12);
          const v128_t viGHIJ = wasm_v128_load(input + 16);
          const v128_t viKLMN = wasm_v128_load(input + 20);
          const v128_t viOPQR = wasm_v128_load(input + 24);
          const v128_t viSTUV = wasm_v128_load(input + 28);
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
          vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
          vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
          vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
          vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
          vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
          vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
          vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
        } while (--nnz != 0);
      }
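      // Clamp to [min, max] with compare + bitselect instead of f32x4 min/max;
      // this is the "_x86" flavor of the kernel, which avoids the
      // NaN-propagating min/max instructions that lower poorly on x86.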
      v128_t vout0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
      v128_t vout4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
      v128_t vout89AB = wasm_v128_bitselect(vacc89AB, vmax, wasm_f32x4_le(vacc89AB, vmax));
      v128_t voutCDEF = wasm_v128_bitselect(vaccCDEF, vmax, wasm_f32x4_le(vaccCDEF, vmax));
      v128_t voutGHIJ = wasm_v128_bitselect(vaccGHIJ, vmax, wasm_f32x4_le(vaccGHIJ, vmax));
      v128_t voutKLMN = wasm_v128_bitselect(vaccKLMN, vmax, wasm_f32x4_le(vaccKLMN, vmax));
      v128_t voutOPQR = wasm_v128_bitselect(vaccOPQR, vmax, wasm_f32x4_le(vaccOPQR, vmax));
      v128_t voutSTUV = wasm_v128_bitselect(vaccSTUV, vmax, wasm_f32x4_le(vaccSTUV, vmax));
      vout0123 = wasm_v128_bitselect(vmin, vout0123, wasm_f32x4_lt(vout0123, vmin));
      vout4567 = wasm_v128_bitselect(vmin, vout4567, wasm_f32x4_lt(vout4567, vmin));
      vout89AB = wasm_v128_bitselect(vmin, vout89AB, wasm_f32x4_lt(vout89AB, vmin));
      voutCDEF = wasm_v128_bitselect(vmin, voutCDEF, wasm_f32x4_lt(voutCDEF, vmin));
      voutGHIJ = wasm_v128_bitselect(vmin, voutGHIJ, wasm_f32x4_lt(voutGHIJ, vmin));
      voutKLMN = wasm_v128_bitselect(vmin, voutKLMN, wasm_f32x4_lt(voutKLMN, vmin));
      voutOPQR = wasm_v128_bitselect(vmin, voutOPQR, wasm_f32x4_lt(voutOPQR, vmin));
      voutSTUV = wasm_v128_bitselect(vmin, voutSTUV, wasm_f32x4_lt(voutSTUV, vmin));
      wasm_v128_store(output, vout0123);
      wasm_v128_store(output + 4, vout4567);
      wasm_v128_store(output + 8, vout89AB);
      wasm_v128_store(output + 12, voutCDEF);
      wasm_v128_store(output + 16, voutGHIJ);
      wasm_v128_store(output + 20, voutKLMN);
      wasm_v128_store(output + 24, voutOPQR);
      wasm_v128_store(output + 28, voutSTUV);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 32;
    mc -= 32 * sizeof(float);
  }
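  // Tail: mc was not a multiple of 32 floats. Handle remainders with
  // progressively narrower versions of the same scheme (16, 8, 4, 2, then 1
  // element); output_decrement is re-derived for each narrower block width.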
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 16 * sizeof(float);
    if (mc & (16 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v32x4_load_splat(w); w += 1;
        v128_t vacc4567 = vacc0123;
        v128_t vacc89AB = vacc0123;
        v128_t vaccCDEF = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            const v128_t vi4567 = wasm_v128_load(input + 4);
            const v128_t vi89AB = wasm_v128_load(input + 8);
            const v128_t viCDEF = wasm_v128_load(input + 12);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
            vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
            vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
            vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
        v128_t vout4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
        v128_t vout89AB = wasm_v128_bitselect(vacc89AB, vmax, wasm_f32x4_le(vacc89AB, vmax));
        v128_t voutCDEF = wasm_v128_bitselect(vaccCDEF, vmax, wasm_f32x4_le(vaccCDEF, vmax));
        vout0123 = wasm_v128_bitselect(vmin, vout0123, wasm_f32x4_lt(vout0123, vmin));
        vout4567 = wasm_v128_bitselect(vmin, vout4567, wasm_f32x4_lt(vout4567, vmin));
        vout89AB = wasm_v128_bitselect(vmin, vout89AB, wasm_f32x4_lt(vout89AB, vmin));
        voutCDEF = wasm_v128_bitselect(vmin, voutCDEF, wasm_f32x4_lt(voutCDEF, vmin));
        wasm_v128_store(output, vout0123);

        wasm_v128_store(output + 4, vout4567);
        wasm_v128_store(output + 8, vout89AB);
        wasm_v128_store(output + 12, voutCDEF);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 16;
    }
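    // 8-element tail (two vectors).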
    output_decrement += 8 * sizeof(float);
    if (mc & (8 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v32x4_load_splat(w); w += 1;
        v128_t vacc4567 = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            const v128_t vi4567 = wasm_v128_load(input + 4);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
            vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
        v128_t vout4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
        vout0123 = wasm_v128_bitselect(vmin, vout0123, wasm_f32x4_lt(vout0123, vmin));
        vout4567 = wasm_v128_bitselect(vmin, vout4567, wasm_f32x4_lt(vout4567, vmin));
        wasm_v128_store(output, vout0123);

        wasm_v128_store(output + 4, vout4567);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 8;
    }
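    // 4-element tail (one vector).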
    output_decrement += 4 * sizeof(float);
    if (mc & (4 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
        vout0123 = wasm_v128_bitselect(vmin, vout0123, wasm_f32x4_lt(vout0123, vmin));
        wasm_v128_store(output, vout0123);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
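    // 2-element tail: the input pair is splatted into both 64-bit halves and
    // the two results are written back with a single 64-bit (double) store.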
    output_decrement += 2 * sizeof(float);
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc01 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi01 = wasm_v64x2_load_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
          } while (--nnz != 0);
        }
        v128_t vout01 = wasm_v128_bitselect(vacc01, vmax, wasm_f32x4_le(vacc01, vmax));
        vout01 = wasm_v128_bitselect(vmin, vout01, wasm_f32x4_lt(vout01, vmin));
        *((double*) output) = wasm_f64x2_extract_lane(vout01, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
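    // 1-element tail: compute in all four lanes, store lane 0 only.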
    output_decrement += 1 * sizeof(float);
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0 = wasm_v32x4_load_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
          } while (--nnz != 0);
        }
        v128_t vout0 = wasm_v128_bitselect(vacc0, vmax, wasm_f32x4_le(vacc0, vmax));
        vout0 = wasm_v128_bitselect(vmin, vout0, wasm_f32x4_lt(vout0, vmin));
        *output = wasm_f32x4_extract_lane(vout0, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}