// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/spmm.h>

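// SpMM (sparse-matrix times dense-matrix) microkernel: for each of the nc
// output channels, it computes 16 output elements along the mc dimension per
// main-loop iteration. Weights are stored in compressed form: nidx_nnzmap
// holds the number of nonzero weights per output channel, and widx_dmap holds
// byte offsets between the input rows those weights multiply. mc and the
// widx_dmap offsets are in bytes. This is the x86-tuned WAsm SIMD variant
// with the nonzero loop unrolled 4x ("x4").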
void xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86_x4(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

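  // Broadcast the scalar clamping bounds into all four SIMD lanes.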
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vzero = wasm_f64x2_splat(0.0);
  size_t output_decrement = output_stride * nc - 16 * sizeof(float);
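  // Main loop: process 16 output elements (along the mc dimension) per
  // iteration, visiting all nc output channels for each 16-wide block.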
  while XNN_LIKELY(mc >= 16 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      v128_t vacc0123x0 = wasm_v32x4_load_splat(w);
      w += 1;
      v128_t vacc0123x1 = vzero;
      v128_t vacc0123x2 = vzero;
      v128_t vacc0123x3 = vzero;
      v128_t vacc4567x0 = vacc0123x0;
      v128_t vacc4567x1 = vzero;
      v128_t vacc4567x2 = vzero;
      v128_t vacc4567x3 = vzero;
      v128_t vacc89ABx0 = vacc0123x0;
      v128_t vacc89ABx1 = vzero;
      v128_t vacc89ABx2 = vzero;
      v128_t vacc89ABx3 = vzero;
      v128_t vaccCDEFx0 = vacc0123x0;
      v128_t vaccCDEFx1 = vzero;
      v128_t vaccCDEFx2 = vzero;
      v128_t vaccCDEFx3 = vzero;
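      // Unrolled-by-4 loop over the nonzero weights. Each step loads 16
      // consecutive inputs, advances the input pointer by the byte offset
      // from widx_dmap to the row of the next nonzero weight, splats that
      // weight, and accumulates into a separate accumulator set to expose
      // instruction-level parallelism.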
      for (; nnz >= 4; nnz -= 4) {
        const intptr_t diff0 = dmap[0];
        const intptr_t diff1 = dmap[1];
        const intptr_t diff2 = dmap[2];
        const intptr_t diff3 = dmap[3];
        dmap += 4;
        const v128_t vi0123x0 = wasm_v128_load(input);
        const v128_t vi4567x0 = wasm_v128_load(input + 4);
        const v128_t vi89ABx0 = wasm_v128_load(input + 8);
        const v128_t viCDEFx0 = wasm_v128_load(input + 12);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
        const v128_t vw0 = wasm_v32x4_load_splat(w);
        w += 1;
        vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
        vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
        vacc89ABx0 = wasm_f32x4_add(vacc89ABx0, wasm_f32x4_mul(vi89ABx0, vw0));
        vaccCDEFx0 = wasm_f32x4_add(vaccCDEFx0, wasm_f32x4_mul(viCDEFx0, vw0));
        const v128_t vi0123x1 = wasm_v128_load(input);
        const v128_t vi4567x1 = wasm_v128_load(input + 4);
        const v128_t vi89ABx1 = wasm_v128_load(input + 8);
        const v128_t viCDEFx1 = wasm_v128_load(input + 12);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
        const v128_t vw1 = wasm_v32x4_load_splat(w);
        w += 1;
        vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
        vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
        vacc89ABx1 = wasm_f32x4_add(vacc89ABx1, wasm_f32x4_mul(vi89ABx1, vw1));
        vaccCDEFx1 = wasm_f32x4_add(vaccCDEFx1, wasm_f32x4_mul(viCDEFx1, vw1));
        const v128_t vi0123x2 = wasm_v128_load(input);
        const v128_t vi4567x2 = wasm_v128_load(input + 4);
        const v128_t vi89ABx2 = wasm_v128_load(input + 8);
        const v128_t viCDEFx2 = wasm_v128_load(input + 12);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff2);
        const v128_t vw2 = wasm_v32x4_load_splat(w);
        w += 1;
        vacc0123x2 = wasm_f32x4_add(vacc0123x2, wasm_f32x4_mul(vi0123x2, vw2));
        vacc4567x2 = wasm_f32x4_add(vacc4567x2, wasm_f32x4_mul(vi4567x2, vw2));
        vacc89ABx2 = wasm_f32x4_add(vacc89ABx2, wasm_f32x4_mul(vi89ABx2, vw2));
        vaccCDEFx2 = wasm_f32x4_add(vaccCDEFx2, wasm_f32x4_mul(viCDEFx2, vw2));
        const v128_t vi0123x3 = wasm_v128_load(input);
        const v128_t vi4567x3 = wasm_v128_load(input + 4);
        const v128_t vi89ABx3 = wasm_v128_load(input + 8);
        const v128_t viCDEFx3 = wasm_v128_load(input + 12);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff3);
        const v128_t vw3 = wasm_v32x4_load_splat(w);
        w += 1;
        vacc0123x3 = wasm_f32x4_add(vacc0123x3, wasm_f32x4_mul(vi0123x3, vw3));
        vacc4567x3 = wasm_f32x4_add(vacc4567x3, wasm_f32x4_mul(vi4567x3, vw3));
        vacc89ABx3 = wasm_f32x4_add(vacc89ABx3, wasm_f32x4_mul(vi89ABx3, vw3));
        vaccCDEFx3 = wasm_f32x4_add(vaccCDEFx3, wasm_f32x4_mul(viCDEFx3, vw3));
      }
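      // Reduce the four partial accumulator sets into a single sum.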
      v128_t vacc0123 = vacc0123x0;
      v128_t vacc4567 = vacc4567x0;
      v128_t vacc89AB = vacc89ABx0;
      v128_t vaccCDEF = vaccCDEFx0;
      vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
      vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
      vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx1);
      vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx1);
      vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x2);
      vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x2);
      vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx2);
      vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx2);
      vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x3);
      vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x3);
      vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx3);
      vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx3);
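      // Accumulate the remaining (nnz % 4) nonzero weights one at a time.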
      if XNN_LIKELY(nnz != 0) {
        do {
          const intptr_t diff = *dmap++;
          const v128_t vi0123 = wasm_v128_load(input);
          const v128_t vi4567 = wasm_v128_load(input + 4);
          const v128_t vi89AB = wasm_v128_load(input + 8);
          const v128_t viCDEF = wasm_v128_load(input + 12);
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
          vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
          vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
          vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
        } while (--nnz != 0);
      }
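      // Clamp to [min, max] via compare + bitselect; the x86-tuned variant
      // avoids wasm f32x4.min/max, which tend to lower to slower instruction
      // sequences on x86.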
      v128_t vout0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
      v128_t vout4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
      v128_t vout89AB = wasm_v128_bitselect(vacc89AB, vmax, wasm_f32x4_le(vacc89AB, vmax));
      v128_t voutCDEF = wasm_v128_bitselect(vaccCDEF, vmax, wasm_f32x4_le(vaccCDEF, vmax));
      vout0123 = wasm_v128_bitselect(vmin, vout0123, wasm_f32x4_lt(vout0123, vmin));
      vout4567 = wasm_v128_bitselect(vmin, vout4567, wasm_f32x4_lt(vout4567, vmin));
      vout89AB = wasm_v128_bitselect(vmin, vout89AB, wasm_f32x4_lt(vout89AB, vmin));
      voutCDEF = wasm_v128_bitselect(vmin, voutCDEF, wasm_f32x4_lt(voutCDEF, vmin));
      wasm_v128_store(output, vout0123);
      wasm_v128_store(output + 4, vout4567);
      wasm_v128_store(output + 8, vout89AB);
      wasm_v128_store(output + 12, voutCDEF);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 16;
    mc -= 16 * sizeof(float);
  }
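  // Tail: handle any leftover mc elements in progressively narrower blocks
  // of 8, 4, 2, and 1. Before each block, output_decrement grows so that the
  // net forward advance of the output pointer matches the narrower block.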
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 8 * sizeof(float);
    if (mc & (8 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v32x4_load_splat(w); w += 1;
        v128_t vacc4567 = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            const v128_t vi4567 = wasm_v128_load(input + 4);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
            vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
        v128_t vout4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
        vout0123 = wasm_v128_bitselect(vmin, vout0123, wasm_f32x4_lt(vout0123, vmin));
        vout4567 = wasm_v128_bitselect(vmin, vout4567, wasm_f32x4_lt(vout4567, vmin));
        wasm_v128_store(output, vout0123);
        wasm_v128_store(output + 4, vout4567);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 8;
    }
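    // 4-element tail: same pattern with a single accumulator and full-width
    // loads and stores.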
    output_decrement += 4 * sizeof(float);
    if (mc & (4 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
        vout0123 = wasm_v128_bitselect(vmin, vout0123, wasm_f32x4_lt(vout0123, vmin));
        wasm_v128_store(output, vout0123);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
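    // 2-element tail: wasm_v64x2_load_splat loads the input pair into both
    // halves of the vector; only the low two lanes of the result are stored.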
    output_decrement += 2 * sizeof(float);
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc01 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi01 = wasm_v64x2_load_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
          } while (--nnz != 0);
        }
        v128_t vout01 = wasm_v128_bitselect(vacc01, vmax, wasm_f32x4_le(vacc01, vmax));
        vout01 = wasm_v128_bitselect(vmin, vout01, wasm_f32x4_lt(vout01, vmin));
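        // Write the two result floats as a single 64-bit store, type-punning
        // through an f64x2 lane extraction.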
        *((double*) output) = wasm_f64x2_extract_lane(vout01, 0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
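    // 1-element tail: broadcast the single input; only lane 0 of the result
    // is stored.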
    output_decrement += 1 * sizeof(float);
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0 = wasm_v32x4_load_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
          } while (--nnz != 0);
        }
        v128_t vout0 = wasm_v128_bitselect(vacc0, vmax, wasm_f32x4_le(vacc0, vmax));
        vout0 = wasm_v128_bitselect(vmin, vout0, wasm_f32x4_lt(vout0, vmin));
        *output = wasm_f32x4_extract_lane(vout0, 0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}