// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/spmm.h>

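// Sparse-matrix times dense-matrix (SpMM) micro-kernel with min/max clamping
// for WebAssembly SIMD, x86-tuned variant. It processes the M dimension in
// tiles of 16 floats (MR=16), one output channel at a time (NR=1).
//
// Parameter conventions, as used by this kernel:
//   mc            - size of the M dimension, in bytes (multiple of sizeof(float))
//   nc            - number of output channels
//   weights       - packed sparse weights: for each output channel, an initial
//                   (bias) value followed by its non-zero weight values
//   widx_dmap     - for each non-zero weight, the byte offset added to the
//                   input pointer after that weight has been consumed
//   nidx_nnzmap   - for each output channel, its number of non-zero weights
//   output_stride - byte stride between consecutive output channels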
void xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  size_t output_decrement = output_stride * nc - 16 * sizeof(float);
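  // Main loop: process the M dimension in tiles of 16 floats, walking all nc
  // output channels for each tile.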
  while XNN_LIKELY(mc >= 16 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
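      // The first packed value of each output channel seeds every accumulator
      // (it acts as the bias).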
      v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
      v128_t vacc4567 = vacc0123;
      v128_t vacc89AB = vacc0123;
      v128_t vaccCDEF = vacc0123;
      if XNN_LIKELY(nnz != 0) {
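        // For each non-zero weight: load 16 inputs, advance the input pointer
        // by the precomputed byte offset from dmap, and multiply-accumulate
        // with the scalar weight broadcast across all lanes.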
        do {
          const intptr_t diff = *dmap++;
          const v128_t vi0123 = wasm_v128_load(input);
          const v128_t vi4567 = wasm_v128_load(input + 4);
          const v128_t vi89AB = wasm_v128_load(input + 8);
          const v128_t viCDEF = wasm_v128_load(input + 12);
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          const v128_t vw = wasm_v128_load32_splat(w); w += 1;
          vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
          vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
          vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
        } while (--nnz != 0);
      }
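      // Clamp the accumulators to [min, max] using pseudo-min/max (pmin/pmax),
      // which lower to single minps/maxps instructions on x86 (hence the
      // _x86-suffixed variant of this kernel).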
      v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
      v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
      v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
      v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
      vout0123 = wasm_f32x4_pmax(vmin, vout0123);
      vout4567 = wasm_f32x4_pmax(vmin, vout4567);
      vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
      voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
      wasm_v128_store(output, vout0123);
      wasm_v128_store(output + 4, vout4567);
      wasm_v128_store(output + 8, vout89AB);
      wasm_v128_store(output + 12, voutCDEF);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 16;
    mc -= 16 * sizeof(float);
  }
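  // Remainder: handle a tail of fewer than 16 floats in the M dimension with
  // progressively narrower (8/4/2/1) variants of the same loop.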
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 8 * sizeof(float);
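    // Tail of 8 floats in the M dimension.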
    if (mc & (8 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
        v128_t vacc4567 = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            const v128_t vi4567 = wasm_v128_load(input + 4);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
            vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
        v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
        vout0123 = wasm_f32x4_pmax(vmin, vout0123);
        vout4567 = wasm_f32x4_pmax(vmin, vout4567);
        wasm_v128_store(output, vout0123);

        wasm_v128_store(output + 4, vout4567);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 8;
    }
    output_decrement += 4 * sizeof(float);
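    // Tail of 4 floats in the M dimension.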
    if (mc & (4 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
        vout0123 = wasm_f32x4_pmax(vmin, vout0123);
        wasm_v128_store(output, vout0123);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
    output_decrement += 2 * sizeof(float);
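    // Tail of 2 floats in the M dimension.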
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi01 = wasm_v128_load64_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
          } while (--nnz != 0);
        }
        v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
        vout01 = wasm_f32x4_pmax(vmin, vout01);
        *((double*) output) = wasm_f64x2_extract_lane(vout01, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
    output_decrement += 1 * sizeof(float);
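    // Tail of 1 float in the M dimension.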
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0 = wasm_v128_load32_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
          } while (--nnz != 0);
        }
        v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
        vout0 = wasm_f32x4_pmax(vmin, vout0);
        *output = wasm_f32x4_extract_lane(vout0, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}