// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/wasmsimd-pipelined.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/spmm.h>


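// Sparse-matrix times dense-matrix (SpMM) micro-kernel with min/max clamping.
// MC is processed in blocks of 8 floats; for each of the NC output channels
// the accumulator starts from the value loaded from w, then adds nnz non-zero
// weights, with widx_dmap supplying byte offsets between the corresponding
// input rows.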
void xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_x86_pipelined_x2(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  size_t output_decrement = output_stride * nc - 8 * sizeof(float);
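  // Main loop: process MC in blocks of 8 floats.  After all NC output channels
  // have been written, output_decrement rewinds the output pointer to the next
  // block of 8 rows.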
  while XNN_LIKELY(mc >= 8 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
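    // Software pipelining: pre-load the first weight, index delta, and input
    // vectors before entering the channel loop.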
    v128_t vw = wasm_v32x4_load_splat(w); w += 1;
    intptr_t diff = *dmap++;
    v128_t vi0123 = wasm_v128_load(input + 0);
    v128_t vi4567 = wasm_v128_load(input + 4);
    size_t n = nc;
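    // Loop over the NC output channels; each accumulator starts from the value
    // already pre-loaded into vw.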
    do {
      uint32_t nnz = *nnzmap++;
      v128_t vacc0123 = vw;
      v128_t vacc4567 = vw;
      vw = wasm_v32x4_load_splat(w); w += 1;

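      // Inner loop over non-zero weights, unrolled by 2: the loads for the
      // next non-zero are issued while the multiply-adds for the current one
      // execute.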
      for (; nnz >= 2; nnz -= 2) {
        vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
        vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
        diff = *dmap++;
        vw = wasm_v32x4_load_splat(w); w += 1;
        vi0123 = wasm_v128_load(input + 0);
        vi4567 = wasm_v128_load(input + 4);
        vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
        vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
        diff = *dmap++;
        vw = wasm_v32x4_load_splat(w); w += 1;
        vi0123 = wasm_v128_load(input + 0);
        vi4567 = wasm_v128_load(input + 4);
      }

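      // At most one non-zero can remain after the unrolled loop; process it
      // with the same pipelined load pattern.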
      if XNN_LIKELY(nnz != 0) {
        do {
          vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);

          diff = *dmap++;
          vw = wasm_v32x4_load_splat(w); w += 1;
          vi0123 = wasm_v128_load(input + 0);
          vi4567 = wasm_v128_load(input + 4);
        } while (--nnz != 0);
      }
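      // Clamp the accumulators to [min, max] with compares and bitselects.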
      v128_t vout0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
      v128_t vout4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
      vout0123 = wasm_v128_bitselect(vmin, vout0123, wasm_f32x4_lt(vout0123, vmin));
      vout4567 = wasm_v128_bitselect(vmin, vout4567, wasm_f32x4_lt(vout4567, vmin));
      wasm_v128_store(output, vout0123);
      wasm_v128_store(output + 4, vout4567);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 8;
    mc -= 8 * sizeof(float);
  }
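  // Handle the MC remainder (fewer than 8 floats) in blocks of 4, 2, and 1.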
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 4 * sizeof(float);
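    // 4-float block: plain (non-pipelined) multiply-add per non-zero weight.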
    if (mc & (4 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
        vout0123 = wasm_v128_bitselect(vmin, vout0123, wasm_f32x4_lt(vout0123, vmin));
        wasm_v128_store(output, vout0123);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
    output_decrement += 2 * sizeof(float);
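    // 2-float block: the two inputs are loaded with a 64-bit splat.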
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc01 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi01 = wasm_v64x2_load_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
          } while (--nnz != 0);
        }
        v128_t vout01 = wasm_v128_bitselect(vacc01, vmax, wasm_f32x4_le(vacc01, vmax));
        vout01 = wasm_v128_bitselect(vmin, vout01, wasm_f32x4_lt(vout01, vmin));
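        // Store the low two lanes of the result as one 64-bit value.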
        *((double*) output) = wasm_f64x2_extract_lane(vout01, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
    output_decrement += 1 * sizeof(float);
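    // 1-float block: scalar tail using splat loads and a single-lane store.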
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0 = wasm_v32x4_load_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
          } while (--nnz != 0);
        }
        v128_t vout0 = wasm_v128_bitselect(vacc0, vmax, wasm_f32x4_le(vacc0, vmax));
        vout0 = wasm_v128_bitselect(vmin, vout0, wasm_f32x4_lt(vout0, vmin));
        *output = wasm_f32x4_extract_lane(vout0, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}