// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/wasmsimd-pipelined.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/spmm.h>


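// Sparse-matrix * dense-matrix (SpMM) micro-kernel with min/max output clamping.
// Naming, as reflected in the code below: "4x1" = four MC elements per output
// channel per step, "pipelined" = the loads for the next step are issued before
// the current multiply-add, "x2" = the inner nonzero loop is unrolled twice.
// mc is the MC dimension in bytes, nc is the number of output channels,
// widx_dmap holds byte-offset deltas applied to the input pointer between
// nonzero weights, nidx_nnzmap holds per-channel nonzero counts, and the first
// value streamed from weights for each channel seeds the accumulator (the bias).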
void xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_arm_pipelined_x2(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

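  // Splat the scalar min/max clamping parameters across all four lanes.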
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  size_t output_decrement = output_stride * nc - 4 * sizeof(float);
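  // Main loop: process the MC dimension four floats (one 128-bit vector) at a time.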
  while XNN_LIKELY(mc >= 4 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
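    // Software pipelining: pre-load the first weight splat, input-pointer delta,
    // and input vector before entering the per-channel loop below.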
    v128_t vw = wasm_v32x4_load_splat(w); w += 1;
    intptr_t diff = *dmap++;
    v128_t vi0123 = wasm_v128_load(input + 0);
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      v128_t vacc0123 = vw;
      vw = wasm_v32x4_load_splat(w); w += 1;

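      // Inner loop over nonzero weights, unrolled by two: multiply-accumulate the
      // current input vector with the current weight splat, then advance the input
      // pointer by the byte delta from dmap and pre-load the next weight and input.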
      for (; nnz >= 2; nnz -= 2) {
        vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123,   vw));
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
        diff = *dmap++;
        vw = wasm_v32x4_load_splat(w); w += 1;
        vi0123 = wasm_v128_load(input + 0);
        vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123,   vw));
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
        diff = *dmap++;
        vw = wasm_v32x4_load_splat(w); w += 1;
        vi0123 = wasm_v128_load(input + 0);
      }

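      // Handle the remaining nonzero weight when the count was odd.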
      if XNN_LIKELY(nnz != 0) {
        do {
          vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);

          diff = *dmap++;
          vw = wasm_v32x4_load_splat(w); w += 1;
          vi0123 = wasm_v128_load(input + 0);
        } while (--nnz != 0);
      }
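      // Clamp the accumulator to [min, max], store four outputs, and step to the
      // next output channel (channels are output_stride bytes apart).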
      v128_t vout0123 = wasm_f32x4_min(vacc0123, vmax);
      vout0123 = wasm_f32x4_max(vout0123, vmin);
      wasm_v128_store(output, vout0123);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 4;
    mc -= 4 * sizeof(float);
  }
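  // Remainder: handle 2 and then 1 trailing MC elements when MC is not a multiple of 4.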
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 2 * sizeof(float);
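    // Two remaining MC elements: same scheme, using 64-bit loads and stores.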
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc01 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi01 = wasm_v64x2_load_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
          } while (--nnz != 0);
        }
        v128_t vout01 = wasm_f32x4_min(vacc01, vmax);
        vout01 = wasm_f32x4_max(vout01, vmin);
        *((double*) output) = wasm_f64x2_extract_lane(vout01, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
    output_decrement += 1 * sizeof(float);
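    // One remaining MC element: the input float is loaded and splatted across the
    // vector; only lane 0 of the clamped result is stored.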
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0 = wasm_v32x4_load_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
          } while (--nnz != 0);
        }
        v128_t vout0 = wasm_f32x4_min(vacc0, vmax);
        vout0 = wasm_f32x4_max(vout0, vmin);
        *output = wasm_f32x4_extract_lane(vout0, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}