// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/neon-pipelined.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/spmm.h>

void xnn_f32_spmm_ukernel_4x1__neonfma_pipelined(
    uint32_t m,
    uint32_t n,
    const float*restrict a,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict c,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(m != 0);

  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  size_t i = m;
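  // Main loop: process the M dimension in blocks of 4 elements, iterating
  // over all N output channels for each block.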
  while XNN_LIKELY(i >= 4) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    float32x4_t vw = vld1q_dup_f32(w); w += 1;
    intptr_t diff = *dmap++;
    float32x4_t va0123 = vld1q_f32(a);
    size_t j = n;
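    // Per-channel loop, software-pipelined: the weight, index delta, and
    // activations are loaded one iteration ahead of the FMA that uses them.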
    do {
      uint32_t nnz = *nnzmap++;
      float32x4_t vacc0123 = vw;
      vw = vld1q_dup_f32(w); w += 1;
      if XNN_LIKELY(nnz != 0) {
        do {
          vacc0123 = vfmaq_f32(vacc0123, va0123, vw);
          a = (const float*restrict) ((uintptr_t) a + (uintptr_t) diff);

          diff = *dmap++;
          vw = vld1q_dup_f32(w); w += 1;
          va0123 = vld1q_f32(a);
        } while (--nnz != 0);
      }
      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
      vout0123 = vmaxq_f32(vout0123, vmin);
      vst1q_f32(c, vout0123);
      c += m;
    } while (--j != 0);
    c -= m * n;
    c += 4;
    a += 4;
    i -= 4;
  }
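  // Remainder: handle the last 1-3 elements of the M dimension with
  // 2-element and 1-element variants of the same per-channel loop.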
  if XNN_UNLIKELY(i != 0) {
    if (i & 2) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t j = n;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t va01 = vld1_f32(a);
            a = (const float*restrict) ((uintptr_t) a + (uintptr_t) diff);
            const float32x2_t vb = vld1_dup_f32(w); w += 1;
            vacc01 = vfma_f32(vacc01, va01, vb);
          } while (--nnz != 0);
        }
        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
        vst1_f32(c, vout01);
        c += m;
      } while (--j != 0);
      c -= m * n;
      c += 2;
      a += 2;
    }
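    // Final single element of the M dimension, if any.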
91 if (i & 1) {
92 const float*restrict w = weights;
93 const int32_t* dmap = widx_dmap;
94 const uint32_t* nnzmap = nidx_nnzmap;
95 size_t j = n;
96 do {
97 uint32_t nnz = *nnzmap++;
98 float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
99 if XNN_LIKELY(nnz != 0) {
100 do {
101 const intptr_t diff = *dmap++;
102 const float32x2_t va0 = vld1_dup_f32(a);
103 a = (const float*restrict) ((uintptr_t) a + (uintptr_t) diff);
104 const float32x2_t vb = vld1_dup_f32(w); w += 1;
105 vacc0 = vfma_f32(vacc0, va0, vb);
106 } while (--nnz != 0);
107 }
108 float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
109 vout0 = vmax_f32(vout0, vget_low_f32(vmin));
110 vst1_lane_f32(c, vout0, 0);
111 c += m;
112 } while (--j != 0);
113 c -= m * n;
114 c += 1;
115 a += 1;
116 }
117 }
118 }
119