// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-blocked.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/spmm.h>

// Sparse-matrix * dense-matrix multiplication (SpMM) micro-kernel with
// min/max output clamping, blocked 4 (M rows) x 2 (N output channels),
// using NEON FMA.
//
//   mc            - extent of the M dimension, in BYTES (asserted to be a
//                   multiple of sizeof(float)); processed 4 floats at a time
//                   with 2- and 1-float tail paths.
//   nc            - number of output channels (N), in elements.
//   input         - dense input; advanced between non-zeroes by the signed
//                   BYTE differences stored in widx_dmap.
//   weights       - packed stream: per-channel initial accumulator (bias)
//                   values followed by the non-zero weight values.
//   widx_dmap     - byte-difference map between consecutive non-zero rows.
//   nidx_nnzmap   - number of non-zeroes for each (block of) output channel(s).
//   output        - output matrix; consecutive channels are output_stride
//                   BYTES apart.
//   params        - scalar min/max clamping bounds.
void xnn_f32_spmm_minmax_ukernel_4x2__neonfma(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  // After writing all nc channels, rewind output to the next group of M rows.
  size_t output_decrement = output_stride * nc - 4 * sizeof(float);
  // Main loop: 4 M-rows at a time.
  while XNN_LIKELY(mc >= 4 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    size_t n = nc;
    // 2 output channels at a time; nnzmap entry covers the channel pair.
    while (n >= 2) {
      uint32_t nnz = *nnzmap++;
      // Initialize both channel accumulators from the packed bias values.
      float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
      float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
      if XNN_LIKELY(nnz != 0) {
        do {
          const intptr_t diff = *dmap++;
          const float32x4_t vi0123 = vld1q_f32(input);
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          __builtin_prefetch(input + 16);
          // One weight per channel, interleaved in the stream.
          const float32x2_t vw = vld1_f32(w); w += 2;
          __builtin_prefetch(w + 32);
          vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
          vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
        } while (--nnz != 0);
      }
      // Clamp to [min, max].
      float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
      float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);

      vout0123n0 = vmaxq_f32(vout0123n0, vmin);
      vout0123n1 = vmaxq_f32(vout0123n1, vmin);

      vst1q_f32(output + 0, vout0123n0);
      output = (float*restrict) ((uintptr_t) output + output_stride);
      vst1q_f32(output + 0, vout0123n1);
      output = (float*restrict) ((uintptr_t) output + output_stride);
      n -= 2;
    }

    // clean up loop, fall back to nr=1
    if XNN_UNLIKELY(n != 0) {
      do {
        uint32_t nnz = *nnzmap++;
        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x4_t vi0123 = vld1q_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            __builtin_prefetch(input + 16);
            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
            __builtin_prefetch(w + 32);
            vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
          } while (--nnz != 0);
        }
        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);

        vout0123 = vmaxq_f32(vout0123, vmin);

        vst1q_f32(output + 0, vout0123);
        output = (float*restrict) ((uintptr_t) output + output_stride);
        n -= 1;
      } while (n != 0);
    }
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 4;
    mc -= 4 * sizeof(float);
  }
  // Tail: handle remaining 2- and 1-float slices of the M dimension.
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 2 * sizeof(float);
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      while (n >= 2) {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc01n0 = vld1_dup_f32(w); w += 1;
        float32x2_t vacc01n1 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi01 = vld1_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x2_t vw = vld1_f32(w); w += 2;

            vacc01n0 = vfma_lane_f32(vacc01n0, vi01, vw, 0);
            vacc01n1 = vfma_lane_f32(vacc01n1, vi01, vw, 1);
          } while (--nnz != 0);
        }
        float32x2_t vout01n0 = vmin_f32(vacc01n0, vget_low_f32(vmax));
        float32x2_t vout01n1 = vmin_f32(vacc01n1, vget_low_f32(vmax));

        vout01n0 = vmax_f32(vout01n0, vget_low_f32(vmin));
        vout01n1 = vmax_f32(vout01n1, vget_low_f32(vmin));

        vst1_f32(output + 0, vout01n0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
        vst1_f32(output + 0, vout01n1);
        output = (float*restrict) ((uintptr_t) output + output_stride);
        n -= 2;
      }

      // clean up loop, fall back to nr=1
      if XNN_UNLIKELY(n != 0) {
        do {
          uint32_t nnz = *nnzmap++;
          float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
          if XNN_LIKELY(nnz != 0) {
            do {
              const intptr_t diff = *dmap++;
              const float32x2_t vi01 = vld1_f32(input);
              input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
              const float32x2_t vw = vld1_dup_f32(w); w += 1;
              vacc01 = vfma_f32(vacc01, vi01, vw);
            } while (--nnz != 0);
          }
          float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
          vout01 = vmax_f32(vout01, vget_low_f32(vmin));

          vst1_f32(output, vout01);
          output = (float*restrict) ((uintptr_t) output + output_stride);
          n -= 1;
        } while (n != 0);
      }
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
    output_decrement += 1 * sizeof(float);
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      while (n >= 2) {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc0n0 = vld1_dup_f32(w); w += 1;
        float32x2_t vacc0n1 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi0 = vld1_dup_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x2_t vw = vld1_f32(w); w += 2;

            vacc0n0 = vfma_lane_f32(vacc0n0, vi0, vw, 0);
            vacc0n1 = vfma_lane_f32(vacc0n1, vi0, vw, 1);
          } while (--nnz != 0);
        }
        float32x2_t vout0n0 = vmin_f32(vacc0n0, vget_low_f32(vmax));
        float32x2_t vout0n1 = vmin_f32(vacc0n1, vget_low_f32(vmax));

        vout0n0 = vmax_f32(vout0n0, vget_low_f32(vmin));
        vout0n1 = vmax_f32(vout0n1, vget_low_f32(vmin));

        vst1_lane_f32(output + 0, vout0n0, 0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
        vst1_lane_f32(output + 0, vout0n1, 0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
        n -= 2;
      }

      // clean up loop, fall back to nr=1
      if XNN_UNLIKELY(n != 0) {
        do {
          uint32_t nnz = *nnzmap++;
          float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
          if XNN_LIKELY(nnz != 0) {
            do {
              const intptr_t diff = *dmap++;
              const float32x2_t vi0 = vld1_dup_f32(input);
              input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
              const float32x2_t vw = vld1_dup_f32(w); w += 1;
              vacc0 = vfma_f32(vacc0, vi0, vw);
            } while (--nnz != 0);
          }
          float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
          vout0 = vmax_f32(vout0, vget_low_f32(vmin));

          // Store lane 0 to match the nr=2 path above (was lane 1; both
          // lanes hold the same value here since all operands are splatted,
          // but lane 0 is the intended, consistent form).
          vst1_lane_f32(output, vout0, 0);
          output = (float*restrict) ((uintptr_t) output + output_stride);
          n -= 1;
        } while (n != 0);
      }
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}