// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/spmm.h>

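// Sparse-matrix times dense-matrix (SpMM) micro-kernel with min/max clamping.
// mc is the size of the dense (input/output) dimension in bytes and must be a
// multiple of sizeof(float); nc is the number of output channels. The packed
// weights hold, per output channel, one initial accumulator (bias) value
// followed by that channel's nonzero weights. nidx_nnzmap gives the nonzero
// count per channel, and widx_dmap gives, for every nonzero, the byte offset
// by which the input pointer is advanced after that nonzero is consumed.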
void xnn_f32_spmm_minmax_ukernel_16x1__neon(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  size_t output_decrement = output_stride * nc - 16 * sizeof(float);
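  // Main loop: process the mc dimension in tiles of 16 floats; for each tile,
  // compute all nc output channels before advancing the input and output.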
  while XNN_LIKELY(mc >= 16 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
      float32x4_t vacc4567 = vacc0123;
      float32x4_t vacc89AB = vacc0123;
      float32x4_t vaccCDEF = vacc0123;
      if XNN_LIKELY(nnz != 0) {
        do {
          const intptr_t diff = *dmap++;
          const float32x4_t vi0123 = vld1q_f32(input);
          const float32x4_t vi4567 = vld1q_f32(input + 4);
          const float32x4_t vi89AB = vld1q_f32(input + 8);
          const float32x4_t viCDEF = vld1q_f32(input + 12);
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          __builtin_prefetch(input + 16);
          const float32x4_t vw = vld1q_dup_f32(w); w += 1;
          __builtin_prefetch(w + 32);
          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
          vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
          vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
          vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
        } while (--nnz != 0);
      }
      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
      float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
      float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
      vout0123 = vmaxq_f32(vout0123, vmin);
      vout4567 = vmaxq_f32(vout4567, vmin);
      vout89AB = vmaxq_f32(vout89AB, vmin);
      voutCDEF = vmaxq_f32(voutCDEF, vmin);
      vst1q_f32(output, vout0123);
      vst1q_f32(output + 4, vout4567);
      vst1q_f32(output + 8, vout89AB);
      vst1q_f32(output + 12, voutCDEF);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 16;
    mc -= 16 * sizeof(float);
  }
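  // Tail: handle a remainder of fewer than 16 floats with the same scheme,
  // using progressively narrower tiles of 8, 4, 2, and 1 element.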
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 8 * sizeof(float);
    if (mc & (8 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
        float32x4_t vacc4567 = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x4_t vi0123 = vld1q_f32(input);
            const float32x4_t vi4567 = vld1q_f32(input + 4);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
            vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
          } while (--nnz != 0);
        }
        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
        vout0123 = vmaxq_f32(vout0123, vmin);
        vout4567 = vmaxq_f32(vout4567, vmin);
        vst1q_f32(output, vout0123);
        vst1q_f32(output + 4, vout4567);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 8;
    }
    output_decrement += 4 * sizeof(float);
    if (mc & (4 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x4_t vi0123 = vld1q_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x4_t vw = vld1q_dup_f32(w); w += 1;
            vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
          } while (--nnz != 0);
        }
        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
        vout0123 = vmaxq_f32(vout0123, vmin);
        vst1q_f32(output, vout0123);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
    output_decrement += 2 * sizeof(float);
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi01 = vld1_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x2_t vw = vld1_dup_f32(w); w += 1;
            vacc01 = vmla_f32(vacc01, vi01, vw);
          } while (--nnz != 0);
        }
        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
        vst1_f32(output, vout01);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
    output_decrement += 1 * sizeof(float);
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi0 = vld1_dup_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x2_t vw = vld1_dup_f32(w); w += 1;
            vacc0 = vmla_f32(vacc0, vi0, vw);
          } while (--nnz != 0);
        }
        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
        vst1_lane_f32(output, vout0, 0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}