1 // Auto-generated file. Do not edit!
2 // Template: src/f32-spmm/neon-pipelined.c.in
3 // Generator: tools/xngen
4 //
5 // Copyright 2019 Google LLC
6 //
7 // This source code is licensed under the BSD-style license found in the
8 // LICENSE file in the root directory of this source tree.
9
10 #include <assert.h>
11
12 #include <arm_neon.h>
13
14 #include <xnnpack/spmm.h>
15
16
// SpMM (sparse-weights times dense-input) micro-kernel with min/max output
// clamping: processes up to 8 output rows (M) by 1 output column (N) per
// pass using NEON FMA, with software-pipelined weight/input loads.
//
// Parameter semantics (as used below):
//   mc            - number of input/output rows, measured in BYTES
//                   (asserted to be a multiple of sizeof(float)).
//   nc            - number of output columns; one pass of the `n` loop per column.
//   input         - dense input; advanced between non-zeros by byte deltas
//                   taken from widx_dmap.
//   weights       - packed stream: per column, one bias followed by `nnz`
//                   non-zero weight values.
//   widx_dmap     - per-non-zero byte offsets applied to `input`.
//   nidx_nnzmap   - per-column count of non-zero weights.
//   output        - written column-major with `output_stride` bytes between columns.
//   params        - scalar min/max clamping bounds.
void xnn_f32_spmm_minmax_ukernel_8x1__neonfma_pipelined(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  // Broadcast the clamping bounds into all four lanes.
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  size_t output_decrement = output_stride * nc - 8 * sizeof(float);
  // Main loop: full tiles of 8 rows.
  while XNN_LIKELY(mc >= 8 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    // Software pipelining: pre-load the first weight, delta, and input rows
    // so each inner-loop iteration consumes values loaded in the previous one.
    float32x4_t vw = vld1q_dup_f32(w); w += 1;
    intptr_t diff = *dmap++;
    float32x4_t vi0123 = vld1q_f32(input);
    float32x4_t vi4567 = vld1q_f32(input + 4);
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      // `vw` currently holds this column's bias; use it to initialize the
      // accumulators, then immediately pre-load the first weight.
      float32x4_t vacc0123 = vw;
      float32x4_t vacc4567 = vw;
      vw = vld1q_dup_f32(w); w += 1;
      if XNN_LIKELY(nnz != 0) {
        do {
          // Multiply-accumulate with the values pre-loaded last iteration,
          // then advance the input pointer by the sparse byte delta and
          // pre-load the next weight/input pair.
          vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
          vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          __builtin_prefetch(input + 16);
          diff = *dmap++;
          vw = vld1q_dup_f32(w); w += 1;
          __builtin_prefetch(w + 32);
          vi0123 = vld1q_f32(input);
          vi4567 = vld1q_f32(input + 4);
        } while (--nnz != 0);
      }
      // Clamp to [min, max] and store one 8-row output column.
      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
      vout0123 = vmaxq_f32(vout0123, vmin);
      vout4567 = vmaxq_f32(vout4567, vmin);
      vst1q_f32(output, vout0123);
      vst1q_f32(output + 4, vout4567);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    // Rewind output to the next 8-row tile of the first column.
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 8;
    mc -= 8 * sizeof(float);
  }
  // Remainder: handle leftover 4, 2, and 1 row tiles (mc is a byte count,
  // so the row count is tested via bits of mc / sizeof(float)).
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 4 * sizeof(float);
    if (mc & (4 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x4_t vi0123 = vld1q_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            __builtin_prefetch(input + 16);
            const float32x4_t vb = vld1q_dup_f32(w); w += 1;
            __builtin_prefetch(w + 32);
            vacc0123 = vfmaq_f32(vacc0123, vi0123, vb);
          } while (--nnz != 0);
        }
        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
        vout0123 = vmaxq_f32(vout0123, vmin);
        vst1q_f32(output, vout0123);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
    output_decrement += 2 * sizeof(float);
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi01 = vld1_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            __builtin_prefetch(input + 16);
            const float32x2_t vb = vld1_dup_f32(w); w += 1;
            __builtin_prefetch(w + 32);
            vacc01 = vfma_f32(vacc01, vi01, vb);
          } while (--nnz != 0);
        }
        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
        vst1_f32(output, vout01);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
    output_decrement += 1 * sizeof(float);
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi0 = vld1_dup_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            __builtin_prefetch(input + 16);
            const float32x2_t vb = vld1_dup_f32(w); w += 1;
            __builtin_prefetch(w + 32);
            vacc0 = vfma_f32(vacc0, vi0, vb);
          } while (--nnz != 0);
        }
        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
        // Only lane 0 is meaningful for the final single row.
        vst1_lane_f32(output, vout0, 0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}
161