// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/spmm.h>

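// Sparse-matrix * dense-matrix (SpMM) micro-kernel with min/max clamping:
// mc is the size of the dense M dimension in bytes, nc is the number of output
// channels, nidx_nnzmap gives the non-zero count per output channel, and
// widx_dmap gives the byte offset between the input rows of consecutive
// non-zeros. For each output channel the packed weights stream starts with a
// bias value (loaded directly into the accumulator) followed by that channel's
// non-zero weight values.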
void xnn_f32_spmm_minmax_ukernel_4x1__neonfma_x2(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  size_t output_decrement = output_stride * nc - 4 * sizeof(float);
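  // Main loop: process the M dimension 4 floats (one 128-bit vector) at a time.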
  while XNN_LIKELY(mc >= 4 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
      float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
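      // Loop over this channel's non-zeros, unrolled by 2 with two independent
      // accumulators, presumably to hide FMA latency; prefetches pull upcoming
      // input rows and weights into cache ahead of use.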
      for (; nnz >= 2; nnz -= 2) {
        const intptr_t diff0 = dmap[0];
        const intptr_t diff1 = dmap[1];
        dmap += 2;
        const float32x4_t vi0123x0 = vld1q_f32(input);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
        __builtin_prefetch(input + 16);
        const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
        __builtin_prefetch(w + 32);
        vacc0123x0 = vfmaq_f32(vacc0123x0, vi0123x0, vw0);
        const float32x4_t vi0123x1 = vld1q_f32(input);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
        __builtin_prefetch(input + 16);
        const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
        __builtin_prefetch(w + 32);
        vacc0123x1 = vfmaq_f32(vacc0123x1, vi0123x1, vw1);
      }
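      // Reduce the two partial accumulators, then handle a possible odd
      // remaining non-zero with the non-unrolled loop below.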
      float32x4_t vacc0123 = vacc0123x0;
      vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
      if XNN_LIKELY(nnz != 0) {
        do {
          const intptr_t diff = *dmap++;
          const float32x4_t vi0123 = vld1q_f32(input);
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          __builtin_prefetch(input + 16);
          const float32x4_t vw = vld1q_dup_f32(w); w += 1;
          __builtin_prefetch(w + 32);
          vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
        } while (--nnz != 0);
      }
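      // Clamp to [min, max] and store 4 outputs for this channel, then step to
      // the next output channel row.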
      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
      vout0123 = vmaxq_f32(vout0123, vmin);
      vst1q_f32(output, vout0123);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 4;
    mc -= 4 * sizeof(float);
  }
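  // Remainder of the M dimension (1..3 floats): same algorithm with 2-element
  // and 1-element vectors.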
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 2 * sizeof(float);
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi01 = vld1_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x2_t vw = vld1_dup_f32(w); w += 1;
            vacc01 = vfma_f32(vacc01, vi01, vw);
          } while (--nnz != 0);
        }
        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
        vst1_f32(output, vout01);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
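    // Final single M element, if any, processed in lane 0 of a 2-element vector.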
    output_decrement += 1 * sizeof(float);
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi0 = vld1_dup_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float32x2_t vw = vld1_dup_f32(w); w += 1;
            vacc0 = vfma_f32(vacc0, vi0, vw);
          } while (--nnz != 0);
        }
        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
        vst1_lane_f32(output, vout0, 0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}