// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/MRx2-neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/igemm.h>

void xnn_f32_igemm_minmax_ukernel_4x2__neonfma_lane_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

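  // Set up one output pointer per row of the tile. Rows beyond mr alias the
  // previous row; because rows are stored highest-first, those redundant
  // writes are overwritten by valid data.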
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
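    // Load the 2 packed bias values as the initial accumulators and replicate
    // them across all 4 rows.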
    float32x2_t vacc0x01 = vld1_f32(w); w += 2;
    float32x2_t vacc1x01 = vacc0x01;
    float32x2_t vacc2x01 = vacc0x01;
    float32x2_t vacc3x01 = vacc0x01;

    size_t p = ks;
    do {
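      // Fetch the next 4 indirection pointers; apply a_offset to real input
      // rows but leave pointers to the zero (padding) buffer untouched.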
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

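      // Main loop: process K in steps of 2 floats per row (64-bit loads).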
      size_t k = kc;
      for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
        const float32x2_t va0 = vld1_f32(a0); a0 += 2;
        const float32x2_t va1 = vld1_f32(a1); a1 += 2;
        const float32x2_t va2 = vld1_f32(a2); a2 += 2;
        const float32x2_t va3 = vld1_f32(a3); a3 += 2;

        const float32x2_t vb01c0 = vld1_f32(w); w += 2;

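        // Multiply-accumulate the k+0 row of B against lane 0 of each A
        // vector; AArch64 has a lane-indexed FMA, elsewhere broadcast first.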
        #if XNN_ARCH_ARM64
          vacc0x01 = vfma_lane_f32(vacc0x01, vb01c0, va0, 0);
          vacc1x01 = vfma_lane_f32(vacc1x01, vb01c0, va1, 0);
          vacc2x01 = vfma_lane_f32(vacc2x01, vb01c0, va2, 0);
          vacc3x01 = vfma_lane_f32(vacc3x01, vb01c0, va3, 0);
        #else
          const float32x2_t va0c0 = vdup_lane_f32(va0, 0);
          const float32x2_t va1c0 = vdup_lane_f32(va1, 0);
          const float32x2_t va2c0 = vdup_lane_f32(va2, 0);
          const float32x2_t va3c0 = vdup_lane_f32(va3, 0);
          vacc0x01 = vfma_f32(vacc0x01, va0c0, vb01c0);
          vacc1x01 = vfma_f32(vacc1x01, va1c0, vb01c0);
          vacc2x01 = vfma_f32(vacc2x01, va2c0, vb01c0);
          vacc3x01 = vfma_f32(vacc3x01, va3c0, vb01c0);
        #endif
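        // Same pattern for the k+1 row of B against lane 1 of each A vector.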
        const float32x2_t vb01c1 = vld1_f32(w); w += 2;

        #if XNN_ARCH_ARM64
          vacc0x01 = vfma_lane_f32(vacc0x01, vb01c1, va0, 1);
          vacc1x01 = vfma_lane_f32(vacc1x01, vb01c1, va1, 1);
          vacc2x01 = vfma_lane_f32(vacc2x01, vb01c1, va2, 1);
          vacc3x01 = vfma_lane_f32(vacc3x01, vb01c1, va3, 1);
        #else
          const float32x2_t va0c1 = vdup_lane_f32(va0, 1);
          const float32x2_t va1c1 = vdup_lane_f32(va1, 1);
          const float32x2_t va2c1 = vdup_lane_f32(va2, 1);
          const float32x2_t va3c1 = vdup_lane_f32(va3, 1);
          vacc0x01 = vfma_f32(vacc0x01, va0c1, vb01c1);
          vacc1x01 = vfma_f32(vacc1x01, va1c1, vb01c1);
          vacc2x01 = vfma_f32(vacc2x01, va2c1, vb01c1);
          vacc3x01 = vfma_f32(vacc3x01, va3c1, vb01c1);
        #endif
      }
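      // If kc is an odd number of floats, process the final K element here.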
      if XNN_UNLIKELY(k != 0) {
        const float32x2_t va0 = vld1_dup_f32(a0);
        const float32x2_t va1 = vld1_dup_f32(a1);
        const float32x2_t va2 = vld1_dup_f32(a2);
        const float32x2_t va3 = vld1_dup_f32(a3);

        const float32x2_t vb01 = vld1_f32(w); w += 2;

        vacc0x01 = vfma_f32(vacc0x01, va0, vb01);
        vacc1x01 = vfma_f32(vacc1x01, va1, vb01);
        vacc2x01 = vfma_f32(vacc2x01, va2, vb01);
        vacc3x01 = vfma_f32(vacc3x01, va3, vb01);
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

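    // Clamp the accumulators to the [min, max] output range from params.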
    const float32x2_t vmax = vld1_dup_f32(&params->scalar.max);
    vacc0x01 = vmin_f32(vacc0x01, vmax);
    vacc1x01 = vmin_f32(vacc1x01, vmax);
    vacc2x01 = vmin_f32(vacc2x01, vmax);
    vacc3x01 = vmin_f32(vacc3x01, vmax);

    const float32x2_t vmin = vld1_dup_f32(&params->scalar.min);
    vacc0x01 = vmax_f32(vacc0x01, vmin);
    vacc1x01 = vmax_f32(vacc1x01, vmin);
    vacc2x01 = vmax_f32(vacc2x01, vmin);
    vacc3x01 = vmax_f32(vacc3x01, vmin);

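    // Store the 4x2 output tile; if only one column remains, write a single
    // lane per row.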
    if XNN_LIKELY(nc >= 2) {
      vst1_f32(c3, vacc3x01);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1_f32(c2, vacc2x01);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1_f32(c1, vacc1x01);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1_f32(c0, vacc0x01);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

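      // Rewind the indirection buffer so the same input rows are reused for
      // the next group of output columns.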
      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 2;
    } else {
      assert(nc == 1);
      vst1_lane_f32(c3, vacc3x01, 0);
      vst1_lane_f32(c2, vacc2x01, 0);
      vst1_lane_f32(c1, vacc1x01, 0);
      vst1_lane_f32(c0, vacc0x01, 0);

      nc = 0;
    }
  } while (nc != 0);
}