// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/c8-neon-mull.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 2);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

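  // The c8 layout consumes K in groups of 8 bytes; round kc up so every
  // iteration reads a full group (the XNN_OOB_READS annotation permits the
  // resulting reads past the logical end of A).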
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
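  // With a single row (mr == 1), alias row 1 onto row 0: its redundant
  // computation reads valid memory and its store rewrites row 0 with
  // identical values.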
  if XNN_UNPREDICTABLE(mr != 2) {
    a1 = a0;
    c1 = c0;
  }

  do {
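    // The packed weights start with 8 int32 biases, one per output channel.
    // Each bias seeds lane 0 of its channel's accumulator; the other lanes
    // start at zero and are folded in by the horizontal reduction below.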
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;

    size_t k = kc;
    // 2x partial unrolled loop to load 16 bytes at a time using MLA.
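    // Each vmull_s8/vmlal_s8 pair multiplies two 8-byte K-slices into an
    // int16x8 product vector, which vpadalq_s16 then pairwise-accumulates
    // into the per-channel int32 accumulators.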
    while (k >= 16 * sizeof(int8_t)) {
      const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
      const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;

      const int8x8_t vb0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb5x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb6x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb7x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int8x8_t vb0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x0 = vmull_s8(vb0x0, va0x0);
      int16x8_t vprod1x0 = vmull_s8(vb0x0, va1x0);
      vprod0x0 = vmlal_s8(vprod0x0, vb0x1, va0x1);
      vprod1x0 = vmlal_s8(vprod1x0, vb0x1, va1x1);
      vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
      vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
      const int8x8_t vb1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x1 = vmull_s8(vb1x0, va0x0);
      int16x8_t vprod1x1 = vmull_s8(vb1x0, va1x0);
      vprod0x1 = vmlal_s8(vprod0x1, vb1x1, va0x1);
      vprod1x1 = vmlal_s8(vprod1x1, vb1x1, va1x1);
      vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
      vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
      const int8x8_t vb2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x2 = vmull_s8(vb2x0, va0x0);
      int16x8_t vprod1x2 = vmull_s8(vb2x0, va1x0);
      vprod0x2 = vmlal_s8(vprod0x2, vb2x1, va0x1);
      vprod1x2 = vmlal_s8(vprod1x2, vb2x1, va1x1);
      vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
      vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
      const int8x8_t vb3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x3 = vmull_s8(vb3x0, va0x0);
      int16x8_t vprod1x3 = vmull_s8(vb3x0, va1x0);
      vprod0x3 = vmlal_s8(vprod0x3, vb3x1, va0x1);
      vprod1x3 = vmlal_s8(vprod1x3, vb3x1, va1x1);
      vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
      vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
      const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x4 = vmull_s8(vb4x0, va0x0);
      int16x8_t vprod1x4 = vmull_s8(vb4x0, va1x0);
      vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);
      vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);
      vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
      vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
      const int8x8_t vb5x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x5 = vmull_s8(vb5x0, va0x0);
      int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
      vprod0x5 = vmlal_s8(vprod0x5, vb5x1, va0x1);
      vprod1x5 = vmlal_s8(vprod1x5, vb5x1, va1x1);
      vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
      vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
      const int8x8_t vb6x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x6 = vmull_s8(vb6x0, va0x0);
      int16x8_t vprod1x6 = vmull_s8(vb6x0, va1x0);
      vprod0x6 = vmlal_s8(vprod0x6, vb6x1, va0x1);
      vprod1x6 = vmlal_s8(vprod1x6, vb6x1, va1x1);
      vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
      vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
      const int8x8_t vb7x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x7 = vmull_s8(vb7x0, va0x0);
      int16x8_t vprod1x7 = vmull_s8(vb7x0, va1x0);
      vprod0x7 = vmlal_s8(vprod0x7, vb7x1, va0x1);
      vprod1x7 = vmlal_s8(vprod1x7, vb7x1, va1x1);
      vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
      vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);

      k -= 16 * sizeof(int8_t);
    }

    // Handle 8 bytes at a time using MUL.
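    // kc was rounded up to a multiple of 8, so the remainder after the
    // 16-byte loop is either 0 or exactly 8 bytes.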
    if (k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1 = vld1_s8(a1); a1 += 8;

      const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
      const int16x8_t vprod1x0 = vmull_s8(vb0, va1);
      vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
      vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
      const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
      const int16x8_t vprod1x1 = vmull_s8(vb1, va1);
      vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
      vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
      const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
      const int16x8_t vprod1x2 = vmull_s8(vb2, va1);
      vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
      vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
      const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
      const int16x8_t vprod1x3 = vmull_s8(vb3, va1);
      vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
      vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
      const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
      const int16x8_t vprod1x4 = vmull_s8(vb4, va1);
      vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
      vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
      const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
      const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
      vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
      vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
      const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
      const int16x8_t vprod1x6 = vmull_s8(vb6, va1);
      vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
      vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
      const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
      const int16x8_t vprod1x7 = vmull_s8(vb7, va1);
      vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
      vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);

      k -= 8 * sizeof(int8_t);
    }

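    // Each channel's partial sums are spread across the 4 lanes of its
    // accumulator. Reduce them horizontally to one int32 per channel, packed
    // as two 4-channel vectors per row: AArch64 uses vpaddq_s32, while
    // AArch32 falls back to vadd/vpadd on 64-bit halves.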
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);

    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
#else
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
#endif

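    // Requantize with the rndnu scheme: a saturating shift (vqshlq_s32), a
    // saturating doubling multiply returning the high half (vqdmulhq_s32),
    // then a rounding shift (vrshlq_s32). The shift amounts in params are
    // expected to be non-positive, so these shift-left intrinsics shift right.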
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);

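    // Narrow to int16 with saturation, add the output zero point, then
    // saturate down to int8, placing row 0 in the low half and row 1 in the
    // high half of one 16-byte vector.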
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
#endif

    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);

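    // Full 8-column tile: store 8 bytes per row, advance the output pointers
    // to the next column group, and rewind the A pointers (advanced by kc
    // during accumulation) so the same rows feed the next group.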
    if (nc >= 8) {
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
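
#if 0  // Hedged usage sketch (illustrative only; not part of the generated kernel).
// A minimal example of how a caller might tile an M x N x K int8 GEMM over
// this 2x8c8 microkernel. It assumes `packed_w` holds weights prepacked in
// the matching layout (8 int32 biases followed by int8 weight blocks per
// 8-column group) and that `params` was filled in by the corresponding
// XNNPACK rndnu params-init helper; the packing and initialization details
// are assumptions here, not shown.
static void gemm_2x8c8_example(
    size_t m, size_t n, size_t k,
    const int8_t* a, size_t a_stride,
    const void* packed_w,
    int8_t* c, size_t cm_stride,
    const union xnn_qs8_conv_minmax_params* params)
{
  for (size_t i = 0; i < m; i += 2) {
    const size_t mr = ((m - i) < 2) ? (m - i) : 2;
    // One call handles up to 2 rows and all n columns; the kernel advances
    // c by cn_stride (8 bytes for a contiguous output row) per column group.
    xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mlal(
        mr, n, k,
        a + i * a_stride, a_stride,
        packed_w,
        c + i * cm_stride, cm_stride,
        /*cn_stride=*/8 * sizeof(int8_t),
        params);
  }
}
#endif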