// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/c16-neon-mlal.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

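// GEMM microkernel producing a 2x8 tile of int8 output: 2 rows of A times 8
// packed columns of B, consuming 16 bytes of K per loop iteration ("c16"),
// with NEON VMULL/VMLAL arithmetic and rndnu requantization.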
void xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c16__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 2);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

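  // Round kc up to a multiple of 16 bytes so the main K loop below needs no
  // remainder path. Reads past the true kc are expected: the packed weights
  // are padded to this granularity, and the XNN_OOB_READS annotation marks
  // the (harmless) over-read of the input rows.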
  kc = round_up_po2(kc, 16 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
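  // If only one row remains, alias row 1 onto row 0: the duplicated
  // computation is wasted work, but the stores then simply rewrite row 0
  // with identical values, so the same code path handles both mr values.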
  if XNN_UNPREDICTABLE(mr != 2) {
    a1 = a0;
    c1 = c0;
  }

  do {
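    // Initialize 8 accumulators per row from the per-channel bias values.
    // Each bias lands in lane 0 of its own int32x4; the other lanes start at
    // zero and are folded in by the pairwise reduction after the K loop.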
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;

    // Main K loop: consume 16 bytes of K per iteration (kc is a multiple of 16).
    size_t k = kc;
    while (k != 0) {
      const int8x16_t va0 = vld1q_s8(a0); a0 += 16;
      const int8x16_t va1 = vld1q_s8(a1); a1 += 16;

      const int8x16_t vb0 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb1 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb2 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb3 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb4 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb5 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb6 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb7 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));

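      // Per output column n: widening-multiply the low 8 bytes (VMULL),
      // multiply-accumulate the high 8 bytes into the same int16 lanes
      // (VMLAL), then pairwise add-and-widen (VPADAL) into the int32
      // accumulator, reducing 16 K elements per column per iteration.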
      int16x8_t vprod0x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va0));
      int16x8_t vprod1x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va1));
      vprod0x0 = vmlal_s8(vprod0x0, vget_high_s8(vb0), vget_high_s8(va0));
      vprod1x0 = vmlal_s8(vprod1x0, vget_high_s8(vb0), vget_high_s8(va1));
      vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
      vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
      int16x8_t vprod0x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va0));
      int16x8_t vprod1x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va1));
      vprod0x1 = vmlal_s8(vprod0x1, vget_high_s8(vb1), vget_high_s8(va0));
      vprod1x1 = vmlal_s8(vprod1x1, vget_high_s8(vb1), vget_high_s8(va1));
      vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
      vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
      int16x8_t vprod0x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va0));
      int16x8_t vprod1x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va1));
      vprod0x2 = vmlal_s8(vprod0x2, vget_high_s8(vb2), vget_high_s8(va0));
      vprod1x2 = vmlal_s8(vprod1x2, vget_high_s8(vb2), vget_high_s8(va1));
      vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
      vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
      int16x8_t vprod0x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va0));
      int16x8_t vprod1x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va1));
      vprod0x3 = vmlal_s8(vprod0x3, vget_high_s8(vb3), vget_high_s8(va0));
      vprod1x3 = vmlal_s8(vprod1x3, vget_high_s8(vb3), vget_high_s8(va1));
      vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
      vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
      int16x8_t vprod0x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va0));
      int16x8_t vprod1x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va1));
      vprod0x4 = vmlal_s8(vprod0x4, vget_high_s8(vb4), vget_high_s8(va0));
      vprod1x4 = vmlal_s8(vprod1x4, vget_high_s8(vb4), vget_high_s8(va1));
      vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
      vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
      int16x8_t vprod0x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va0));
      int16x8_t vprod1x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va1));
      vprod0x5 = vmlal_s8(vprod0x5, vget_high_s8(vb5), vget_high_s8(va0));
      vprod1x5 = vmlal_s8(vprod1x5, vget_high_s8(vb5), vget_high_s8(va1));
      vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
      vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
      int16x8_t vprod0x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va0));
      int16x8_t vprod1x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va1));
      vprod0x6 = vmlal_s8(vprod0x6, vget_high_s8(vb6), vget_high_s8(va0));
      vprod1x6 = vmlal_s8(vprod1x6, vget_high_s8(vb6), vget_high_s8(va1));
      vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
      vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
      int16x8_t vprod0x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va0));
      int16x8_t vprod1x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va1));
      vprod0x7 = vmlal_s8(vprod0x7, vget_high_s8(vb7), vget_high_s8(va0));
      vprod1x7 = vmlal_s8(vprod1x7, vget_high_s8(vb7), vget_high_s8(va1));
      vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
      vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);

      k -= 16 * sizeof(int8_t);
    }

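    // Horizontal reduction: each vaccMxN now holds 4 partial sums for a
    // single output element. Pairwise-add them down to one lane each and
    // gather the results into vaccMx0123 / vaccMx4567.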
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
#else
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
#endif

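    // Requantize with the rndnu scheme: a saturating pre-shift, a saturating
    // doubling multiply-high by the fixed-point multiplier, and a rounding
    // post-shift, with all three constants taken from the packed params.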
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);

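    // Narrow to int16 with saturation, add the output zero point, then
    // narrow to int8 with saturation, packing both rows into one register
    // (row 0 in the low 8 bytes, row 1 in the high 8 bytes).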
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
#endif
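    // Clamp both rows to the requested [output_min, output_max] range.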
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);

    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);

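    // Store the 2x8 tile. The common case writes a full 8 columns per row,
    // advances the output pointers by cn_stride, and rewinds the input
    // pointers by kc for the next column tile. The tail handles 1-7 columns
    // with progressively narrower stores, rotating the vector with VEXT so
    // the next elements to store are always in the low lanes.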
    if (nc >= 8) {
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}