// Auto-generated file. Do not edit!
//   Template: src/qu8-dwconv/unipass-neon-mul8.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>
16 
// QU8 depthwise-convolution microkernel: 9 kernel taps ("x9"), single pass,
// processing the channel dimension in tiles of up to 24 ("up24"), using NEON
// widening byte multiplies (vmull_u8) and rndnu requantization.
//
// Weight layout per 24-channel tile: 24 int32 biases followed by 9 groups of
// 24 uint8 kernel values (one group per tap). The remainder path re-reads the
// same layout through the byte pointer `k` with fixed per-tap offsets.
//
// `input` is an array of 9 row pointers per output pixel; rows equal to `zero`
// are the padding row and are NOT offset by `input_offset`. XNN_OOB_READS:
// the kernel may read up to a full vector past the end of each row/tile.
void xnn_qu8_dwconv_minmax_rndnu_ukernel_up24x9__neon_mul8(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  // Broadcast requantization parameters once per call.
  const uint8x8_t vkernel_zero_point = vld1_dup_u8(params->rndnu_neon.kernel_zero_point);
  const uint16x8_t vkernel_zero_point16 = vmovl_u8(vkernel_zero_point);
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
  do {
    // Set up the 9 input-row pointers for this output pixel. The `zero` row
    // (padding) is used as-is; real rows are shifted by input_offset.
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);


    size_t c = channels;
    const void* w = weights;
    // Main loop: full 24-channel tiles. Accumulates raw uint8*uint8 products
    // and the running sum of inputs; the kernel-zero-point correction
    // (sum * kernel_zero_point) is subtracted once at the end of the tile.
    for (; c >= 24; c -= 24) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccGHIJ = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccKLMN = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);


      // Tap 0
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vk0x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
      const uint8x8_t vk0x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
      const uint8x8_t vk0xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      uint16x8_t vprod01234567 = vmull_u8(vi0x01234567, vk0x01234567);
      uint16x8_t vprod89ABCDEF = vmull_u8(vi0x89ABCDEF, vk0x89ABCDEF);
      uint16x8_t vprodGHIJKLMN = vmull_u8(vi0xGHIJKLMN, vk0xGHIJKLMN);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      // Tap 1 (also starts the input-sum accumulation used for the
      // kernel-zero-point correction below)
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vk1x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
      const uint8x8_t vk1x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
      const uint8x8_t vk1xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi1x01234567, vk1x01234567);
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      vprod89ABCDEF = vmull_u8(vi1x89ABCDEF, vk1x89ABCDEF);
      uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi1xGHIJKLMN, vk1xGHIJKLMN);
      uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      // Tap 2
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      const uint8x8_t vk2x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
      const uint8x8_t vk2x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
      const uint8x8_t vk2xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi2x01234567, vk2x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      vprod89ABCDEF = vmull_u8(vi2x89ABCDEF, vk2x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi2xGHIJKLMN, vk2xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      // Tap 3
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      const uint8x8_t vk3x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
      const uint8x8_t vk3x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
      const uint8x8_t vk3xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi3x01234567, vk3x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      vprod89ABCDEF = vmull_u8(vi3x89ABCDEF, vk3x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi3xGHIJKLMN, vk3xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      // Tap 4
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      const uint8x8_t vk4x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
      const uint8x8_t vk4x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
      const uint8x8_t vk4xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi4x01234567, vk4x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      vprod89ABCDEF = vmull_u8(vi4x89ABCDEF, vk4x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi4xGHIJKLMN, vk4xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      // Tap 5
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      const uint8x8_t vk5x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
      const uint8x8_t vk5x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
      const uint8x8_t vk5xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi5x01234567, vk5x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      vprod89ABCDEF = vmull_u8(vi5x89ABCDEF, vk5x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi5xGHIJKLMN, vk5xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      // Tap 6
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      const uint8x8_t vk6x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
      const uint8x8_t vk6x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
      const uint8x8_t vk6xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi6x01234567, vk6x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      vprod89ABCDEF = vmull_u8(vi6x89ABCDEF, vk6x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi6xGHIJKLMN, vk6xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      // Tap 7
      const uint8x8_t vi7x01234567 = vld1_u8(i7); i7 += 8;
      const uint8x8_t vk7x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi7x89ABCDEF = vld1_u8(i7); i7 += 8;
      const uint8x8_t vk7x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi7xGHIJKLMN = vld1_u8(i7); i7 += 8;
      const uint8x8_t vk7xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi7x01234567, vk7x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi7x01234567);
      vprod89ABCDEF = vmull_u8(vi7x89ABCDEF, vk7x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi7x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi7xGHIJKLMN, vk7xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi7xGHIJKLMN);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      // Tap 8
      const uint8x8_t vi8x01234567 = vld1_u8(i8); i8 += 8;
      const uint8x8_t vk8x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi8x89ABCDEF = vld1_u8(i8); i8 += 8;
      const uint8x8_t vk8x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi8xGHIJKLMN = vld1_u8(i8); i8 += 8;
      const uint8x8_t vk8xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi8x01234567, vk8x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi8x01234567);
      vprod89ABCDEF = vmull_u8(vi8x89ABCDEF, vk8x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi8x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi8xGHIJKLMN, vk8xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi8xGHIJKLMN);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));

      // Subtract kernel_zero_point * sum(inputs) in one pass (all lanes of
      // vkernel_zero_point16 are equal, so low/high halves are interchangeable).
      vacc0123 = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567), vget_low_u16(vkernel_zero_point16)));
      vacc4567 = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567), vget_high_u16(vkernel_zero_point16)));
      vacc89AB = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF), vget_low_u16(vkernel_zero_point16)));
      vaccCDEF = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF), vget_high_u16(vkernel_zero_point16)));
      vaccGHIJ = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vsumGHIJKLMN), vget_low_u16(vkernel_zero_point16)));
      vaccKLMN = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vsumGHIJKLMN), vget_high_u16(vkernel_zero_point16)));

      // rndnu requantization: pre-shift, saturating doubling multiply-high,
      // rounding post-shift.
      vacc0123 = vshlq_s32(vacc0123, vright_pre_shift);
      vacc4567 = vshlq_s32(vacc4567, vright_pre_shift);
      vacc89AB = vshlq_s32(vacc89AB, vright_pre_shift);
      vaccCDEF = vshlq_s32(vaccCDEF, vright_pre_shift);
      vaccGHIJ = vshlq_s32(vaccGHIJ, vright_pre_shift);
      vaccKLMN = vshlq_s32(vaccKLMN, vright_pre_shift);

      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
      vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
      vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
      vaccGHIJ = vqdmulhq_s32(vaccGHIJ, vmultiplier);
      vaccKLMN = vqdmulhq_s32(vaccKLMN, vmultiplier);

      vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
      vacc89AB = vrshlq_s32(vacc89AB, vright_post_shift);
      vaccCDEF = vrshlq_s32(vaccCDEF, vright_post_shift);
      vaccGHIJ = vrshlq_s32(vaccGHIJ, vright_post_shift);
      vaccKLMN = vrshlq_s32(vaccKLMN, vright_post_shift);

      // Narrow to uint8 with zero-point addition and min/max clamping.
#if XNN_ARCH_ARM64
      const int16x8_t vacc01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567), voutput_zero_point);
      const int16x8_t vacc89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF), voutput_zero_point);
      const int16x8_t vaccGHIJKLMN = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN), voutput_zero_point);

      uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
      uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#else
      const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
      const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
      const int16x8_t vaccGHIJKLMN = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN)), voutput_zero_point);

      uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
      uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#endif

      vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
      voutGHIJKLMN = vmax_u8(voutGHIJKLMN, vget_low_u8(voutput_min));

      vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
      voutGHIJKLMN = vmin_u8(voutGHIJKLMN, vget_low_u8(voutput_max));

      vst1q_u8(output, vout0123456789ABCDEF); output += 16;
      vst1_u8(output, voutGHIJKLMN); output += 8;
    }
    // Remainder path: up to 23 channels, processed 8 at a time with widened
    // int16 multiply-accumulates. `k` points at the uint8 kernel data that
    // follows the 24 int32 biases; fixed offsets (+16, +40, ...) step through
    // the per-tap groups of 24 weights.
    if XNN_UNLIKELY(c != 0) {
      const uint8_t* k = (const uint8_t*) ((const int32_t*) w + 24);
      do {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(k), vkernel_zero_point)); k += 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 16)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 40)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 64)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 88)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
        const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 112)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
        const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 136)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
        const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 160)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        const int16x8_t vi8x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i8))); i8 += 8;
        const int16x8_t vk8x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 184)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

        vacc0123 = vrshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_pre_shift);

        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);

#if XNN_ARCH_ARM64
        const int16x8_t vacc01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567), voutput_zero_point);
        uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#else
        const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
        uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#endif

        vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
        vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));

        if XNN_LIKELY(c >= 8) {
          vst1_u8(output, vout01234567); output += 8;
          c -= 8;
        } else {
          // Final sub-vector store: 4-, 2-, then 1-byte pieces.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
            vout01234567 = vext_u8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
            vout01234567 = vext_u8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_u8(output, vout01234567, 0); output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
426