// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-neon-mul16.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>

void xnn_qu8_dwconv_minmax_rndnu_ukernel_up24x9__neon_mul16(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

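  // Load the requantization parameters once per call: the kernel zero point,
  // the rndnu pre-shift, fixed-point multiplier, and post-shift, and the
  // output zero point and clamping bounds.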
  const uint8x8_t vkernel_zero_point = vld1_dup_u8(params->rndnu_neon.kernel_zero_point);
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
  do {
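    // Set up the nine input row pointers for this output pixel. Rows that
    // point at the shared zero buffer (padding) are used as-is; all other
    // rows are adjusted by input_offset.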
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
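    // Main loop: process 24 channels per iteration. The packed weights are
    // expected to be laid out per 24-channel group as 24 int32 biases
    // followed by the 24 uint8 kernel values of each of the 9 taps.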
    for (; c >= 24; c -= 24) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccGHIJ = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccKLMN = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

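      // For each of the 9 taps: widen 24 input bytes to 16 bits, widen the
      // 24 kernel bytes while subtracting the kernel zero point, and
      // accumulate the products into the 32-bit accumulators with vmlal_s16.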
      const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
      const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi0x89ABCDEF = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
      const int16x8_t vk0x89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi0xGHIJKLMN = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
      const int16x8_t vk0xGHIJKLMN = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi0x89ABCDEF), vget_low_s16(vk0x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi0x89ABCDEF), vget_high_s16(vk0x89ABCDEF));
      vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi0xGHIJKLMN), vget_low_s16(vk0xGHIJKLMN));
      vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi0xGHIJKLMN), vget_high_s16(vk0xGHIJKLMN));

      const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
      const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi1x89ABCDEF = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
      const int16x8_t vk1x89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi1xGHIJKLMN = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
      const int16x8_t vk1xGHIJKLMN = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi1x89ABCDEF), vget_low_s16(vk1x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi1x89ABCDEF), vget_high_s16(vk1x89ABCDEF));
      vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi1xGHIJKLMN), vget_low_s16(vk1xGHIJKLMN));
      vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi1xGHIJKLMN), vget_high_s16(vk1xGHIJKLMN));

      const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
      const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi2x89ABCDEF = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
      const int16x8_t vk2x89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi2xGHIJKLMN = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
      const int16x8_t vk2xGHIJKLMN = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi2x89ABCDEF), vget_low_s16(vk2x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi2x89ABCDEF), vget_high_s16(vk2x89ABCDEF));
      vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi2xGHIJKLMN), vget_low_s16(vk2xGHIJKLMN));
      vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi2xGHIJKLMN), vget_high_s16(vk2xGHIJKLMN));

      const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
      const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi3x89ABCDEF = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
      const int16x8_t vk3x89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi3xGHIJKLMN = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
      const int16x8_t vk3xGHIJKLMN = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi3x89ABCDEF), vget_low_s16(vk3x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi3x89ABCDEF), vget_high_s16(vk3x89ABCDEF));
      vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi3xGHIJKLMN), vget_low_s16(vk3xGHIJKLMN));
      vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi3xGHIJKLMN), vget_high_s16(vk3xGHIJKLMN));

      const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
      const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi4x89ABCDEF = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
      const int16x8_t vk4x89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi4xGHIJKLMN = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
      const int16x8_t vk4xGHIJKLMN = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi4x89ABCDEF), vget_low_s16(vk4x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi4x89ABCDEF), vget_high_s16(vk4x89ABCDEF));
      vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi4xGHIJKLMN), vget_low_s16(vk4xGHIJKLMN));
      vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi4xGHIJKLMN), vget_high_s16(vk4xGHIJKLMN));

      const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
      const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi5x89ABCDEF = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
      const int16x8_t vk5x89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi5xGHIJKLMN = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
      const int16x8_t vk5xGHIJKLMN = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi5x89ABCDEF), vget_low_s16(vk5x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi5x89ABCDEF), vget_high_s16(vk5x89ABCDEF));
      vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi5xGHIJKLMN), vget_low_s16(vk5xGHIJKLMN));
      vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi5xGHIJKLMN), vget_high_s16(vk5xGHIJKLMN));

      const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
      const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi6x89ABCDEF = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
      const int16x8_t vk6x89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi6xGHIJKLMN = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
      const int16x8_t vk6xGHIJKLMN = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi6x89ABCDEF), vget_low_s16(vk6x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi6x89ABCDEF), vget_high_s16(vk6x89ABCDEF));
      vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi6xGHIJKLMN), vget_low_s16(vk6xGHIJKLMN));
      vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi6xGHIJKLMN), vget_high_s16(vk6xGHIJKLMN));

      const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
      const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi7x89ABCDEF = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
      const int16x8_t vk7x89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi7xGHIJKLMN = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
      const int16x8_t vk7xGHIJKLMN = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi7x89ABCDEF), vget_low_s16(vk7x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi7x89ABCDEF), vget_high_s16(vk7x89ABCDEF));
      vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi7xGHIJKLMN), vget_low_s16(vk7xGHIJKLMN));
      vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi7xGHIJKLMN), vget_high_s16(vk7xGHIJKLMN));

      const int16x8_t vi8x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i8))); i8 += 8;
      const int16x8_t vk8x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi8x89ABCDEF = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i8))); i8 += 8;
      const int16x8_t vk8x89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);
      const int16x8_t vi8xGHIJKLMN = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i8))); i8 += 8;
      const int16x8_t vk8xGHIJKLMN = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const void*) ((const uint8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi8x89ABCDEF), vget_low_s16(vk8x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi8x89ABCDEF), vget_high_s16(vk8x89ABCDEF));
      vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi8xGHIJKLMN), vget_low_s16(vk8xGHIJKLMN));
      vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi8xGHIJKLMN), vget_high_s16(vk8xGHIJKLMN));

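      // Requantize with the rndnu scheme: a saturating pre-shift, a doubling
      // high-half multiply by the fixed-point multiplier, then a rounding
      // post-shift.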
      vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
      vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
      vacc89AB = vqshlq_s32(vacc89AB, vright_pre_shift);
      vaccCDEF = vqshlq_s32(vaccCDEF, vright_pre_shift);
      vaccGHIJ = vqshlq_s32(vaccGHIJ, vright_pre_shift);
      vaccKLMN = vqshlq_s32(vaccKLMN, vright_pre_shift);

      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
      vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
      vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
      vaccGHIJ = vqdmulhq_s32(vaccGHIJ, vmultiplier);
      vaccKLMN = vqdmulhq_s32(vaccKLMN, vmultiplier);

      vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
      vacc89AB = vrshlq_s32(vacc89AB, vright_post_shift);
      vaccCDEF = vrshlq_s32(vaccCDEF, vright_post_shift);
      vaccGHIJ = vrshlq_s32(vaccGHIJ, vright_post_shift);
      vaccKLMN = vrshlq_s32(vaccKLMN, vright_post_shift);

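      // Narrow the 32-bit results to 16 bits with saturation, add the output
      // zero point, then narrow to unsigned 8 bits with saturation.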
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
      int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
      vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);

      uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
      uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
      int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
      vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);

      uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
      uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#endif  // !XNN_ARCH_ARM64

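      // Clamp the 24 outputs to the [output_min, output_max] range and store.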
      vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
      voutGHIJKLMN = vmax_u8(voutGHIJKLMN, vget_low_u8(voutput_min));

      vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
      voutGHIJKLMN = vmin_u8(voutGHIJKLMN, vget_low_u8(voutput_max));

      vst1q_u8(output, vout0123456789ABCDEF); output += 16;
      vst1_u8(output, voutGHIJKLMN); output += 8;
    }
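    // Remainder: handle the last 1-23 channels in groups of up to 8.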
    if XNN_UNLIKELY(c != 0) {
      const uint8_t* k = (const uint8_t*) ((const int32_t*) w + 24);
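      // Biases are read from w; kernel bytes are read from k, which skips the
      // 24 int32 biases of this group. After the first tap k has advanced by
      // 8, so the fixed offsets 16, 40, 64, ... step 24 bytes per tap.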
      do {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(k), vkernel_zero_point)); k += 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 16)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 40)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 64)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 88)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
        const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 112)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
        const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 136)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
        const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 160)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        const int16x8_t vi8x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i8))); i8 += 8;
        const int16x8_t vk8x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 184)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);

        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);

#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
        vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
        vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));

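        // Store 8 outputs when at least 8 channels remain; otherwise write
        // the final 4/2/1 bytes and finish the channel loop.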
        if XNN_LIKELY(c >= 8) {
          vst1_u8(output, vout01234567); output += 8;
          c -= 8;
        } else {
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
            vout01234567 = vext_u8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
            vout01234567 = vext_u8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_u8(output, vout01234567, 0); output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}