// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-sse-mul32.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>

xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx_mul32(size_t channels,size_t output_width,const int8_t ** input,const void * weights,int8_t * output,size_t input_stride,size_t output_increment,size_t input_offset,const int8_t * zero,const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS (1)])18 void xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx_mul32(
19     size_t channels,
20     size_t output_width,
21     const int8_t** input,
22     const void* weights,
23     int8_t* output,
24     size_t input_stride,
25     size_t output_increment,
26     size_t input_offset,
27     const int8_t* zero,
28     const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
29 {
30   assert(channels != 0);
31   assert(output_width != 0);
32 
33   do {
34     const int8_t* i0 = input[0];
35     assert(i0 != NULL);
36     if XNN_UNPREDICTABLE(i0 != zero) {
37       i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
38     }
39     const int8_t* i1 = input[1];
40     assert(i1 != NULL);
41     if XNN_UNPREDICTABLE(i1 != zero) {
42       i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
43     }
44     const int8_t* i2 = input[2];
45     assert(i2 != NULL);
46     if XNN_UNPREDICTABLE(i2 != zero) {
47       i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
48     }
49     const int8_t* i3 = input[3];
50     assert(i3 != NULL);
51     if XNN_UNPREDICTABLE(i3 != zero) {
52       i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
53     }
54     const int8_t* i4 = input[4];
55     assert(i4 != NULL);
56     if XNN_UNPREDICTABLE(i4 != zero) {
57       i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
58     }
59     const int8_t* i5 = input[5];
60     assert(i5 != NULL);
61     if XNN_UNPREDICTABLE(i5 != zero) {
62       i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
63     }
64     const int8_t* i6 = input[6];
65     assert(i6 != NULL);
66     if XNN_UNPREDICTABLE(i6 != zero) {
67       i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
68     }
69     const int8_t* i7 = input[7];
70     assert(i7 != NULL);
71     if XNN_UNPREDICTABLE(i7 != zero) {
72       i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
73     }
74     const int8_t* i8 = input[8];
75     assert(i8 != NULL);
76     if XNN_UNPREDICTABLE(i8 != zero) {
77       i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
78     }
79     const int8_t* i9 = input[9];
80     assert(i9 != NULL);
81     if XNN_UNPREDICTABLE(i9 != zero) {
82       i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
83     }
84     const int8_t* i10 = input[10];
85     assert(i10 != NULL);
86     if XNN_UNPREDICTABLE(i10 != zero) {
87       i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
88     }
89     const int8_t* i11 = input[11];
90     assert(i11 != NULL);
91     if XNN_UNPREDICTABLE(i11 != zero) {
92       i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
93     }
94     const int8_t* i12 = input[12];
95     assert(i12 != NULL);
96     if XNN_UNPREDICTABLE(i12 != zero) {
97       i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
98     }
99     const int8_t* i13 = input[13];
100     assert(i13 != NULL);
101     if XNN_UNPREDICTABLE(i13 != zero) {
102       i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
103     }
104     const int8_t* i14 = input[14];
105     assert(i14 != NULL);
106     if XNN_UNPREDICTABLE(i14 != zero) {
107       i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
108     }
109     const int8_t* i15 = input[15];
110     assert(i15 != NULL);
111     if XNN_UNPREDICTABLE(i15 != zero) {
112       i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
113     }
114     const int8_t* i16 = input[16];
115     assert(i16 != NULL);
116     if XNN_UNPREDICTABLE(i16 != zero) {
117       i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
118     }
119     const int8_t* i17 = input[17];
120     assert(i17 != NULL);
121     if XNN_UNPREDICTABLE(i17 != zero) {
122       i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
123     }
124     const int8_t* i18 = input[18];
125     assert(i18 != NULL);
126     if XNN_UNPREDICTABLE(i18 != zero) {
127       i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
128     }
129     const int8_t* i19 = input[19];
130     assert(i19 != NULL);
131     if XNN_UNPREDICTABLE(i19 != zero) {
132       i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
133     }
134     const int8_t* i20 = input[20];
135     assert(i20 != NULL);
136     if XNN_UNPREDICTABLE(i20 != zero) {
137       i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
138     }
139     const int8_t* i21 = input[21];
140     assert(i21 != NULL);
141     if XNN_UNPREDICTABLE(i21 != zero) {
142       i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
143     }
144     const int8_t* i22 = input[22];
145     assert(i22 != NULL);
146     if XNN_UNPREDICTABLE(i22 != zero) {
147       i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
148     }
149     const int8_t* i23 = input[23];
150     assert(i23 != NULL);
151     if XNN_UNPREDICTABLE(i23 != zero) {
152       i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
153     }
154     const int8_t* i24 = input[24];
155     assert(i24 != NULL);
156     if XNN_UNPREDICTABLE(i24 != zero) {
157       i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
158     }
159     input = (const int8_t**) ((uintptr_t) input + input_stride);
160 
161     size_t c = channels;
162     const void* w = weights;
163     for (; c >= 8; c -= 8) {
164       __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
165       __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
166 
167 
168       const __m128i vi0x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i0));
169       const __m128i vk0x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t))));
170       const __m128i vi0x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i0 + 4));
171       const __m128i vk0x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 4 * sizeof(int8_t))));
172       i0 += 8;
173 
174       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi0x0123, vk0x0123));
175       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi0x4567, vk0x4567));
176 
177       const __m128i vi1x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i1));
178       const __m128i vk1x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t))));
179       const __m128i vi1x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i1 + 4));
180       const __m128i vk1x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 12 * sizeof(int8_t))));
181       i1 += 8;
182 
183       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi1x0123, vk1x0123));
184       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi1x4567, vk1x4567));
185 
186       const __m128i vi2x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i2));
187       const __m128i vk2x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t))));
188       const __m128i vi2x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i2 + 4));
189       const __m128i vk2x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 20 * sizeof(int8_t))));
190       i2 += 8;
191 
192       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi2x0123, vk2x0123));
193       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi2x4567, vk2x4567));
194 
195       const __m128i vi3x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i3));
196       const __m128i vk3x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t))));
197       const __m128i vi3x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i3 + 4));
198       const __m128i vk3x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 28 * sizeof(int8_t))));
199       i3 += 8;
200 
201       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi3x0123, vk3x0123));
202       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi3x4567, vk3x4567));
203 
204       const __m128i vi4x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i4));
205       const __m128i vk4x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t))));
206       const __m128i vi4x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i4 + 4));
207       const __m128i vk4x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 36 * sizeof(int8_t))));
208       i4 += 8;
209 
210       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi4x0123, vk4x0123));
211       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi4x4567, vk4x4567));
212 
213       const __m128i vi5x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i5));
214       const __m128i vk5x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t))));
215       const __m128i vi5x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i5 + 4));
216       const __m128i vk5x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 44 * sizeof(int8_t))));
217       i5 += 8;
218 
219       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi5x0123, vk5x0123));
220       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi5x4567, vk5x4567));
221 
222       const __m128i vi6x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i6));
223       const __m128i vk6x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t))));
224       const __m128i vi6x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i6 + 4));
225       const __m128i vk6x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 52 * sizeof(int8_t))));
226       i6 += 8;
227 
228       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi6x0123, vk6x0123));
229       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi6x4567, vk6x4567));
230 
231       const __m128i vi7x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i7));
232       const __m128i vk7x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t))));
233       const __m128i vi7x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i7 + 4));
234       const __m128i vk7x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 60 * sizeof(int8_t))));
235       i7 += 8;
236 
237       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi7x0123, vk7x0123));
238       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi7x4567, vk7x4567));
239 
240       const __m128i vi8x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i8));
241       const __m128i vk8x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t))));
242       const __m128i vi8x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i8 + 4));
243       const __m128i vk8x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 68 * sizeof(int8_t))));
244       i8 += 8;
245 
246       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi8x0123, vk8x0123));
247       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi8x4567, vk8x4567));
248 
249       const __m128i vi9x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i9));
250       const __m128i vk9x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t))));
251       const __m128i vi9x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i9 + 4));
252       const __m128i vk9x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 76 * sizeof(int8_t))));
253       i9 += 8;
254 
255       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi9x0123, vk9x0123));
256       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi9x4567, vk9x4567));
257 
258       const __m128i vi10x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i10));
259       const __m128i vk10x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 80 * sizeof(int8_t))));
260       const __m128i vi10x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i10 + 4));
261       const __m128i vk10x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 84 * sizeof(int8_t))));
262       i10 += 8;
263 
264       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi10x0123, vk10x0123));
265       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi10x4567, vk10x4567));
266 
267       const __m128i vi11x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i11));
268       const __m128i vk11x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 88 * sizeof(int8_t))));
269       const __m128i vi11x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i11 + 4));
270       const __m128i vk11x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 92 * sizeof(int8_t))));
271       i11 += 8;
272 
273       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi11x0123, vk11x0123));
274       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi11x4567, vk11x4567));
275 
276       const __m128i vi12x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i12));
277       const __m128i vk12x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 96 * sizeof(int8_t))));
278       const __m128i vi12x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i12 + 4));
279       const __m128i vk12x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 100 * sizeof(int8_t))));
280       i12 += 8;
281 
282       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi12x0123, vk12x0123));
283       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi12x4567, vk12x4567));
284 
285       const __m128i vi13x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i13));
286       const __m128i vk13x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 104 * sizeof(int8_t))));
287       const __m128i vi13x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i13 + 4));
288       const __m128i vk13x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 108 * sizeof(int8_t))));
289       i13 += 8;
290 
291       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi13x0123, vk13x0123));
292       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi13x4567, vk13x4567));
293 
294       const __m128i vi14x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i14));
295       const __m128i vk14x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 112 * sizeof(int8_t))));
296       const __m128i vi14x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i14 + 4));
297       const __m128i vk14x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 116 * sizeof(int8_t))));
298       i14 += 8;
299 
300       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi14x0123, vk14x0123));
301       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi14x4567, vk14x4567));
302 
303       const __m128i vi15x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i15));
304       const __m128i vk15x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 120 * sizeof(int8_t))));
305       const __m128i vi15x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i15 + 4));
306       const __m128i vk15x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 124 * sizeof(int8_t))));
307       i15 += 8;
308 
309       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi15x0123, vk15x0123));
310       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi15x4567, vk15x4567));
311 
312       const __m128i vi16x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i16));
313       const __m128i vk16x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 128 * sizeof(int8_t))));
314       const __m128i vi16x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i16 + 4));
315       const __m128i vk16x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 132 * sizeof(int8_t))));
316       i16 += 8;
317 
318       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi16x0123, vk16x0123));
319       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi16x4567, vk16x4567));
320 
321       const __m128i vi17x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i17));
322       const __m128i vk17x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 136 * sizeof(int8_t))));
323       const __m128i vi17x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i17 + 4));
324       const __m128i vk17x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 140 * sizeof(int8_t))));
325       i17 += 8;
326 
327       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi17x0123, vk17x0123));
328       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi17x4567, vk17x4567));
329 
330       const __m128i vi18x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i18));
331       const __m128i vk18x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 144 * sizeof(int8_t))));
332       const __m128i vi18x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i18 + 4));
333       const __m128i vk18x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 148 * sizeof(int8_t))));
334       i18 += 8;
335 
336       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi18x0123, vk18x0123));
337       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi18x4567, vk18x4567));
338 
339       const __m128i vi19x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i19));
340       const __m128i vk19x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 152 * sizeof(int8_t))));
341       const __m128i vi19x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i19 + 4));
342       const __m128i vk19x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 156 * sizeof(int8_t))));
343       i19 += 8;
344 
345       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi19x0123, vk19x0123));
346       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi19x4567, vk19x4567));
347 
348       const __m128i vi20x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i20));
349       const __m128i vk20x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 160 * sizeof(int8_t))));
350       const __m128i vi20x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i20 + 4));
351       const __m128i vk20x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 164 * sizeof(int8_t))));
352       i20 += 8;
353 
354       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi20x0123, vk20x0123));
355       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi20x4567, vk20x4567));
356 
357       const __m128i vi21x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i21));
358       const __m128i vk21x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 168 * sizeof(int8_t))));
359       const __m128i vi21x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i21 + 4));
360       const __m128i vk21x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 172 * sizeof(int8_t))));
361       i21 += 8;
362 
363       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi21x0123, vk21x0123));
364       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi21x4567, vk21x4567));
365 
366       const __m128i vi22x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i22));
367       const __m128i vk22x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 176 * sizeof(int8_t))));
368       const __m128i vi22x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i22 + 4));
369       const __m128i vk22x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 180 * sizeof(int8_t))));
370       i22 += 8;
371 
372       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi22x0123, vk22x0123));
373       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi22x4567, vk22x4567));
374 
375       const __m128i vi23x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i23));
376       const __m128i vk23x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 184 * sizeof(int8_t))));
377       const __m128i vi23x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i23 + 4));
378       const __m128i vk23x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 188 * sizeof(int8_t))));
379       i23 += 8;
380 
381       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi23x0123, vk23x0123));
382       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi23x4567, vk23x4567));
383 
384       const __m128i vi24x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i24));
385       const __m128i vk24x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 192 * sizeof(int8_t))));
386       const __m128i vi24x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32(i24 + 4));
387       const __m128i vk24x4567 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 196 * sizeof(int8_t))));
388       i24 += 8;
389 
390       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi24x0123, vk24x0123));
391       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi24x4567, vk24x4567));
392 
393       w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 200 * sizeof(int8_t));
394 
395       __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
396       __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
397 
398       const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
399       const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
400       w = (const void*) ((const float*) w + 8);
401       vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
402       vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);
403 
404       const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse4.output_max_less_zero_point);
405       vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
406       vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
407 
408       vacc0123 = _mm_cvtps_epi32(vscaled0123);
409       vacc4567 = _mm_cvtps_epi32(vscaled4567);
410 
411       const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
412       __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
413 
414       const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
415       __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
416       vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
417 
418       _mm_storel_epi64((__m128i*) output, vout0123456701234567);
419       output += 8;
420     }
421     if XNN_UNLIKELY(c != 0) {
422       const int8_t* k = (const int8_t*) ((const int32_t*) w + 8);
423       do {
424         __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
425 
426         const __m128i vi0x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i0));
427         const __m128i vk0x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(k));
428         i0 += 4;
429 
430         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi0x0123, vk0x0123));
431         const __m128i vi1x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i1));
432         const __m128i vk1x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 8)));
433         i1 += 4;
434 
435         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi1x0123, vk1x0123));
436         const __m128i vi2x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i2));
437         const __m128i vk2x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 16)));
438         i2 += 4;
439 
440         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi2x0123, vk2x0123));
441         const __m128i vi3x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i3));
442         const __m128i vk3x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 24)));
443         i3 += 4;
444 
445         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi3x0123, vk3x0123));
446         const __m128i vi4x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i4));
447         const __m128i vk4x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 32)));
448         i4 += 4;
449 
450         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi4x0123, vk4x0123));
451         const __m128i vi5x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i5));
452         const __m128i vk5x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 40)));
453         i5 += 4;
454 
455         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi5x0123, vk5x0123));
456         const __m128i vi6x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i6));
457         const __m128i vk6x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 48)));
458         i6 += 4;
459 
460         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi6x0123, vk6x0123));
461         const __m128i vi7x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i7));
462         const __m128i vk7x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 56)));
463         i7 += 4;
464 
465         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi7x0123, vk7x0123));
466         const __m128i vi8x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i8));
467         const __m128i vk8x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 64)));
468         i8 += 4;
469 
470         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi8x0123, vk8x0123));
471         const __m128i vi9x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i9));
472         const __m128i vk9x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 72)));
473         i9 += 4;
474 
475         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi9x0123, vk9x0123));
476         const __m128i vi10x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i10));
477         const __m128i vk10x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 80)));
478         i10 += 4;
479 
480         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi10x0123, vk10x0123));
481         const __m128i vi11x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i11));
482         const __m128i vk11x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 88)));
483         i11 += 4;
484 
485         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi11x0123, vk11x0123));
486         const __m128i vi12x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i12));
487         const __m128i vk12x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 96)));
488         i12 += 4;
489 
490         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi12x0123, vk12x0123));
491         const __m128i vi13x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i13));
492         const __m128i vk13x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 104)));
493         i13 += 4;
494 
495         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi13x0123, vk13x0123));
496         const __m128i vi14x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i14));
497         const __m128i vk14x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 112)));
498         i14 += 4;
499 
500         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi14x0123, vk14x0123));
501         const __m128i vi15x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i15));
502         const __m128i vk15x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 120)));
503         i15 += 4;
504 
505         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi15x0123, vk15x0123));
506         const __m128i vi16x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i16));
507         const __m128i vk16x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 128)));
508         i16 += 4;
509 
510         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi16x0123, vk16x0123));
511         const __m128i vi17x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i17));
512         const __m128i vk17x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 136)));
513         i17 += 4;
514 
515         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi17x0123, vk17x0123));
516         const __m128i vi18x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i18));
517         const __m128i vk18x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 144)));
518         i18 += 4;
519 
520         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi18x0123, vk18x0123));
521         const __m128i vi19x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i19));
522         const __m128i vk19x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 152)));
523         i19 += 4;
524 
525         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi19x0123, vk19x0123));
526         const __m128i vi20x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i20));
527         const __m128i vk20x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 160)));
528         i20 += 4;
529 
530         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi20x0123, vk20x0123));
531         const __m128i vi21x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i21));
532         const __m128i vk21x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 168)));
533         i21 += 4;
534 
535         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi21x0123, vk21x0123));
536         const __m128i vi22x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i22));
537         const __m128i vk22x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 176)));
538         i22 += 4;
539 
540         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi22x0123, vk22x0123));
541         const __m128i vi23x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i23));
542         const __m128i vk23x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 184)));
543         i23 += 4;
544 
545         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi23x0123, vk23x0123));
546         const __m128i vi24x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32(i24));
547         const __m128i vk24x0123 = _mm_cvtepi8_epi32(_mm_loadu_si32((const void*) (k + 192)));
548         i24 += 4;
549 
550         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi24x0123, vk24x0123));
551 
552         k += 4;
553 
554         __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
555         const __m128 vscale0123 = _mm_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(int32_t) + 200 * sizeof(int8_t)));
556         vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
557         vscaled0123 = _mm_min_ps(vscaled0123, _mm_load_ps(params->sse4.output_max_less_zero_point));
558         vacc0123 = _mm_cvtps_epi32(vscaled0123);
559 
560         w = (const void*) ((const int32_t*) w + 4);
561 
562         const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
563         __m128i vout0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc0123), voutput_zero_point);
564 
565         vout0123 = _mm_packs_epi16(vout0123, vout0123);
566         vout0123 = _mm_max_epi8(vout0123, _mm_load_si128((const __m128i*) params->sse4.output_min));
567 
568         if XNN_LIKELY(c >= 4) {
569           _mm_storeu_si32(output, vout0123);
570           output += 4;
571           c -= 4;
572         } else {
573           if (c & 2) {
574             *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123, 0);
575             vout0123 = _mm_srli_epi32(vout0123, 16);
576             output += 2;
577           }
578           if (c & 1) {
579             *output = (int8_t) _mm_extract_epi8(vout0123, 0);
580             output += 1;
581           }
582           c = 0;
583         }
584       } while (c != 0);
585     }
586 
587     output = (int8_t*) ((uintptr_t) output + output_increment);
588   } while (--output_width != 0);
589 }
590