1 // Auto-generated file. Do not edit!
2 //   Template: src/qs8-dwconv/unipass-sse-mul16.c.in
3 //   Generator: tools/xngen
4 //
5 // Copyright 2020 Google LLC
6 //
7 // This source code is licensed under the BSD-style license found in the
8 // LICENSE file in the root directory of this source tree.
9 
10 #include <assert.h>
11 
12 #include <smmintrin.h>
13 
14 #include <xnnpack/dwconv.h>
15 
16 
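// Unipass QS8 depthwise-convolution microkernel: processes up to 24 channels
// per iteration ("up24") across a 25-tap kernel window ("x25"). int8 inputs
// and weights are sign-extended to 16 bits, multiplied with _mm_mullo_epi16
// (the SSE4.1 "mul16" variant), accumulated in 32-bit lanes, and requantized
// with fp32 scaling followed by min/max clamping.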
17 void xnn_qs8_dwconv_minmax_fp32_ukernel_up24x25__sse41_mul16(
18     size_t channels,
19     size_t output_width,
20     const int8_t** input,
21     const void* weights,
22     int8_t* output,
23     size_t input_stride,
24     size_t output_increment,
25     size_t input_offset,
26     const int8_t* zero,
27     const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
28 {
29   assert(channels != 0);
30   assert(output_width != 0);
31 
32   do {
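    // Each of the 25 taps reads from its own row pointer i0..i24. Rows that
    // point at the shared `zero` buffer are padding and are left as-is; all
    // other rows are rebased by input_offset.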
33     const int8_t* i0 = input[0];
34     assert(i0 != NULL);
35     if XNN_UNPREDICTABLE(i0 != zero) {
36       i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
37     }
38     const int8_t* i1 = input[1];
39     assert(i1 != NULL);
40     if XNN_UNPREDICTABLE(i1 != zero) {
41       i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
42     }
43     const int8_t* i2 = input[2];
44     assert(i2 != NULL);
45     if XNN_UNPREDICTABLE(i2 != zero) {
46       i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
47     }
48     const int8_t* i3 = input[3];
49     assert(i3 != NULL);
50     if XNN_UNPREDICTABLE(i3 != zero) {
51       i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
52     }
53     const int8_t* i4 = input[4];
54     assert(i4 != NULL);
55     if XNN_UNPREDICTABLE(i4 != zero) {
56       i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
57     }
58     const int8_t* i5 = input[5];
59     assert(i5 != NULL);
60     if XNN_UNPREDICTABLE(i5 != zero) {
61       i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
62     }
63     const int8_t* i6 = input[6];
64     assert(i6 != NULL);
65     if XNN_UNPREDICTABLE(i6 != zero) {
66       i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
67     }
68     const int8_t* i7 = input[7];
69     assert(i7 != NULL);
70     if XNN_UNPREDICTABLE(i7 != zero) {
71       i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
72     }
73     const int8_t* i8 = input[8];
74     assert(i8 != NULL);
75     if XNN_UNPREDICTABLE(i8 != zero) {
76       i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
77     }
78     const int8_t* i9 = input[9];
79     assert(i9 != NULL);
80     if XNN_UNPREDICTABLE(i9 != zero) {
81       i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
82     }
83     const int8_t* i10 = input[10];
84     assert(i10 != NULL);
85     if XNN_UNPREDICTABLE(i10 != zero) {
86       i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
87     }
88     const int8_t* i11 = input[11];
89     assert(i11 != NULL);
90     if XNN_UNPREDICTABLE(i11 != zero) {
91       i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
92     }
93     const int8_t* i12 = input[12];
94     assert(i12 != NULL);
95     if XNN_UNPREDICTABLE(i12 != zero) {
96       i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
97     }
98     const int8_t* i13 = input[13];
99     assert(i13 != NULL);
100     if XNN_UNPREDICTABLE(i13 != zero) {
101       i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
102     }
103     const int8_t* i14 = input[14];
104     assert(i14 != NULL);
105     if XNN_UNPREDICTABLE(i14 != zero) {
106       i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
107     }
108     const int8_t* i15 = input[15];
109     assert(i15 != NULL);
110     if XNN_UNPREDICTABLE(i15 != zero) {
111       i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
112     }
113     const int8_t* i16 = input[16];
114     assert(i16 != NULL);
115     if XNN_UNPREDICTABLE(i16 != zero) {
116       i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
117     }
118     const int8_t* i17 = input[17];
119     assert(i17 != NULL);
120     if XNN_UNPREDICTABLE(i17 != zero) {
121       i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
122     }
123     const int8_t* i18 = input[18];
124     assert(i18 != NULL);
125     if XNN_UNPREDICTABLE(i18 != zero) {
126       i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
127     }
128     const int8_t* i19 = input[19];
129     assert(i19 != NULL);
130     if XNN_UNPREDICTABLE(i19 != zero) {
131       i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
132     }
133     const int8_t* i20 = input[20];
134     assert(i20 != NULL);
135     if XNN_UNPREDICTABLE(i20 != zero) {
136       i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
137     }
138     const int8_t* i21 = input[21];
139     assert(i21 != NULL);
140     if XNN_UNPREDICTABLE(i21 != zero) {
141       i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
142     }
143     const int8_t* i22 = input[22];
144     assert(i22 != NULL);
145     if XNN_UNPREDICTABLE(i22 != zero) {
146       i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
147     }
148     const int8_t* i23 = input[23];
149     assert(i23 != NULL);
150     if XNN_UNPREDICTABLE(i23 != zero) {
151       i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
152     }
153     const int8_t* i24 = input[24];
154     assert(i24 != NULL);
155     if XNN_UNPREDICTABLE(i24 != zero) {
156       i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
157     }
158     input = (const int8_t**) ((uintptr_t) input + input_stride);
159 
160     size_t c = channels;
161     const void* w = weights;
162     for (; c >= 24; c -= 24) {
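      // Main loop over full groups of 24 channels. The packed weights start
      // with 24 int32 bias values (loaded into the six accumulators below),
      // followed by the 25 taps' int8 weights, 24 per tap. For every tap the
      // 24 inputs and 24 weights are loaded 8 at a time, sign-extended to
      // 16 bits, multiplied, and the 16-bit products widened into the int32
      // accumulators (low half via _mm_cvtepi16_epi32, high half via
      // unpackhi plus arithmetic shift).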
163       __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
164       __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
165       __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 8));
166       __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 12));
167       __m128i vaccGHIJ = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 16));
168       __m128i vaccKLMN = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 20));
169 
170 
171       const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
172       const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
173       const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 0 * sizeof(int8_t)));
174       const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
175       const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
176       const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(vi0x89ABCDEF);
177       const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 8 * sizeof(int8_t)));
178       const __m128i vxk0x89ABCDEF = _mm_cvtepi8_epi16(vk0x89ABCDEF);
179       const __m128i vi0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i0 + 16));
180       const __m128i vxi0xGHIJKLMN = _mm_cvtepi8_epi16(vi0xGHIJKLMN);
181       const __m128i vk0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 16 * sizeof(int8_t)));
182       const __m128i vxk0xGHIJKLMN = _mm_cvtepi8_epi16(vk0xGHIJKLMN);
183       i0 += 24;
184 
185 
186       __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
187       __m128i vprod89ABCDEF = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
188       __m128i vprodGHIJKLMN = _mm_mullo_epi16(vxi0xGHIJKLMN, vxk0xGHIJKLMN);
189 
190       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
191       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
192       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
193       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
194       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
195       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
196 
197       const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
198       const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
199       const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 24 * sizeof(int8_t)));
200       const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
201       const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
202       const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(vi1x89ABCDEF);
203       const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 32 * sizeof(int8_t)));
204       const __m128i vxk1x89ABCDEF = _mm_cvtepi8_epi16(vk1x89ABCDEF);
205       const __m128i vi1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i1 + 16));
206       const __m128i vxi1xGHIJKLMN = _mm_cvtepi8_epi16(vi1xGHIJKLMN);
207       const __m128i vk1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 40 * sizeof(int8_t)));
208       const __m128i vxk1xGHIJKLMN = _mm_cvtepi8_epi16(vk1xGHIJKLMN);
209       i1 += 24;
210 
211 
212       vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
213       vprod89ABCDEF = _mm_mullo_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
214       vprodGHIJKLMN = _mm_mullo_epi16(vxi1xGHIJKLMN, vxk1xGHIJKLMN);
215 
216       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
217       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
218       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
219       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
220       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
221       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
222 
223       const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
224       const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
225       const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 48 * sizeof(int8_t)));
226       const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
227       const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
228       const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(vi2x89ABCDEF);
229       const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 56 * sizeof(int8_t)));
230       const __m128i vxk2x89ABCDEF = _mm_cvtepi8_epi16(vk2x89ABCDEF);
231       const __m128i vi2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i2 + 16));
232       const __m128i vxi2xGHIJKLMN = _mm_cvtepi8_epi16(vi2xGHIJKLMN);
233       const __m128i vk2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 64 * sizeof(int8_t)));
234       const __m128i vxk2xGHIJKLMN = _mm_cvtepi8_epi16(vk2xGHIJKLMN);
235       i2 += 24;
236 
237 
238       vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
239       vprod89ABCDEF = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
240       vprodGHIJKLMN = _mm_mullo_epi16(vxi2xGHIJKLMN, vxk2xGHIJKLMN);
241 
242       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
243       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
244       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
245       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
246       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
247       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
248 
249       const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
250       const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
251       const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 72 * sizeof(int8_t)));
252       const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
253       const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
254       const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(vi3x89ABCDEF);
255       const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 80 * sizeof(int8_t)));
256       const __m128i vxk3x89ABCDEF = _mm_cvtepi8_epi16(vk3x89ABCDEF);
257       const __m128i vi3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i3 + 16));
258       const __m128i vxi3xGHIJKLMN = _mm_cvtepi8_epi16(vi3xGHIJKLMN);
259       const __m128i vk3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 88 * sizeof(int8_t)));
260       const __m128i vxk3xGHIJKLMN = _mm_cvtepi8_epi16(vk3xGHIJKLMN);
261       i3 += 24;
262 
263 
264       vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
265       vprod89ABCDEF = _mm_mullo_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
266       vprodGHIJKLMN = _mm_mullo_epi16(vxi3xGHIJKLMN, vxk3xGHIJKLMN);
267 
268       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
269       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
270       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
271       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
272       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
273       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
274 
275       const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
276       const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
277       const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 96 * sizeof(int8_t)));
278       const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
279       const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
280       const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(vi4x89ABCDEF);
281       const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 104 * sizeof(int8_t)));
282       const __m128i vxk4x89ABCDEF = _mm_cvtepi8_epi16(vk4x89ABCDEF);
283       const __m128i vi4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i4 + 16));
284       const __m128i vxi4xGHIJKLMN = _mm_cvtepi8_epi16(vi4xGHIJKLMN);
285       const __m128i vk4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 112 * sizeof(int8_t)));
286       const __m128i vxk4xGHIJKLMN = _mm_cvtepi8_epi16(vk4xGHIJKLMN);
287       i4 += 24;
288 
289 
290       vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
291       vprod89ABCDEF = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
292       vprodGHIJKLMN = _mm_mullo_epi16(vxi4xGHIJKLMN, vxk4xGHIJKLMN);
293 
294       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
295       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
296       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
297       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
298       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
299       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
300 
301       const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
302       const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
303       const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 120 * sizeof(int8_t)));
304       const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
305       const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
306       const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(vi5x89ABCDEF);
307       const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 128 * sizeof(int8_t)));
308       const __m128i vxk5x89ABCDEF = _mm_cvtepi8_epi16(vk5x89ABCDEF);
309       const __m128i vi5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i5 + 16));
310       const __m128i vxi5xGHIJKLMN = _mm_cvtepi8_epi16(vi5xGHIJKLMN);
311       const __m128i vk5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 136 * sizeof(int8_t)));
312       const __m128i vxk5xGHIJKLMN = _mm_cvtepi8_epi16(vk5xGHIJKLMN);
313       i5 += 24;
314 
315 
316       vprod01234567 = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
317       vprod89ABCDEF = _mm_mullo_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
318       vprodGHIJKLMN = _mm_mullo_epi16(vxi5xGHIJKLMN, vxk5xGHIJKLMN);
319 
320       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
321       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
322       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
323       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
324       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
325       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
326 
327       const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
328       const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
329       const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 144 * sizeof(int8_t)));
330       const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
331       const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
332       const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(vi6x89ABCDEF);
333       const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 152 * sizeof(int8_t)));
334       const __m128i vxk6x89ABCDEF = _mm_cvtepi8_epi16(vk6x89ABCDEF);
335       const __m128i vi6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i6 + 16));
336       const __m128i vxi6xGHIJKLMN = _mm_cvtepi8_epi16(vi6xGHIJKLMN);
337       const __m128i vk6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 160 * sizeof(int8_t)));
338       const __m128i vxk6xGHIJKLMN = _mm_cvtepi8_epi16(vk6xGHIJKLMN);
339       i6 += 24;
340 
341 
342       vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
343       vprod89ABCDEF = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
344       vprodGHIJKLMN = _mm_mullo_epi16(vxi6xGHIJKLMN, vxk6xGHIJKLMN);
345 
346       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
347       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
348       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
349       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
350       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
351       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
352 
353       const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
354       const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
355       const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 168 * sizeof(int8_t)));
356       const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
357       const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
358       const __m128i vxi7x89ABCDEF = _mm_cvtepi8_epi16(vi7x89ABCDEF);
359       const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 176 * sizeof(int8_t)));
360       const __m128i vxk7x89ABCDEF = _mm_cvtepi8_epi16(vk7x89ABCDEF);
361       const __m128i vi7xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i7 + 16));
362       const __m128i vxi7xGHIJKLMN = _mm_cvtepi8_epi16(vi7xGHIJKLMN);
363       const __m128i vk7xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 184 * sizeof(int8_t)));
364       const __m128i vxk7xGHIJKLMN = _mm_cvtepi8_epi16(vk7xGHIJKLMN);
365       i7 += 24;
366 
367 
368       vprod01234567 = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
369       vprod89ABCDEF = _mm_mullo_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
370       vprodGHIJKLMN = _mm_mullo_epi16(vxi7xGHIJKLMN, vxk7xGHIJKLMN);
371 
372       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
373       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
374       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
375       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
376       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
377       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
378 
379       const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
380       const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
381       const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 192 * sizeof(int8_t)));
382       const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
383       const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
384       const __m128i vxi8x89ABCDEF = _mm_cvtepi8_epi16(vi8x89ABCDEF);
385       const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 200 * sizeof(int8_t)));
386       const __m128i vxk8x89ABCDEF = _mm_cvtepi8_epi16(vk8x89ABCDEF);
387       const __m128i vi8xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i8 + 16));
388       const __m128i vxi8xGHIJKLMN = _mm_cvtepi8_epi16(vi8xGHIJKLMN);
389       const __m128i vk8xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 208 * sizeof(int8_t)));
390       const __m128i vxk8xGHIJKLMN = _mm_cvtepi8_epi16(vk8xGHIJKLMN);
391       i8 += 24;
392 
393 
394       vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
395       vprod89ABCDEF = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
396       vprodGHIJKLMN = _mm_mullo_epi16(vxi8xGHIJKLMN, vxk8xGHIJKLMN);
397 
398       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
399       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
400       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
401       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
402       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
403       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
404 
405       const __m128i vi9x01234567 = _mm_loadl_epi64((const __m128i*) i9);
406       const __m128i vxi9x01234567 = _mm_cvtepi8_epi16(vi9x01234567);
407       const __m128i vk9x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t)));
408       const __m128i vxk9x01234567 = _mm_cvtepi8_epi16(vk9x01234567);
409       const __m128i vi9x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i9 + 8));
410       const __m128i vxi9x89ABCDEF = _mm_cvtepi8_epi16(vi9x89ABCDEF);
411       const __m128i vk9x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 224 * sizeof(int8_t)));
412       const __m128i vxk9x89ABCDEF = _mm_cvtepi8_epi16(vk9x89ABCDEF);
413       const __m128i vi9xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i9 + 16));
414       const __m128i vxi9xGHIJKLMN = _mm_cvtepi8_epi16(vi9xGHIJKLMN);
415       const __m128i vk9xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 232 * sizeof(int8_t)));
416       const __m128i vxk9xGHIJKLMN = _mm_cvtepi8_epi16(vk9xGHIJKLMN);
417       i9 += 24;
418 
419 
420       vprod01234567 = _mm_mullo_epi16(vxi9x01234567, vxk9x01234567);
421       vprod89ABCDEF = _mm_mullo_epi16(vxi9x89ABCDEF, vxk9x89ABCDEF);
422       vprodGHIJKLMN = _mm_mullo_epi16(vxi9xGHIJKLMN, vxk9xGHIJKLMN);
423 
424       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
425       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
426       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
427       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
428       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
429       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
430 
431       const __m128i vi10x01234567 = _mm_loadl_epi64((const __m128i*) i10);
432       const __m128i vxi10x01234567 = _mm_cvtepi8_epi16(vi10x01234567);
433       const __m128i vk10x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 240 * sizeof(int8_t)));
434       const __m128i vxk10x01234567 = _mm_cvtepi8_epi16(vk10x01234567);
435       const __m128i vi10x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i10 + 8));
436       const __m128i vxi10x89ABCDEF = _mm_cvtepi8_epi16(vi10x89ABCDEF);
437       const __m128i vk10x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 248 * sizeof(int8_t)));
438       const __m128i vxk10x89ABCDEF = _mm_cvtepi8_epi16(vk10x89ABCDEF);
439       const __m128i vi10xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i10 + 16));
440       const __m128i vxi10xGHIJKLMN = _mm_cvtepi8_epi16(vi10xGHIJKLMN);
441       const __m128i vk10xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 256 * sizeof(int8_t)));
442       const __m128i vxk10xGHIJKLMN = _mm_cvtepi8_epi16(vk10xGHIJKLMN);
443       i10 += 24;
444 
445 
446       vprod01234567 = _mm_mullo_epi16(vxi10x01234567, vxk10x01234567);
447       vprod89ABCDEF = _mm_mullo_epi16(vxi10x89ABCDEF, vxk10x89ABCDEF);
448       vprodGHIJKLMN = _mm_mullo_epi16(vxi10xGHIJKLMN, vxk10xGHIJKLMN);
449 
450       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
451       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
452       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
453       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
454       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
455       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
456 
457       const __m128i vi11x01234567 = _mm_loadl_epi64((const __m128i*) i11);
458       const __m128i vxi11x01234567 = _mm_cvtepi8_epi16(vi11x01234567);
459       const __m128i vk11x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 264 * sizeof(int8_t)));
460       const __m128i vxk11x01234567 = _mm_cvtepi8_epi16(vk11x01234567);
461       const __m128i vi11x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i11 + 8));
462       const __m128i vxi11x89ABCDEF = _mm_cvtepi8_epi16(vi11x89ABCDEF);
463       const __m128i vk11x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 272 * sizeof(int8_t)));
464       const __m128i vxk11x89ABCDEF = _mm_cvtepi8_epi16(vk11x89ABCDEF);
465       const __m128i vi11xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i11 + 16));
466       const __m128i vxi11xGHIJKLMN = _mm_cvtepi8_epi16(vi11xGHIJKLMN);
467       const __m128i vk11xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 280 * sizeof(int8_t)));
468       const __m128i vxk11xGHIJKLMN = _mm_cvtepi8_epi16(vk11xGHIJKLMN);
469       i11 += 24;
470 
471 
472       vprod01234567 = _mm_mullo_epi16(vxi11x01234567, vxk11x01234567);
473       vprod89ABCDEF = _mm_mullo_epi16(vxi11x89ABCDEF, vxk11x89ABCDEF);
474       vprodGHIJKLMN = _mm_mullo_epi16(vxi11xGHIJKLMN, vxk11xGHIJKLMN);
475 
476       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
477       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
478       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
479       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
480       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
481       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
482 
483       const __m128i vi12x01234567 = _mm_loadl_epi64((const __m128i*) i12);
484       const __m128i vxi12x01234567 = _mm_cvtepi8_epi16(vi12x01234567);
485       const __m128i vk12x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 288 * sizeof(int8_t)));
486       const __m128i vxk12x01234567 = _mm_cvtepi8_epi16(vk12x01234567);
487       const __m128i vi12x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i12 + 8));
488       const __m128i vxi12x89ABCDEF = _mm_cvtepi8_epi16(vi12x89ABCDEF);
489       const __m128i vk12x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 296 * sizeof(int8_t)));
490       const __m128i vxk12x89ABCDEF = _mm_cvtepi8_epi16(vk12x89ABCDEF);
491       const __m128i vi12xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i12 + 16));
492       const __m128i vxi12xGHIJKLMN = _mm_cvtepi8_epi16(vi12xGHIJKLMN);
493       const __m128i vk12xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 304 * sizeof(int8_t)));
494       const __m128i vxk12xGHIJKLMN = _mm_cvtepi8_epi16(vk12xGHIJKLMN);
495       i12 += 24;
496 
497 
498       vprod01234567 = _mm_mullo_epi16(vxi12x01234567, vxk12x01234567);
499       vprod89ABCDEF = _mm_mullo_epi16(vxi12x89ABCDEF, vxk12x89ABCDEF);
500       vprodGHIJKLMN = _mm_mullo_epi16(vxi12xGHIJKLMN, vxk12xGHIJKLMN);
501 
502       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
503       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
504       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
505       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
506       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
507       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
508 
509       const __m128i vi13x01234567 = _mm_loadl_epi64((const __m128i*) i13);
510       const __m128i vxi13x01234567 = _mm_cvtepi8_epi16(vi13x01234567);
511       const __m128i vk13x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 312 * sizeof(int8_t)));
512       const __m128i vxk13x01234567 = _mm_cvtepi8_epi16(vk13x01234567);
513       const __m128i vi13x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i13 + 8));
514       const __m128i vxi13x89ABCDEF = _mm_cvtepi8_epi16(vi13x89ABCDEF);
515       const __m128i vk13x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 320 * sizeof(int8_t)));
516       const __m128i vxk13x89ABCDEF = _mm_cvtepi8_epi16(vk13x89ABCDEF);
517       const __m128i vi13xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i13 + 16));
518       const __m128i vxi13xGHIJKLMN = _mm_cvtepi8_epi16(vi13xGHIJKLMN);
519       const __m128i vk13xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 328 * sizeof(int8_t)));
520       const __m128i vxk13xGHIJKLMN = _mm_cvtepi8_epi16(vk13xGHIJKLMN);
521       i13 += 24;
522 
523 
524       vprod01234567 = _mm_mullo_epi16(vxi13x01234567, vxk13x01234567);
525       vprod89ABCDEF = _mm_mullo_epi16(vxi13x89ABCDEF, vxk13x89ABCDEF);
526       vprodGHIJKLMN = _mm_mullo_epi16(vxi13xGHIJKLMN, vxk13xGHIJKLMN);
527 
528       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
529       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
530       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
531       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
532       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
533       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
534 
535       const __m128i vi14x01234567 = _mm_loadl_epi64((const __m128i*) i14);
536       const __m128i vxi14x01234567 = _mm_cvtepi8_epi16(vi14x01234567);
537       const __m128i vk14x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 336 * sizeof(int8_t)));
538       const __m128i vxk14x01234567 = _mm_cvtepi8_epi16(vk14x01234567);
539       const __m128i vi14x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i14 + 8));
540       const __m128i vxi14x89ABCDEF = _mm_cvtepi8_epi16(vi14x89ABCDEF);
541       const __m128i vk14x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 344 * sizeof(int8_t)));
542       const __m128i vxk14x89ABCDEF = _mm_cvtepi8_epi16(vk14x89ABCDEF);
543       const __m128i vi14xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i14 + 16));
544       const __m128i vxi14xGHIJKLMN = _mm_cvtepi8_epi16(vi14xGHIJKLMN);
545       const __m128i vk14xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 352 * sizeof(int8_t)));
546       const __m128i vxk14xGHIJKLMN = _mm_cvtepi8_epi16(vk14xGHIJKLMN);
547       i14 += 24;
548 
549 
550       vprod01234567 = _mm_mullo_epi16(vxi14x01234567, vxk14x01234567);
551       vprod89ABCDEF = _mm_mullo_epi16(vxi14x89ABCDEF, vxk14x89ABCDEF);
552       vprodGHIJKLMN = _mm_mullo_epi16(vxi14xGHIJKLMN, vxk14xGHIJKLMN);
553 
554       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
555       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
556       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
557       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
558       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
559       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
560 
561       const __m128i vi15x01234567 = _mm_loadl_epi64((const __m128i*) i15);
562       const __m128i vxi15x01234567 = _mm_cvtepi8_epi16(vi15x01234567);
563       const __m128i vk15x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 360 * sizeof(int8_t)));
564       const __m128i vxk15x01234567 = _mm_cvtepi8_epi16(vk15x01234567);
565       const __m128i vi15x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i15 + 8));
566       const __m128i vxi15x89ABCDEF = _mm_cvtepi8_epi16(vi15x89ABCDEF);
567       const __m128i vk15x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 368 * sizeof(int8_t)));
568       const __m128i vxk15x89ABCDEF = _mm_cvtepi8_epi16(vk15x89ABCDEF);
569       const __m128i vi15xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i15 + 16));
570       const __m128i vxi15xGHIJKLMN = _mm_cvtepi8_epi16(vi15xGHIJKLMN);
571       const __m128i vk15xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 376 * sizeof(int8_t)));
572       const __m128i vxk15xGHIJKLMN = _mm_cvtepi8_epi16(vk15xGHIJKLMN);
573       i15 += 24;
574 
575 
576       vprod01234567 = _mm_mullo_epi16(vxi15x01234567, vxk15x01234567);
577       vprod89ABCDEF = _mm_mullo_epi16(vxi15x89ABCDEF, vxk15x89ABCDEF);
578       vprodGHIJKLMN = _mm_mullo_epi16(vxi15xGHIJKLMN, vxk15xGHIJKLMN);
579 
580       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
581       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
582       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
583       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
584       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
585       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
586 
587       const __m128i vi16x01234567 = _mm_loadl_epi64((const __m128i*) i16);
588       const __m128i vxi16x01234567 = _mm_cvtepi8_epi16(vi16x01234567);
589       const __m128i vk16x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 384 * sizeof(int8_t)));
590       const __m128i vxk16x01234567 = _mm_cvtepi8_epi16(vk16x01234567);
591       const __m128i vi16x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i16 + 8));
592       const __m128i vxi16x89ABCDEF = _mm_cvtepi8_epi16(vi16x89ABCDEF);
593       const __m128i vk16x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 392 * sizeof(int8_t)));
594       const __m128i vxk16x89ABCDEF = _mm_cvtepi8_epi16(vk16x89ABCDEF);
595       const __m128i vi16xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i16 + 16));
596       const __m128i vxi16xGHIJKLMN = _mm_cvtepi8_epi16(vi16xGHIJKLMN);
597       const __m128i vk16xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 400 * sizeof(int8_t)));
598       const __m128i vxk16xGHIJKLMN = _mm_cvtepi8_epi16(vk16xGHIJKLMN);
599       i16 += 24;
600 
601 
602       vprod01234567 = _mm_mullo_epi16(vxi16x01234567, vxk16x01234567);
603       vprod89ABCDEF = _mm_mullo_epi16(vxi16x89ABCDEF, vxk16x89ABCDEF);
604       vprodGHIJKLMN = _mm_mullo_epi16(vxi16xGHIJKLMN, vxk16xGHIJKLMN);
605 
606       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
607       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
608       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
609       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
610       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
611       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
612 
613       const __m128i vi17x01234567 = _mm_loadl_epi64((const __m128i*) i17);
614       const __m128i vxi17x01234567 = _mm_cvtepi8_epi16(vi17x01234567);
615       const __m128i vk17x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 408 * sizeof(int8_t)));
616       const __m128i vxk17x01234567 = _mm_cvtepi8_epi16(vk17x01234567);
617       const __m128i vi17x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i17 + 8));
618       const __m128i vxi17x89ABCDEF = _mm_cvtepi8_epi16(vi17x89ABCDEF);
619       const __m128i vk17x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 416 * sizeof(int8_t)));
620       const __m128i vxk17x89ABCDEF = _mm_cvtepi8_epi16(vk17x89ABCDEF);
621       const __m128i vi17xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i17 + 16));
622       const __m128i vxi17xGHIJKLMN = _mm_cvtepi8_epi16(vi17xGHIJKLMN);
623       const __m128i vk17xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 424 * sizeof(int8_t)));
624       const __m128i vxk17xGHIJKLMN = _mm_cvtepi8_epi16(vk17xGHIJKLMN);
625       i17 += 24;
626 
627 
628       vprod01234567 = _mm_mullo_epi16(vxi17x01234567, vxk17x01234567);
629       vprod89ABCDEF = _mm_mullo_epi16(vxi17x89ABCDEF, vxk17x89ABCDEF);
630       vprodGHIJKLMN = _mm_mullo_epi16(vxi17xGHIJKLMN, vxk17xGHIJKLMN);
631 
632       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
633       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
634       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
635       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
636       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
637       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
638 
639       const __m128i vi18x01234567 = _mm_loadl_epi64((const __m128i*) i18);
640       const __m128i vxi18x01234567 = _mm_cvtepi8_epi16(vi18x01234567);
641       const __m128i vk18x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 432 * sizeof(int8_t)));
642       const __m128i vxk18x01234567 = _mm_cvtepi8_epi16(vk18x01234567);
643       const __m128i vi18x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i18 + 8));
644       const __m128i vxi18x89ABCDEF = _mm_cvtepi8_epi16(vi18x89ABCDEF);
645       const __m128i vk18x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 440 * sizeof(int8_t)));
646       const __m128i vxk18x89ABCDEF = _mm_cvtepi8_epi16(vk18x89ABCDEF);
647       const __m128i vi18xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i18 + 16));
648       const __m128i vxi18xGHIJKLMN = _mm_cvtepi8_epi16(vi18xGHIJKLMN);
649       const __m128i vk18xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 448 * sizeof(int8_t)));
650       const __m128i vxk18xGHIJKLMN = _mm_cvtepi8_epi16(vk18xGHIJKLMN);
651       i18 += 24;
652 
653 
654       vprod01234567 = _mm_mullo_epi16(vxi18x01234567, vxk18x01234567);
655       vprod89ABCDEF = _mm_mullo_epi16(vxi18x89ABCDEF, vxk18x89ABCDEF);
656       vprodGHIJKLMN = _mm_mullo_epi16(vxi18xGHIJKLMN, vxk18xGHIJKLMN);
657 
658       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
659       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
660       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
661       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
662       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
663       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
664 
665       const __m128i vi19x01234567 = _mm_loadl_epi64((const __m128i*) i19);
666       const __m128i vxi19x01234567 = _mm_cvtepi8_epi16(vi19x01234567);
667       const __m128i vk19x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 456 * sizeof(int8_t)));
668       const __m128i vxk19x01234567 = _mm_cvtepi8_epi16(vk19x01234567);
669       const __m128i vi19x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i19 + 8));
670       const __m128i vxi19x89ABCDEF = _mm_cvtepi8_epi16(vi19x89ABCDEF);
671       const __m128i vk19x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 464 * sizeof(int8_t)));
672       const __m128i vxk19x89ABCDEF = _mm_cvtepi8_epi16(vk19x89ABCDEF);
673       const __m128i vi19xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i19 + 16));
674       const __m128i vxi19xGHIJKLMN = _mm_cvtepi8_epi16(vi19xGHIJKLMN);
675       const __m128i vk19xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 472 * sizeof(int8_t)));
676       const __m128i vxk19xGHIJKLMN = _mm_cvtepi8_epi16(vk19xGHIJKLMN);
677       i19 += 24;
678 
679 
680       vprod01234567 = _mm_mullo_epi16(vxi19x01234567, vxk19x01234567);
681       vprod89ABCDEF = _mm_mullo_epi16(vxi19x89ABCDEF, vxk19x89ABCDEF);
682       vprodGHIJKLMN = _mm_mullo_epi16(vxi19xGHIJKLMN, vxk19xGHIJKLMN);
683 
684       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
685       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
686       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
687       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
688       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
689       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
690 
691       const __m128i vi20x01234567 = _mm_loadl_epi64((const __m128i*) i20);
692       const __m128i vxi20x01234567 = _mm_cvtepi8_epi16(vi20x01234567);
693       const __m128i vk20x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 480 * sizeof(int8_t)));
694       const __m128i vxk20x01234567 = _mm_cvtepi8_epi16(vk20x01234567);
695       const __m128i vi20x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i20 + 8));
696       const __m128i vxi20x89ABCDEF = _mm_cvtepi8_epi16(vi20x89ABCDEF);
697       const __m128i vk20x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 488 * sizeof(int8_t)));
698       const __m128i vxk20x89ABCDEF = _mm_cvtepi8_epi16(vk20x89ABCDEF);
699       const __m128i vi20xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i20 + 16));
700       const __m128i vxi20xGHIJKLMN = _mm_cvtepi8_epi16(vi20xGHIJKLMN);
701       const __m128i vk20xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 496 * sizeof(int8_t)));
702       const __m128i vxk20xGHIJKLMN = _mm_cvtepi8_epi16(vk20xGHIJKLMN);
703       i20 += 24;
704 
705 
706       vprod01234567 = _mm_mullo_epi16(vxi20x01234567, vxk20x01234567);
707       vprod89ABCDEF = _mm_mullo_epi16(vxi20x89ABCDEF, vxk20x89ABCDEF);
708       vprodGHIJKLMN = _mm_mullo_epi16(vxi20xGHIJKLMN, vxk20xGHIJKLMN);
709 
710       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
711       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
712       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
713       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
714       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
715       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
716 
717       const __m128i vi21x01234567 = _mm_loadl_epi64((const __m128i*) i21);
718       const __m128i vxi21x01234567 = _mm_cvtepi8_epi16(vi21x01234567);
719       const __m128i vk21x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 504 * sizeof(int8_t)));
720       const __m128i vxk21x01234567 = _mm_cvtepi8_epi16(vk21x01234567);
721       const __m128i vi21x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i21 + 8));
722       const __m128i vxi21x89ABCDEF = _mm_cvtepi8_epi16(vi21x89ABCDEF);
723       const __m128i vk21x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 512 * sizeof(int8_t)));
724       const __m128i vxk21x89ABCDEF = _mm_cvtepi8_epi16(vk21x89ABCDEF);
725       const __m128i vi21xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i21 + 16));
726       const __m128i vxi21xGHIJKLMN = _mm_cvtepi8_epi16(vi21xGHIJKLMN);
727       const __m128i vk21xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 520 * sizeof(int8_t)));
728       const __m128i vxk21xGHIJKLMN = _mm_cvtepi8_epi16(vk21xGHIJKLMN);
729       i21 += 24;
730 
731 
732       vprod01234567 = _mm_mullo_epi16(vxi21x01234567, vxk21x01234567);
733       vprod89ABCDEF = _mm_mullo_epi16(vxi21x89ABCDEF, vxk21x89ABCDEF);
734       vprodGHIJKLMN = _mm_mullo_epi16(vxi21xGHIJKLMN, vxk21xGHIJKLMN);
735 
736       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
737       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
738       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
739       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
740       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
741       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
742 
743       const __m128i vi22x01234567 = _mm_loadl_epi64((const __m128i*) i22);
744       const __m128i vxi22x01234567 = _mm_cvtepi8_epi16(vi22x01234567);
745       const __m128i vk22x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 528 * sizeof(int8_t)));
746       const __m128i vxk22x01234567 = _mm_cvtepi8_epi16(vk22x01234567);
747       const __m128i vi22x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i22 + 8));
748       const __m128i vxi22x89ABCDEF = _mm_cvtepi8_epi16(vi22x89ABCDEF);
749       const __m128i vk22x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 536 * sizeof(int8_t)));
750       const __m128i vxk22x89ABCDEF = _mm_cvtepi8_epi16(vk22x89ABCDEF);
751       const __m128i vi22xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i22 + 16));
752       const __m128i vxi22xGHIJKLMN = _mm_cvtepi8_epi16(vi22xGHIJKLMN);
753       const __m128i vk22xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 544 * sizeof(int8_t)));
754       const __m128i vxk22xGHIJKLMN = _mm_cvtepi8_epi16(vk22xGHIJKLMN);
755       i22 += 24;
756 
757 
758       vprod01234567 = _mm_mullo_epi16(vxi22x01234567, vxk22x01234567);
759       vprod89ABCDEF = _mm_mullo_epi16(vxi22x89ABCDEF, vxk22x89ABCDEF);
760       vprodGHIJKLMN = _mm_mullo_epi16(vxi22xGHIJKLMN, vxk22xGHIJKLMN);
761 
762       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
763       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
764       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
765       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
766       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
767       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
768 
769       const __m128i vi23x01234567 = _mm_loadl_epi64((const __m128i*) i23);
770       const __m128i vxi23x01234567 = _mm_cvtepi8_epi16(vi23x01234567);
771       const __m128i vk23x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 552 * sizeof(int8_t)));
772       const __m128i vxk23x01234567 = _mm_cvtepi8_epi16(vk23x01234567);
773       const __m128i vi23x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i23 + 8));
774       const __m128i vxi23x89ABCDEF = _mm_cvtepi8_epi16(vi23x89ABCDEF);
775       const __m128i vk23x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 560 * sizeof(int8_t)));
776       const __m128i vxk23x89ABCDEF = _mm_cvtepi8_epi16(vk23x89ABCDEF);
777       const __m128i vi23xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i23 + 16));
778       const __m128i vxi23xGHIJKLMN = _mm_cvtepi8_epi16(vi23xGHIJKLMN);
779       const __m128i vk23xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 568 * sizeof(int8_t)));
780       const __m128i vxk23xGHIJKLMN = _mm_cvtepi8_epi16(vk23xGHIJKLMN);
781       i23 += 24;
782 
783 
784       vprod01234567 = _mm_mullo_epi16(vxi23x01234567, vxk23x01234567);
785       vprod89ABCDEF = _mm_mullo_epi16(vxi23x89ABCDEF, vxk23x89ABCDEF);
786       vprodGHIJKLMN = _mm_mullo_epi16(vxi23xGHIJKLMN, vxk23xGHIJKLMN);
787 
788       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
789       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
790       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
791       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
792       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
793       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
794 
795       const __m128i vi24x01234567 = _mm_loadl_epi64((const __m128i*) i24);
796       const __m128i vxi24x01234567 = _mm_cvtepi8_epi16(vi24x01234567);
797       const __m128i vk24x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 576 * sizeof(int8_t)));
798       const __m128i vxk24x01234567 = _mm_cvtepi8_epi16(vk24x01234567);
799       const __m128i vi24x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i24 + 8));
800       const __m128i vxi24x89ABCDEF = _mm_cvtepi8_epi16(vi24x89ABCDEF);
801       const __m128i vk24x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 584 * sizeof(int8_t)));
802       const __m128i vxk24x89ABCDEF = _mm_cvtepi8_epi16(vk24x89ABCDEF);
803       const __m128i vi24xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i24 + 16));
804       const __m128i vxi24xGHIJKLMN = _mm_cvtepi8_epi16(vi24xGHIJKLMN);
805       const __m128i vk24xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 592 * sizeof(int8_t)));
806       const __m128i vxk24xGHIJKLMN = _mm_cvtepi8_epi16(vk24xGHIJKLMN);
807       i24 += 24;
808 
809 
810       vprod01234567 = _mm_mullo_epi16(vxi24x01234567, vxk24x01234567);
811       vprod89ABCDEF = _mm_mullo_epi16(vxi24x89ABCDEF, vxk24x89ABCDEF);
812       vprodGHIJKLMN = _mm_mullo_epi16(vxi24xGHIJKLMN, vxk24xGHIJKLMN);
813 
814       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
815       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
816       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
817       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
818       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
819       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
820 
821       w = (const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 600 * sizeof(int8_t));
822 
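      // Requantization: convert the int32 accumulators to float, scale by the
      // per-tensor fp32 scale, clamp against output_max - zero_point, round
      // back to int32, add the output zero point with saturating 16-bit adds,
      // pack to int8, and apply the lower clamp.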
823       __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
824       __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
825       __m128 vscaled89AB = _mm_cvtepi32_ps(vacc89AB);
826       __m128 vscaledCDEF = _mm_cvtepi32_ps(vaccCDEF);
827       __m128 vscaledGHIJ = _mm_cvtepi32_ps(vaccGHIJ);
828       __m128 vscaledKLMN = _mm_cvtepi32_ps(vaccKLMN);
829 
830       const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
831       vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
832       vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
833       vscaled89AB = _mm_mul_ps(vscaled89AB, vscale);
834       vscaledCDEF = _mm_mul_ps(vscaledCDEF, vscale);
835       vscaledGHIJ = _mm_mul_ps(vscaledGHIJ, vscale);
836       vscaledKLMN = _mm_mul_ps(vscaledKLMN, vscale);
837 
838       const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
839       vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
840       vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
841       vscaled89AB = _mm_min_ps(vscaled89AB, voutput_max_less_zero_point);
842       vscaledCDEF = _mm_min_ps(vscaledCDEF, voutput_max_less_zero_point);
843       vscaledGHIJ = _mm_min_ps(vscaledGHIJ, voutput_max_less_zero_point);
844       vscaledKLMN = _mm_min_ps(vscaledKLMN, voutput_max_less_zero_point);
845 
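      // Round the scaled values to the nearest int32 (under the default MXCSR round-to-nearest mode).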
846       vacc0123 = _mm_cvtps_epi32(vscaled0123);
847       vacc4567 = _mm_cvtps_epi32(vscaled4567);
848       vacc89AB = _mm_cvtps_epi32(vscaled89AB);
849       vaccCDEF = _mm_cvtps_epi32(vscaledCDEF);
850       vaccGHIJ = _mm_cvtps_epi32(vscaledGHIJ);
851       vaccKLMN = _mm_cvtps_epi32(vscaledKLMN);
852 
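      // Pack pairs of int32 vectors to int16 with signed saturation and add the output zero point.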
853       const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
854       __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
855       __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
856       __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
857 
858 
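      // Pack to int8 with signed saturation; the last register holds only 8 valid results,
      // duplicated into both halves.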
859       __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
860       __m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
861 
862       const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
863       vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
864       voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
865 
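      // Apply the lower bound and store all 24 int8 outputs: 16 bytes plus the remaining 8.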
866       _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
867       _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
868       output += 24;
869     }
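    // Remainder: fewer than 24 channels are left. Process them 8 at a time; k walks the int8
    // kernel weights, which keep the 24-channel-tile layout, so tap t for this group of 8
    // channels sits at k + 24 * t.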
870     if XNN_UNLIKELY(c != 0) {
871       const int8_t* k = (const int8_t*) ((const int32_t*) w + 24);
872       do {
873         __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
874         __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
875 
876 
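        // Accumulate all 25 taps for these 8 channels into vacc0123/vacc4567.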
877         const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
878         const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
879         const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
880         const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
881         i0 += 8;
882 
883 
884         __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
885 
886         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
887         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
888 
889         const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
890         const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
891         const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 24));
892         const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
893         i1 += 8;
894 
895 
896         vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
897 
898         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
899         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
900 
901         const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
902         const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
903         const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
904         const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
905         i2 += 8;
906 
907 
908         vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
909 
910         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
911         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
912 
913         const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
914         const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
915         const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 72));
916         const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
917         i3 += 8;
918 
919 
920         vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
921 
922         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
923         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
924 
925         const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
926         const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
927         const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
928         const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
929         i4 += 8;
930 
931 
932         vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
933 
934         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
935         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
936 
937         const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
938         const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
939         const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 120));
940         const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
941         i5 += 8;
942 
943 
944         vprod01234567 = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
945 
946         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
947         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
948 
949         const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
950         const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
951         const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 144));
952         const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
953         i6 += 8;
954 
955 
956         vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
957 
958         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
959         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
960 
961         const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
962         const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
963         const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 168));
964         const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
965         i7 += 8;
966 
967 
968         vprod01234567 = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
969 
970         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
971         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
972 
973         const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
974         const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
975         const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 192));
976         const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
977         i8 += 8;
978 
979 
980         vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
981 
982         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
983         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
984 
985         const __m128i vi9x01234567 = _mm_loadl_epi64((const __m128i*) i9);
986         const __m128i vxi9x01234567 = _mm_cvtepi8_epi16(vi9x01234567);
987         const __m128i vk9x01234567 = _mm_loadl_epi64((const __m128i*) (k + 216));
988         const __m128i vxk9x01234567 = _mm_cvtepi8_epi16(vk9x01234567);
989         i9 += 8;
990 
991 
992         vprod01234567 = _mm_mullo_epi16(vxi9x01234567, vxk9x01234567);
993 
994         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
995         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
996 
997         const __m128i vi10x01234567 = _mm_loadl_epi64((const __m128i*) i10);
998         const __m128i vxi10x01234567 = _mm_cvtepi8_epi16(vi10x01234567);
999         const __m128i vk10x01234567 = _mm_loadl_epi64((const __m128i*) (k + 240));
1000         const __m128i vxk10x01234567 = _mm_cvtepi8_epi16(vk10x01234567);
1001         i10 += 8;
1002 
1003 
1004         vprod01234567 = _mm_mullo_epi16(vxi10x01234567, vxk10x01234567);
1005 
1006         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1007         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1008 
1009         const __m128i vi11x01234567 = _mm_loadl_epi64((const __m128i*) i11);
1010         const __m128i vxi11x01234567 = _mm_cvtepi8_epi16(vi11x01234567);
1011         const __m128i vk11x01234567 = _mm_loadl_epi64((const __m128i*) (k + 264));
1012         const __m128i vxk11x01234567 = _mm_cvtepi8_epi16(vk11x01234567);
1013         i11 += 8;
1014 
1015 
1016         vprod01234567 = _mm_mullo_epi16(vxi11x01234567, vxk11x01234567);
1017 
1018         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1019         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1020 
1021         const __m128i vi12x01234567 = _mm_loadl_epi64((const __m128i*) i12);
1022         const __m128i vxi12x01234567 = _mm_cvtepi8_epi16(vi12x01234567);
1023         const __m128i vk12x01234567 = _mm_loadl_epi64((const __m128i*) (k + 288));
1024         const __m128i vxk12x01234567 = _mm_cvtepi8_epi16(vk12x01234567);
1025         i12 += 8;
1026 
1027 
1028         vprod01234567 = _mm_mullo_epi16(vxi12x01234567, vxk12x01234567);
1029 
1030         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1031         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1032 
1033         const __m128i vi13x01234567 = _mm_loadl_epi64((const __m128i*) i13);
1034         const __m128i vxi13x01234567 = _mm_cvtepi8_epi16(vi13x01234567);
1035         const __m128i vk13x01234567 = _mm_loadl_epi64((const __m128i*) (k + 312));
1036         const __m128i vxk13x01234567 = _mm_cvtepi8_epi16(vk13x01234567);
1037         i13 += 8;
1038 
1039 
1040         vprod01234567 = _mm_mullo_epi16(vxi13x01234567, vxk13x01234567);
1041 
1042         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1043         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1044 
1045         const __m128i vi14x01234567 = _mm_loadl_epi64((const __m128i*) i14);
1046         const __m128i vxi14x01234567 = _mm_cvtepi8_epi16(vi14x01234567);
1047         const __m128i vk14x01234567 = _mm_loadl_epi64((const __m128i*) (k + 336));
1048         const __m128i vxk14x01234567 = _mm_cvtepi8_epi16(vk14x01234567);
1049         i14 += 8;
1050 
1051 
1052         vprod01234567 = _mm_mullo_epi16(vxi14x01234567, vxk14x01234567);
1053 
1054         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1055         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1056 
1057         const __m128i vi15x01234567 = _mm_loadl_epi64((const __m128i*) i15);
1058         const __m128i vxi15x01234567 = _mm_cvtepi8_epi16(vi15x01234567);
1059         const __m128i vk15x01234567 = _mm_loadl_epi64((const __m128i*) (k + 360));
1060         const __m128i vxk15x01234567 = _mm_cvtepi8_epi16(vk15x01234567);
1061         i15 += 8;
1062 
1063 
1064         vprod01234567 = _mm_mullo_epi16(vxi15x01234567, vxk15x01234567);
1065 
1066         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1067         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1068 
1069         const __m128i vi16x01234567 = _mm_loadl_epi64((const __m128i*) i16);
1070         const __m128i vxi16x01234567 = _mm_cvtepi8_epi16(vi16x01234567);
1071         const __m128i vk16x01234567 = _mm_loadl_epi64((const __m128i*) (k + 384));
1072         const __m128i vxk16x01234567 = _mm_cvtepi8_epi16(vk16x01234567);
1073         i16 += 8;
1074 
1075 
1076         vprod01234567 = _mm_mullo_epi16(vxi16x01234567, vxk16x01234567);
1077 
1078         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1079         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1080 
1081         const __m128i vi17x01234567 = _mm_loadl_epi64((const __m128i*) i17);
1082         const __m128i vxi17x01234567 = _mm_cvtepi8_epi16(vi17x01234567);
1083         const __m128i vk17x01234567 = _mm_loadl_epi64((const __m128i*) (k + 408));
1084         const __m128i vxk17x01234567 = _mm_cvtepi8_epi16(vk17x01234567);
1085         i17 += 8;
1086 
1087 
1088         vprod01234567 = _mm_mullo_epi16(vxi17x01234567, vxk17x01234567);
1089 
1090         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1091         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1092 
1093         const __m128i vi18x01234567 = _mm_loadl_epi64((const __m128i*) i18);
1094         const __m128i vxi18x01234567 = _mm_cvtepi8_epi16(vi18x01234567);
1095         const __m128i vk18x01234567 = _mm_loadl_epi64((const __m128i*) (k + 432));
1096         const __m128i vxk18x01234567 = _mm_cvtepi8_epi16(vk18x01234567);
1097         i18 += 8;
1098 
1099 
1100         vprod01234567 = _mm_mullo_epi16(vxi18x01234567, vxk18x01234567);
1101 
1102         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1103         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1104 
1105         const __m128i vi19x01234567 = _mm_loadl_epi64((const __m128i*) i19);
1106         const __m128i vxi19x01234567 = _mm_cvtepi8_epi16(vi19x01234567);
1107         const __m128i vk19x01234567 = _mm_loadl_epi64((const __m128i*) (k + 456));
1108         const __m128i vxk19x01234567 = _mm_cvtepi8_epi16(vk19x01234567);
1109         i19 += 8;
1110 
1111 
1112         vprod01234567 = _mm_mullo_epi16(vxi19x01234567, vxk19x01234567);
1113 
1114         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1115         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1116 
1117         const __m128i vi20x01234567 = _mm_loadl_epi64((const __m128i*) i20);
1118         const __m128i vxi20x01234567 = _mm_cvtepi8_epi16(vi20x01234567);
1119         const __m128i vk20x01234567 = _mm_loadl_epi64((const __m128i*) (k + 480));
1120         const __m128i vxk20x01234567 = _mm_cvtepi8_epi16(vk20x01234567);
1121         i20 += 8;
1122 
1123 
1124         vprod01234567 = _mm_mullo_epi16(vxi20x01234567, vxk20x01234567);
1125 
1126         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1127         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1128 
1129         const __m128i vi21x01234567 = _mm_loadl_epi64((const __m128i*) i21);
1130         const __m128i vxi21x01234567 = _mm_cvtepi8_epi16(vi21x01234567);
1131         const __m128i vk21x01234567 = _mm_loadl_epi64((const __m128i*) (k + 504));
1132         const __m128i vxk21x01234567 = _mm_cvtepi8_epi16(vk21x01234567);
1133         i21 += 8;
1134 
1135 
1136         vprod01234567 = _mm_mullo_epi16(vxi21x01234567, vxk21x01234567);
1137 
1138         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1139         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1140 
1141         const __m128i vi22x01234567 = _mm_loadl_epi64((const __m128i*) i22);
1142         const __m128i vxi22x01234567 = _mm_cvtepi8_epi16(vi22x01234567);
1143         const __m128i vk22x01234567 = _mm_loadl_epi64((const __m128i*) (k + 528));
1144         const __m128i vxk22x01234567 = _mm_cvtepi8_epi16(vk22x01234567);
1145         i22 += 8;
1146 
1147 
1148         vprod01234567 = _mm_mullo_epi16(vxi22x01234567, vxk22x01234567);
1149 
1150         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1151         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1152 
1153         const __m128i vi23x01234567 = _mm_loadl_epi64((const __m128i*) i23);
1154         const __m128i vxi23x01234567 = _mm_cvtepi8_epi16(vi23x01234567);
1155         const __m128i vk23x01234567 = _mm_loadl_epi64((const __m128i*) (k + 552));
1156         const __m128i vxk23x01234567 = _mm_cvtepi8_epi16(vk23x01234567);
1157         i23 += 8;
1158 
1159 
1160         vprod01234567 = _mm_mullo_epi16(vxi23x01234567, vxk23x01234567);
1161 
1162         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1163         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1164 
1165         const __m128i vi24x01234567 = _mm_loadl_epi64((const __m128i*) i24);
1166         const __m128i vxi24x01234567 = _mm_cvtepi8_epi16(vi24x01234567);
1167         const __m128i vk24x01234567 = _mm_loadl_epi64((const __m128i*) (k + 576));
1168         const __m128i vxk24x01234567 = _mm_cvtepi8_epi16(vk24x01234567);
1169         i24 += 8;
1170 
1171 
1172         vprod01234567 = _mm_mullo_epi16(vxi24x01234567, vxk24x01234567);
1173 
1174         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1175         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1176 
1177         k += 8;
1178 
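        // Requantize the 8 remainder accumulators the same way as in the main loop: scale in
        // float, clamp from above, round, add the zero point, and pack to int8.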
1179         __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
1180         __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
1181 
1182         const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
1183         vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
1184         vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
1185 
1186         const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
1187         vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
1188         vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
1189 
1190         vacc0123 = _mm_cvtps_epi32(vscaled0123);
1191         vacc4567 = _mm_cvtps_epi32(vscaled4567);
1192 
1193         w = (const void*) ((const int32_t*) w + 8);
1194 
1195         const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
1196         __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
1197 
1198 
1199         __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
1200 
1201         vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
1202 
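        // Store the results: 8 bytes when at least 8 channels remain; otherwise write 4, 2,
        // and/or 1 bytes according to the low bits of c, shifting the vector as bytes are consumed.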
1203         if XNN_LIKELY(c >= 8) {
1204           _mm_storel_epi64((__m128i*) output, vout0123456701234567);
1205           output += 8;
1206           c -= 8;
1207         } else {
1208           if (c & 4) {
1209             *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
1210             vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
1211             output += 4;
1212           }
1213           if (c & 2) {
1214             *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
1215             vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
1216             output += 2;
1217           }
1218           if (c & 1) {
1219             *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
1220             output += 1;
1221           }
1222           c = 0;
1223         }
1224       } while (c != 0);
1225     }
1226 
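    // Advance the output pointer by the caller-supplied increment and continue with the next
    // output pixel until output_width is exhausted.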
1227     output = (int8_t*) ((uintptr_t) output + output_increment);
1228   } while (--output_width != 0);
1229 }
1230