// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

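// GEMM microkernel computing an mr x nc tile of the output (mr <= 6, nc
// consumed in blocks of 8 columns), accumulating over kc bytes of the
// reduction dimension. A is read with per-row stride a_stride; w holds the
// pre-packed weights (8 bias floats followed by interleaved 8-wide weight
// blocks); results are clamped to [params->wasmsimd.min, params->wasmsimd.max]
// before being stored to c.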
void xnn_f32_gemm_minmax_ukernel_6x8s4__wasmsimd_x86(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

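  // Set up per-row input/output pointers. When mr < 6, out-of-range rows are
  // aliased to the previous valid row, so the kernel always computes 6 rows
  // but the extra ones merely repeat work on valid memory.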
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

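  // The clamping bounds are stored as duplicated float pairs, so a 64-bit
  // splat load broadcasts min (resp. max) into all four lanes.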
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
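    // Initialize all 6 rows of accumulators from the 8 bias values packed at
    // the start of w; every row starts from the same bias.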
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

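    // Main loop: consume 4 elements of the reduction dimension per iteration.
    // Each of the 4 sub-steps multiplies the A vectors against the next
    // packed 8-wide B block, then rotates the A lanes left by one position
    // (shuffle 1,2,3,0) so the following B block sees the shifted operands;
    // this is the "s4" shifted layout the packing routine is expected to
    // produce.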
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;
      v128_t va5 = wasm_v128_load(a5);
      a5 += 4;


      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c0));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c0));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c0));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123c0));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123c0));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123c0));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c0));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c0));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c0));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567c0));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567c0));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567c0));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c1));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c1));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c1));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123c1));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123c1));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123c1));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c1));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c1));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c1));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567c1));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567c1));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567c1));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c2));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c2));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c2));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123c2));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123c2));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123c2));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c2));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c2));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c2));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567c2));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567c2));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567c2));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c3));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c3));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c3));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123c3));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123c3));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123c3));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c3));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c3));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c3));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567c3));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567c3));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567c3));


      w += 32;
      k -= 4 * sizeof(float);
    }
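    // Remainder: 1-3 leftover elements. Full 4-lane vectors are still loaded
    // and all 4 sub-steps run, but any A lane whose corresponding packed
    // weight is zero (positions past kc are expected to be zero-padded by the
    // packing routine) is cleared first via andnot, so stale lane contents
    // cannot inject NaN/Inf into the accumulators.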
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);
      v128_t va5 = wasm_v128_load(a5);
      a5 = (const float*) ((uintptr_t) a5 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));


      w += 32;
    }

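    // Clamp the accumulators to [min, max]. The pseudo-min/max operations
    // (pmin/pmax) match the operand-ordering semantics of x86 SSE
    // minps/maxps, letting them lower to single instructions there; hence
    // the _x86 suffix on this kernel variant.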
    vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
    vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
    vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
    vacc5x0123 = wasm_f32x4_pmax(vmin, vacc5x0123);
    vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
    vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
    vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
    vacc5x4567 = wasm_f32x4_pmax(vmin, vacc5x4567);

    vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
    vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
    vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
    vacc5x0123 = wasm_f32x4_pmin(vmax, vacc5x0123);
    vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
    vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
    vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
    vacc5x4567 = wasm_f32x4_pmin(vmax, vacc5x4567);

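    // Common case: store a full 6x8 tile, advance the output pointers to the
    // next 8-column block, and rewind the input pointers by kc bytes so the
    // same rows of A are re-read for that block.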
    if XNN_LIKELY(nc >= 8) {
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
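      // Tail: write the remaining 1-7 columns with progressively narrower
      // stores (4, then 2, then 1 floats), shifting the surviving lanes down
      // after each step.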
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
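      // A pair of floats is stored as one 64-bit value via an f64x2 lane
      // extract; the shuffle then moves lanes 2,3 down for the final
      // single-float step.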
      if (nc & 2) {
        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);

        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}