// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


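// Depthwise 2D convolution over a single channel in CHW layout, with a
// 5x5 kernel and 2 pixels of "same" padding ("5x5p2"). This variant
// computes 1 output row at a time, 4 pixels per SIMD iteration ("1x4"),
// and spreads the 25 multiply-adds over 5 independent accumulators
// ("acc5") to shorten the floating-point dependency chain.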
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 2);

  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);

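  // weights[0] is the bias; weights[1..25] are the 25 filter taps in
  // row-major order. Load them once and broadcast every scalar to all
  // four lanes up front ("loadsplat"), so the inner loops are pure
  // multiply-adds.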
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89AB = wasm_v128_load(weights + 8);
  const v128_t vwCDEF = wasm_v128_load(weights + 12);
  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
  const v128_t vwKLMN = wasm_v128_load(weights + 20);
  const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
  const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
  const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
  const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
  const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
  const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
  const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
  const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
  const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
  const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
  const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
  const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
  const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
  const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
  const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
  const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
  const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
  const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
  const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
  const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);

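  // Number of bytes to rewind a row pointer once a row has been consumed:
  // the loop advances by input_width rounded up to a whole 4-float vector.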
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

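  // padding_top == 2: the two input rows above the image read from the
  // zero buffer.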
  const float* i0 = zero;
  const float* i1 = zero;
  const float* i2 = input;
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);

  float* o0 = output;

  size_t output_height = input_height;
  do {
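    // Bottom padding: for the last output rows, the input rows below the
    // image read from the zero buffer instead.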
    if XNN_UNPREDICTABLE(output_height < 2) {
      i3 = zero;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i4 = zero;
    }

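    // Left padding: the "previous block" registers start out as zeros,
    // supplying the two implicit zero columns to the left of the image.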
    v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);

    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;

    size_t w = input_width;
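    // Main loop: 4 output pixels per iteration while more than 8 input
    // pixels remain, so the look-ahead load of the next 4-pixel block
    // (vi*x89AB) is always fully inside the row.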
104 for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
105 v128_t vo0p0 = vbias;
106
107 const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
108 const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
109 const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
110 const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
111 const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
112
113 v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
114
115 v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
116
117 v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
118
119 v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);
120
121 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));
122
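      // Each shuffle below splices two adjacent 4-pixel blocks to shift
      // the input window by one pixel, aligning columns with filter taps.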
      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));

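      // Reduce the five partial accumulators into one.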
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);

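      // Clamp to [min, max]. f32x4.pmin/pmax lower to single MINPS/MAXPS
      // instructions on x86, which is what the _x86_ kernel suffix refers to.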
      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);

      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Always process the last block of 5..8 pixels.
    if XNN_LIKELY(w > 4 * sizeof(float)) {
      v128_t vo0p0 = vbias;

      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;

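      // Zero the lanes of the look-ahead block that lie past the end of the
      // row (the load itself may read out of bounds; see XNN_OOB_READS).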
      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);

      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);

      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);

      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);

      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);

      wasm_v128_store(o0, vo0); o0 += 4;

      w -= 4 * sizeof(float);
    }
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
    {
      v128_t vo0p0 = vbias;

      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
      vi4x4567 = wasm_v128_and(vmask, vi4x4567);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);

      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);

      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);

      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));

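      // Past the right edge there is nothing left to load: shift zeros in
      // instead of a next block.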
      const v128_t vzero = wasm_f32x4_const_splat(0.0f);
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);

      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);

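      // Store 4, 2, and/or 1 output pixels, depending on the remaining width.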
413 if XNN_LIKELY(w & (4 * sizeof(float))) {
414 wasm_v128_store(o0, vo0); o0 += 4;
415 } else {
416 if (w & (2 * sizeof(float))) {
417 *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
418
419 vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
420 }
421 if (w & (1 * sizeof(float))) {
422 *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
423 }
424 }
425 }
426
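    // Advance to the next input row: rewind the over-advanced pointers by
    // input_decrement and rebuild the lower rows from the new i1.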
    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);

  } while (--output_height != 0);
}