// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

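// 5x5 depthwise convolution with stride 1 and 2 pixels of implicit zero
// padding on each side, applied to one channel of a CHW-layout tensor. The
// kernel name encodes the tiling: 2 output rows x 4 output pixels per
// iteration, with 2 partial accumulators per row (acc2) to shorten the
// floating-point dependency chain.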
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 2);

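  // The mask zeroes out-of-bounds lanes in the rightmost partial vector of a
  // row; min/max are the output clamping bounds, broadcast to all four lanes.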
  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

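  // weights[0] is the channel bias; weights[1..25] are the 5x5 filter taps in
  // row-major order. Each tap is broadcast into its own vector ("loadsplat"),
  // so the inner loops are pure multiply-adds with no further weight loads.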
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89AB = wasm_v128_load(weights + 8);
  const v128_t vwCDEF = wasm_v128_load(weights + 12);
  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
  const v128_t vwKLMN = wasm_v128_load(weights + 20);
  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
  const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
  const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
  const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
  const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
  const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
  const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
  const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
  const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
  const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
  const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
  const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
  const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
  const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
  const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
  const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
  const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
  const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
  const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
  const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

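  // Rewinds a fully-traversed row pointer to the start of its row: the
  // traversal advances in 4-float tiles, so the decrement is the row width
  // (in bytes) rounded up to a whole tile.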
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

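  // With padding_top == 2, the first two rows read by the 5-tap column window
  // are the caller-provided zero row.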
  const float* i0 = zero;
  const float* i1 = zero;
  const float* i2 = input;
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);

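  // Two output rows are produced per iteration of the height loop.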
  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + input_width);

  size_t output_height = input_height;
  do {
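    // Near the bottom edge, redirect input rows that fall below the image to
    // the zero row, and alias o1 to o0 when only one output row remains: o1 is
    // stored before o0, so the valid row-0 results overwrite the aliased
    // stores.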
    if XNN_UNPREDICTABLE(output_height < 2) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i4 = zero;
    }
    if XNN_UNPREDICTABLE(output_height < 4) {
      i5 = zero;
    }

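    // vi*x0123 holds the 4 pixels to the left of the current tile; starting
    // at zero implements the 2 columns of implicit left padding.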
    v128_t vi0x0123 = vzero;
    v128_t vi1x0123 = vzero;
    v128_t vi2x0123 = vzero;
    v128_t vi3x0123 = vzero;
    v128_t vi4x0123 = vzero;
    v128_t vi5x0123 = vzero;

    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;

    size_t w = input_width;
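    // Main loop: 4 output pixels per row per iteration, as long as more than
    // 8 input pixels remain (the last 5..8 need masked loads and are handled
    // below). vo*p0 and vo*p1 are the two partial accumulators per output row.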
    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;

      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));

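      // Shuffles splice the previous and current 4-pixel blocks to form the
      // input window shifted one column to the left (kernel column 1).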
      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));

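      // Window shifted two columns to the left (kernel column 0); the
      // previous block is no longer needed afterwards, so the registers
      // rotate.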
      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;
      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
      vi5x0123 = vi5x4567;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));

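      // Window shifted one column to the right (kernel column 3), spliced
      // from the current block and the freshly loaded next block.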
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));

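      // Window shifted two columns to the right (kernel column 4); the next
      // block then becomes the current one for the following iteration.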
      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;
      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
      vi5x4567 = vi5x89AB;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);

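      // Clamp to [min, max]. The _x86_ variant clamps with compares and
      // bitselect, which lowers to cheaper instructions than the
      // NaN-propagating wasm_f32x4_min/max on x86 targets.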
      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));

      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Always process the last block of 5..8 pixels.
    if XNN_LIKELY(w > 4 * sizeof(float)) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;

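      // The next block may extend past the end of the row; mask out the
      // invalid lanes.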
      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;

      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;
      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
      vi5x0123 = vi5x4567;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;
      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
      vi5x4567 = vi5x89AB;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);

      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));

      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;

      w -= 4 * sizeof(float);
    }
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
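    // Final block of 1..4 pixels: the current block itself may extend past
    // the row, so it is masked, and all columns to its right are zero.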
    {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;

      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
      vi5x4567 = wasm_v128_and(vmask, vi5x4567);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));

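      // Right-shifted windows splice in vzero: columns past the end of the
      // row are the implicit right padding.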
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);

      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));

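      // Store 4, 2, and/or 1 output pixels depending on the remainder width;
      // after a 2-element store the upper lanes are rotated down for the
      // possible final 1-element store.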
      if XNN_LIKELY(w & (4 * sizeof(float))) {
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
        }
        if (w & (1 * sizeof(float))) {
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

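    // Advance two rows down: the topmost row of the next window is recovered
    // by rewinding a fully-consumed row pointer by the rounded-up row length.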
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);

    o0 = o1;
    o1 = (float*) ((uintptr_t) o0 + input_width);

    output_height = doz(output_height, 2);
  } while (output_height != 0);
}
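
// Illustrative invocation sketch (not part of the generated kernel). It shows
// the calling convention under stated assumptions: one 24x24 channel in CHW
// layout, input_width passed in BYTES, weights laid out as 1 bias followed by
// the 25 filter taps, and a zero buffer covering at least one row rounded up
// to a 4-float tile. The xnn_init_f32_chw_params call mirrors XNNPACK's
// params-init conventions, but its exact name and signature should be treated
// as an assumption, not a verified API.
//
//   float input[24 * 24];
//   float output[24 * 24];
//   float weights[1 + 25];
//   float zero[24 + 4] = { 0.0f };  // all-zero padding row
//   union xnn_f32_chw_params params;
//   xnn_init_f32_chw_params(&params, 24 /* width in pixels */, -INFINITY, INFINITY);
//   xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_2x4_acc2(
//       24 /* input_height */, 24 * sizeof(float) /* input_width in bytes */,
//       input, weights, zero, output, 2 /* padding_top */, &params);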