// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

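  // Set up the output row pointers. When mr < 3, the unused rows alias the
  // previous row, so their stores land on memory the kernel already owns.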
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

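  // Load the output clamping parameters, replicated across all vector lanes.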
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    w += 8;

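    // Iterate over the ks bytes of indirection pointers, consuming mr (= 3)
    // A-row pointers per step; pointers equal to `zero` are not offset by
    // a_offset.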
    size_t p = ks;
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

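      // Main loop: process 4 elements of K per iteration. Each lane of the
      // A vectors is splatted via shuffle and accumulated against two vectors
      // (8 floats) of packed weights.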
      size_t k = kc;
      while (k >= 4 * sizeof(float)) {
        const v128_t va0 = wasm_v128_load(a0);
        a0 += 4;
        const v128_t va1 = wasm_v128_load(a1);
        a1 += 4;
        const v128_t va2 = wasm_v128_load(a2);
        a2 += 4;

        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);

        const v128_t vb0123c0 = wasm_v128_load(w + 0);
        const v128_t vb4567c0 = wasm_v128_load(w + 4);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);

        const v128_t vb0123c1 = wasm_v128_load(w + 8);
        const v128_t vb4567c1 = wasm_v128_load(w + 12);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);

        const v128_t vb0123c2 = wasm_v128_load(w + 16);
        const v128_t vb4567c2 = wasm_v128_load(w + 20);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);

        const v128_t vb0123c3 = wasm_v128_load(w + 24);
        const v128_t vb4567c3 = wasm_v128_load(w + 28);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));

        w += 32;
        k -= 4 * sizeof(float);
      }
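      // Remainder loop: handle the final kc % 4 elements one at a time,
      // broadcasting a single A element against 8 weights per step.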
      if XNN_UNLIKELY(k != 0) {
        do {
          const v128_t vb0123 = wasm_v128_load(w);
          const v128_t vb4567 = wasm_v128_load(w + 4);
          w += 8;

          const v128_t va0 = wasm_v128_load32_splat(a0);
          a0 += 1;
          const v128_t va1 = wasm_v128_load32_splat(a1);
          a1 += 1;
          const v128_t va2 = wasm_v128_load32_splat(a2);
          a2 += 1;

          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
          k -= sizeof(float);
        } while (k != 0);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

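    // Clamp the accumulators to [min, max]. pmin/pmax lower to single
    // minps/maxps instructions on x86, hence the _x86_ variant name.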
    vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
    vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);

    vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
    vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);

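    // Store a full 8-column tile when possible (rows in reverse order);
    // otherwise write the nc remainder in 4-, 2-, and 1-column pieces.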
    if XNN_LIKELY(nc >= 8) {
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
      if (nc & 4) {
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);

        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}