// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/MRx2c4-wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_relu_ukernel_4x2c4__wasmsimd(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

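  // One output-row pointer per row of the 4x2 tile. When fewer than 4 rows
  // remain (mr < 4), the surplus pointers alias the previous row, so their
  // stores become harmless duplicate writes rather than out-of-bounds accesses.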
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

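  // Produce the output two columns at a time (the "x2" in the kernel name);
  // nc counts the columns still to be written.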
  do {
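    // Seed the accumulators with the per-column biases w[0] and w[1] in lane 0;
    // the remaining lanes start at zero. Each accumulator carries 4 partial
    // sums (the "c4") that are reduced to one value after the K loop.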
    v128_t vacc0x0c4 = wasm_f32x4_replace_lane(wasm_f32x4_splat(0.0f), 0, w[0]);
    v128_t vacc0x1c4 = wasm_f32x4_replace_lane(vacc0x0c4, 0, w[1]);
    v128_t vacc1x0c4 = vacc0x0c4;
    v128_t vacc1x1c4 = vacc0x1c4;
    v128_t vacc2x0c4 = vacc0x0c4;
    v128_t vacc2x1c4 = vacc0x1c4;
    v128_t vacc3x0c4 = vacc0x0c4;
    v128_t vacc3x1c4 = vacc0x1c4;
    w += 2;

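    // Walk the indirection buffer: each iteration consumes 4 input-row
    // pointers (one per tile row) out of the ks bytes of entries. Pointers
    // equal to `zero` select the zero-padding row and are deliberately not
    // displaced by a_offset.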
    size_t p = ks;
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

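      // Main K loop: 4 input floats per row against 4 packed floats per
      // output column (vb0 for column 0, vb1 for column 1), multiply-added
      // lanewise into the c4 accumulators.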
      size_t k = kc;
      for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
        const v128_t va0 = wasm_v128_load(a0);
        a0 += 4;
        const v128_t va1 = wasm_v128_load(a1);
        a1 += 4;
        const v128_t va2 = wasm_v128_load(a2);
        a2 += 4;
        const v128_t va3 = wasm_v128_load(a3);
        a3 += 4;

        const v128_t vb0 = wasm_v128_load(w);
        const v128_t vb1 = wasm_v128_load(w + 4);
        w += 8;

        vacc0x0c4 = wasm_f32x4_add(vacc0x0c4, wasm_f32x4_mul(va0, vb0));
        vacc0x1c4 = wasm_f32x4_add(vacc0x1c4, wasm_f32x4_mul(va0, vb1));
        vacc1x0c4 = wasm_f32x4_add(vacc1x0c4, wasm_f32x4_mul(va1, vb0));
        vacc1x1c4 = wasm_f32x4_add(vacc1x1c4, wasm_f32x4_mul(va1, vb1));
        vacc2x0c4 = wasm_f32x4_add(vacc2x0c4, wasm_f32x4_mul(va2, vb0));
        vacc2x1c4 = wasm_f32x4_add(vacc2x1c4, wasm_f32x4_mul(va2, vb1));
        vacc3x0c4 = wasm_f32x4_add(vacc3x0c4, wasm_f32x4_mul(va3, vb0));
        vacc3x1c4 = wasm_f32x4_add(vacc3x1c4, wasm_f32x4_mul(va3, vb1));
      }
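      // Remainder (kc not a multiple of 4 floats): the packed weights are
      // zero-padded past the last valid element, so lanes where vb == 0 are
      // masked out of va with andnot. This keeps values read past the end of
      // the input row (possibly NaN/Inf) from contaminating the accumulators.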
      if XNN_UNLIKELY(k != 0) {
        const v128_t va0 = wasm_v128_load(a0);
        const v128_t va1 = wasm_v128_load(a1);
        const v128_t va2 = wasm_v128_load(a2);
        const v128_t va3 = wasm_v128_load(a3);

        const v128_t vb0 = wasm_v128_load(w);
        const v128_t vb1 = wasm_v128_load(w + 4);
        w += 8;

        const v128_t vzero = wasm_f32x4_splat(0.0f);
        const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
        const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);

        vacc0x0c4 = wasm_f32x4_add(vacc0x0c4, wasm_f32x4_mul(wasm_v128_andnot(va0, vmask0), vb0));
        vacc0x1c4 = wasm_f32x4_add(vacc0x1c4, wasm_f32x4_mul(wasm_v128_andnot(va0, vmask1), vb1));
        vacc1x0c4 = wasm_f32x4_add(vacc1x0c4, wasm_f32x4_mul(wasm_v128_andnot(va1, vmask0), vb0));
        vacc1x1c4 = wasm_f32x4_add(vacc1x1c4, wasm_f32x4_mul(wasm_v128_andnot(va1, vmask1), vb1));
        vacc2x0c4 = wasm_f32x4_add(vacc2x0c4, wasm_f32x4_mul(wasm_v128_andnot(va2, vmask0), vb0));
        vacc2x1c4 = wasm_f32x4_add(vacc2x1c4, wasm_f32x4_mul(wasm_v128_andnot(va2, vmask1), vb1));
        vacc3x0c4 = wasm_f32x4_add(vacc3x0c4, wasm_f32x4_mul(wasm_v128_andnot(va3, vmask0), vb0));
        vacc3x1c4 = wasm_f32x4_add(vacc3x1c4, wasm_f32x4_mul(wasm_v128_andnot(va3, vmask1), vb1));
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

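    // Two-step horizontal reduction: first collapse each row's pair of c4
    // accumulators into { n0, n1, n0, n1 } partial sums, then combine rows so
    // that vacc01x01 = { r0c0, r0c1, r1c0, r1c1 } and vacc23x01 holds the
    // same layout for rows 2 and 3.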
    const v128_t vacc0x01c2 = wasm_f32x4_add(
      wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
      wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
    const v128_t vacc1x01c2 = wasm_f32x4_add(
      wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
      wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
    const v128_t vacc2x01c2 = wasm_f32x4_add(
      wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
      wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
    const v128_t vacc3x01c2 = wasm_f32x4_add(
      wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
      wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));

    v128_t vacc01x01 = wasm_f32x4_add(
      wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
      wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
    v128_t vacc23x01 = wasm_f32x4_add(
      wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
      wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));

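    // ReLU via integer max: positive floats have a non-negative bit pattern
    // and negative floats a negative one when viewed as signed int32, so
    // wasm_i32x4_max(x, 0) clamps every negative lane to +0.0f without a
    // floating-point compare-and-select.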
    const v128_t vzero = wasm_f32x4_splat(0.0f);
    vacc01x01 = wasm_i32x4_max(vacc01x01, vzero);
    vacc23x01 = wasm_i32x4_max(vacc23x01, vzero);

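    // Store the 2 output floats of each row with a single 64-bit lane
    // extraction (rows are written last-to-first). With only one column left,
    // extract and store each row's single float instead.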
    if XNN_LIKELY(nc >= 2) {
      *((double*) c3) = wasm_f64x2_extract_lane(vacc23x01, 1);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      *((double*) c2) = wasm_f64x2_extract_lane(vacc23x01, 0);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      *((double*) c1) = wasm_f64x2_extract_lane(vacc01x01, 1);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      *((double*) c0) = wasm_f64x2_extract_lane(vacc01x01, 0);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

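      // Rewind the indirection buffer (advanced by ks bytes in total above)
      // so the same input rows are replayed for the next pair of columns.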
      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 2;
    } else {
      assert(nc == 1);
      *c3 = wasm_f32x4_extract_lane(vacc23x01, 2);
      *c2 = wasm_f32x4_extract_lane(vacc23x01, 0);
      *c1 = wasm_f32x4_extract_lane(vacc01x01, 2);
      *c0 = wasm_f32x4_extract_lane(vacc01x01, 0);

      nc = 0;
    }
  } while (nc != 0);
}