// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

// 4x4 f32 GEMM microkernel with min/max output clamping — scalar variant for
// WebAssembly (uses __builtin_wasm_min/max_f32 for branchless clamping).
//
// Computes a 4-row tile of C = clamp(A * B + bias, vmin, vmax):
//   mr        - rows of A/C to process, 1..4 (asserted below)
//   nc        - remaining output columns; consumed 4 at a time, tail via nc&2/nc&1
//   kc        - K dimension in BYTES (must be a multiple of sizeof(float))
//   a         - first row of A; successive rows are a_stride bytes apart
//   w         - packed weights: 4 bias floats, then kc/sizeof(float) groups of
//               4 B values (one per output column) — see the bias load and the
//               w += 4 advances in the loops below
//   c         - first row of C; rows are cm_stride bytes apart, and each
//               completed 4-column tile advances by cn_stride bytes
//   params    - scalar min/max clamping bounds
void xnn_f32_gemm_minmax_ukernel_4x4__wasm(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Per-row input/output pointers. When mr < 4, pointers for the absent rows
  // alias the previous row, so the unconditional 4-row compute below performs
  // redundant-but-in-bounds loads, and the aliased stores are overwritten by
  // (or identical to) the valid row's stores — no branches in the hot loop.
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  const float vmin = params->scalar.min;
  const float vmax = params->scalar.max;
  do {
    // Initialize all 4 rows of accumulators from the 4 packed bias values
    // (one bias per output column, broadcast down the rows).
    float vacc00 = w[0];
    float vacc01 = w[1];
    float vacc02 = w[2];
    float vacc03 = w[3];
    w += 4;
    float vacc10 = vacc00;
    float vacc11 = vacc01;
    float vacc12 = vacc02;
    float vacc13 = vacc03;
    float vacc20 = vacc00;
    float vacc21 = vacc01;
    float vacc22 = vacc02;
    float vacc23 = vacc03;
    float vacc30 = vacc00;
    float vacc31 = vacc01;
    float vacc32 = vacc02;
    float vacc33 = vacc03;

    // Main reduction over K: one rank-1 update (4 A values x 4 B values ->
    // 16 multiply-adds) per iteration; k counts down in bytes.
    size_t k = kc;
    do {
      const float va0 = *a0++;
      const float va1 = *a1++;
      const float va2 = *a2++;
      const float va3 = *a3++;

      const float vb0 = w[0];
      const float vb1 = w[1];
      const float vb2 = w[2];
      const float vb3 = w[3];
      w += 4;

      vacc00 = math_muladd_f32(va0, vb0, vacc00);
      vacc01 = math_muladd_f32(va0, vb1, vacc01);
      vacc02 = math_muladd_f32(va0, vb2, vacc02);
      vacc03 = math_muladd_f32(va0, vb3, vacc03);
      vacc10 = math_muladd_f32(va1, vb0, vacc10);
      vacc11 = math_muladd_f32(va1, vb1, vacc11);
      vacc12 = math_muladd_f32(va1, vb2, vacc12);
      vacc13 = math_muladd_f32(va1, vb3, vacc13);
      vacc20 = math_muladd_f32(va2, vb0, vacc20);
      vacc21 = math_muladd_f32(va2, vb1, vacc21);
      vacc22 = math_muladd_f32(va2, vb2, vacc22);
      vacc23 = math_muladd_f32(va2, vb3, vacc23);
      vacc30 = math_muladd_f32(va3, vb0, vacc30);
      vacc31 = math_muladd_f32(va3, vb1, vacc31);
      vacc32 = math_muladd_f32(va3, vb2, vacc32);
      vacc33 = math_muladd_f32(va3, vb3, vacc33);

      k -= sizeof(float);
    } while (k != 0);

    // Clamp to [vmin, vmax] via the wasm float min/max builtins (branchless;
    // lowers to wasm f32.min/f32.max instructions).
    vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
    vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
    vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
    vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
    vacc10 = __builtin_wasm_max_f32(vacc10, vmin);
    vacc11 = __builtin_wasm_max_f32(vacc11, vmin);
    vacc12 = __builtin_wasm_max_f32(vacc12, vmin);
    vacc13 = __builtin_wasm_max_f32(vacc13, vmin);
    vacc20 = __builtin_wasm_max_f32(vacc20, vmin);
    vacc21 = __builtin_wasm_max_f32(vacc21, vmin);
    vacc22 = __builtin_wasm_max_f32(vacc22, vmin);
    vacc23 = __builtin_wasm_max_f32(vacc23, vmin);
    vacc30 = __builtin_wasm_max_f32(vacc30, vmin);
    vacc31 = __builtin_wasm_max_f32(vacc31, vmin);
    vacc32 = __builtin_wasm_max_f32(vacc32, vmin);
    vacc33 = __builtin_wasm_max_f32(vacc33, vmin);

    vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
    vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
    vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
    vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
    vacc10 = __builtin_wasm_min_f32(vacc10, vmax);
    vacc11 = __builtin_wasm_min_f32(vacc11, vmax);
    vacc12 = __builtin_wasm_min_f32(vacc12, vmax);
    vacc13 = __builtin_wasm_min_f32(vacc13, vmax);
    vacc20 = __builtin_wasm_min_f32(vacc20, vmax);
    vacc21 = __builtin_wasm_min_f32(vacc21, vmax);
    vacc22 = __builtin_wasm_min_f32(vacc22, vmax);
    vacc23 = __builtin_wasm_min_f32(vacc23, vmax);
    vacc30 = __builtin_wasm_min_f32(vacc30, vmax);
    vacc31 = __builtin_wasm_min_f32(vacc31, vmax);
    vacc32 = __builtin_wasm_min_f32(vacc32, vmax);
    vacc33 = __builtin_wasm_min_f32(vacc33, vmax);

    if XNN_LIKELY(nc >= 4) {
      // Full 4-column tile: store all 16 results (highest row first),
      // advance each c pointer to the next tile, and rewind the a pointers
      // (each advanced by kc bytes in the K loop) for the next tile.
      c3[0] = vacc30;
      c3[1] = vacc31;
      c3[2] = vacc32;
      c3[3] = vacc33;
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      c2[0] = vacc20;
      c2[1] = vacc21;
      c2[2] = vacc22;
      c2[3] = vacc23;
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      c1[0] = vacc10;
      c1[1] = vacc11;
      c1[2] = vacc12;
      c1[3] = vacc13;
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      c0[0] = vacc00;
      c0[1] = vacc01;
      c0[2] = vacc02;
      c0[3] = vacc03;
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a3 = (const void*) ((uintptr_t) a3 - kc);
      a2 = (const void*) ((uintptr_t) a2 - kc);
      a1 = (const void*) ((uintptr_t) a1 - kc);
      a0 = (const void*) ((uintptr_t) a0 - kc);

      nc -= 4;
    } else {
      // Tail: nc in 1..3. Store the low 2 columns if bit 1 is set (shifting
      // column 2 down into the "column 0" slot), then the remaining single
      // column if bit 0 is set. This is always the final tile.
      if (nc & 2) {
        c3[0] = vacc30;
        c3[1] = vacc31;
        vacc30 = vacc32;
        c3 += 2;
        c2[0] = vacc20;
        c2[1] = vacc21;
        vacc20 = vacc22;
        c2 += 2;
        c1[0] = vacc10;
        c1[1] = vacc11;
        vacc10 = vacc12;
        c1 += 2;
        c0[0] = vacc00;
        c0[1] = vacc01;
        vacc00 = vacc02;
        c0 += 2;
      }
      if (nc & 1) {
        c3[0] = vacc30;
        c2[0] = vacc20;
        c1[0] = vacc10;
        c0[0] = vacc00;
      }

      nc = 0;
    }
  } while (nc != 0);
}