// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


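// Scalar 4x4 GEMM micro-kernel with a fused ReLU activation: it computes up
// to 4 rows (mr) by 4 columns of C = A * B plus a per-column bias, then
// clamps every result at zero. B and the bias are read from the packed
// weight buffer `w`.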
void xnn_f32_gemm_relu_ukernel_4x4__scalar(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

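  // One A read pointer and one C write pointer per row. When mr < 4, the
  // pointers of the unused rows alias the row above, so their loads and
  // stores are redundant but never go out of bounds.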
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

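  // Outer loop: produce the output in tiles of up to 4 columns. The 16
  // accumulators start from the first four packed weight values, which hold
  // the tile's per-column bias in XNNPACK's weight-packing scheme.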
  do {
    float vacc00 = w[0];
    float vacc01 = w[1];
    float vacc02 = w[2];
    float vacc03 = w[3];
    w += 4;
    float vacc10 = vacc00;
    float vacc11 = vacc01;
    float vacc12 = vacc02;
    float vacc13 = vacc03;
    float vacc20 = vacc00;
    float vacc21 = vacc01;
    float vacc22 = vacc02;
    float vacc23 = vacc03;
    float vacc30 = vacc00;
    float vacc31 = vacc01;
    float vacc32 = vacc02;
    float vacc33 = vacc03;

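    // Inner loop: consume one K element per iteration, feeding each of the
    // 16 accumulators one fused multiply-add.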
    size_t k = kc;
    do {
      const float va0 = *a0++;
      const float va1 = *a1++;
      const float va2 = *a2++;
      const float va3 = *a3++;

      const float vb0 = w[0];
      const float vb1 = w[1];
      const float vb2 = w[2];
      const float vb3 = w[3];
      w += 4;

      vacc00 = math_muladd_f32(va0, vb0, vacc00);
      vacc01 = math_muladd_f32(va0, vb1, vacc01);
      vacc02 = math_muladd_f32(va0, vb2, vacc02);
      vacc03 = math_muladd_f32(va0, vb3, vacc03);
      vacc10 = math_muladd_f32(va1, vb0, vacc10);
      vacc11 = math_muladd_f32(va1, vb1, vacc11);
      vacc12 = math_muladd_f32(va1, vb2, vacc12);
      vacc13 = math_muladd_f32(va1, vb3, vacc13);
      vacc20 = math_muladd_f32(va2, vb0, vacc20);
      vacc21 = math_muladd_f32(va2, vb1, vacc21);
      vacc22 = math_muladd_f32(va2, vb2, vacc22);
      vacc23 = math_muladd_f32(va2, vb3, vacc23);
      vacc30 = math_muladd_f32(va3, vb0, vacc30);
      vacc31 = math_muladd_f32(va3, vb1, vacc31);
      vacc32 = math_muladd_f32(va3, vb2, vacc32);
      vacc33 = math_muladd_f32(va3, vb3, vacc33);

      k -= sizeof(float);
    } while (k != 0);

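    // Fused ReLU: clamp every accumulator at zero.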
    vacc00 = math_max_f32(vacc00, 0.0f);
    vacc01 = math_max_f32(vacc01, 0.0f);
    vacc02 = math_max_f32(vacc02, 0.0f);
    vacc03 = math_max_f32(vacc03, 0.0f);
    vacc10 = math_max_f32(vacc10, 0.0f);
    vacc11 = math_max_f32(vacc11, 0.0f);
    vacc12 = math_max_f32(vacc12, 0.0f);
    vacc13 = math_max_f32(vacc13, 0.0f);
    vacc20 = math_max_f32(vacc20, 0.0f);
    vacc21 = math_max_f32(vacc21, 0.0f);
    vacc22 = math_max_f32(vacc22, 0.0f);
    vacc23 = math_max_f32(vacc23, 0.0f);
    vacc30 = math_max_f32(vacc30, 0.0f);
    vacc31 = math_max_f32(vacc31, 0.0f);
    vacc32 = math_max_f32(vacc32, 0.0f);
    vacc33 = math_max_f32(vacc33, 0.0f);

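    // Common case: a full 4-column tile. Store it, advance the C pointers to
    // the next tile, and rewind the A pointers by kc so the same rows are
    // replayed against the next block of packed weights.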
    if XNN_LIKELY(nc >= 4) {
      c3[0] = vacc30;
      c3[1] = vacc31;
      c3[2] = vacc32;
      c3[3] = vacc33;
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      c2[0] = vacc20;
      c2[1] = vacc21;
      c2[2] = vacc22;
      c2[3] = vacc23;
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      c1[0] = vacc10;
      c1[1] = vacc11;
      c1[2] = vacc12;
      c1[3] = vacc13;
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      c0[0] = vacc00;
      c0[1] = vacc01;
      c0[2] = vacc02;
      c0[3] = vacc03;
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a3 = (const void*) ((uintptr_t) a3 - kc);
      a2 = (const void*) ((uintptr_t) a2 - kc);
      a1 = (const void*) ((uintptr_t) a1 - kc);
      a0 = (const void*) ((uintptr_t) a0 - kc);

      nc -= 4;
    } else {
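      // Remainder tile: fewer than 4 columns remain. Store 2 columns, then
      // 1, based on the low bits of nc, shifting the next unstored column's
      // value into the first accumulator slot between steps.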
      if (nc & 2) {
        c3[0] = vacc30;
        c3[1] = vacc31;
        vacc30 = vacc32;
        c3 += 2;
        c2[0] = vacc20;
        c2[1] = vacc21;
        vacc20 = vacc22;
        c2 += 2;
        c1[0] = vacc10;
        c1[1] = vacc11;
        vacc10 = vacc12;
        c1 += 2;
        c0[0] = vacc00;
        c0[1] = vacc01;
        vacc00 = vacc02;
        c0 += 2;
      }
      if (nc & 1) {
        c3[0] = vacc30;
        c2[0] = vacc20;
        c1[0] = vacc10;
        c0[0] = vacc00;
      }

      nc = 0;
    }
  } while (nc != 0);
}