// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


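// Depthwise-convolution micro-kernel: "up2x9" denotes a unipass kernel that
// handles 9 kernel taps (e.g. a 3x3 filter) and processes 2 channels per
// main-loop iteration, using portable scalar arithmetic with min/max output
// clamping.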
void xnn_f32_dwconv_minmax_ukernel_up2x9__scalar(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  const float vmin = params->scalar.min;
  const float vmax = params->scalar.max;
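  // Each iteration of this loop produces one output pixel across all channels.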
  do {
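    // Load the 9 input row pointers for this output pixel. A pointer equal to
    // `zero` selects the shared zero (padding) buffer and is used as-is; all
    // other pointers are shifted by `input_offset`.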
    const float* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
    }
    const float* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
    }
    const float* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
    }
    const float* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
    }
    const float* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
    }
    const float* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
    }
    const float* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
    }
    const float* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const float*) ((uintptr_t) i7 + input_offset);
    }
    const float* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const float*) ((uintptr_t) i8 + input_offset);
    }
    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
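    // Main loop: 2 channels per iteration. The packed weights interleave the
    // channel pair: 2 biases, then 2 values per tap for each of the 9 taps
    // (20 floats total, matching the `w += 20` below).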
    for (; c >= 2; c -= 2) {
      float vacc0p0 = w[0];
      float vacc1p0 = w[1];


      const float vi0x0 = i0[0];
      const float vi0x1 = i0[1];
      i0 += 2;

      const float vk0x0 = w[2];
      vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
      const float vk0x1 = w[3];
      vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);

      const float vi1x0 = i1[0];
      const float vi1x1 = i1[1];
      i1 += 2;

      const float vk1x0 = w[4];
      vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
      const float vk1x1 = w[5];
      vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);

      const float vi2x0 = i2[0];
      const float vi2x1 = i2[1];
      i2 += 2;

      const float vk2x0 = w[6];
      vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
      const float vk2x1 = w[7];
      vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);

      const float vi3x0 = i3[0];
      const float vi3x1 = i3[1];
      i3 += 2;

      const float vk3x0 = w[8];
      vacc0p0 = math_muladd_f32(vi3x0, vk3x0, vacc0p0);
      const float vk3x1 = w[9];
      vacc1p0 = math_muladd_f32(vi3x1, vk3x1, vacc1p0);

      const float vi4x0 = i4[0];
      const float vi4x1 = i4[1];
      i4 += 2;

      const float vk4x0 = w[10];
      vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
      const float vk4x1 = w[11];
      vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);

      const float vi5x0 = i5[0];
      const float vi5x1 = i5[1];
      i5 += 2;

      const float vk5x0 = w[12];
      vacc0p0 = math_muladd_f32(vi5x0, vk5x0, vacc0p0);
      const float vk5x1 = w[13];
      vacc1p0 = math_muladd_f32(vi5x1, vk5x1, vacc1p0);

      const float vi6x0 = i6[0];
      const float vi6x1 = i6[1];
      i6 += 2;

      const float vk6x0 = w[14];
      vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
      const float vk6x1 = w[15];
      vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);

      const float vi7x0 = i7[0];
      const float vi7x1 = i7[1];
      i7 += 2;

      const float vk7x0 = w[16];
      vacc0p0 = math_muladd_f32(vi7x0, vk7x0, vacc0p0);
      const float vk7x1 = w[17];
      vacc1p0 = math_muladd_f32(vi7x1, vk7x1, vacc1p0);

      const float vi8x0 = i8[0];
      const float vi8x1 = i8[1];
      i8 += 2;

      const float vk8x0 = w[18];
      vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
      const float vk8x1 = w[19];
      vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);

      w += 20;


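      // Clamp the accumulated values to the [vmin, vmax] output range.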
      float vacc0 = math_max_f32(vacc0p0, vmin);
      float vacc1 = math_max_f32(vacc1p0, vmin);

      vacc0 = math_min_f32(vacc0, vmax);
      vacc1 = math_min_f32(vacc1, vmax);

      output[0] = vacc0;
      output[1] = vacc1;
      output += 2;
    }
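    // Remainder loop: handle the last channel when `channels` is odd. After
    // the bias is read via *w++, the tap weights sit at odd offsets w[1],
    // w[3], ..., w[17], because weights are packed in channel pairs.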
    for (; c >= 1; c -= 1) {
      float vacc0p0 = *w++;

      const float vi0 = *i0++;
      const float vk0 = w[1];
      vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
      const float vi1 = *i1++;
      const float vk1 = w[3];
      vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
      const float vi2 = *i2++;
      const float vk2 = w[5];
      vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
      const float vi3 = *i3++;
      const float vk3 = w[7];
      vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
      const float vi4 = *i4++;
      const float vk4 = w[9];
      vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
      const float vi5 = *i5++;
      const float vk5 = w[11];
      vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
      const float vi6 = *i6++;
      const float vk6 = w[13];
      vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
      const float vi7 = *i7++;
      const float vk7 = w[15];
      vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
      const float vi8 = *i8++;
      const float vk8 = w[17];
      vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);


      float vacc0 = math_max_f32(vacc0p0, vmin);
      vacc0 = math_min_f32(vacc0, vmax);
      *output++ = vacc0;
    }

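    // `output` has already advanced past this pixel's channels;
    // `output_increment` skips to the start of the next output pixel.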
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
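
// Illustrative call sketch (not part of the generated file; the variable
// names and the direct params initialization below are assumptions). For a
// 3x3 depthwise filter, `input` holds 9 row pointers per output pixel, with
// padding entries pointing at `zero`:
//
//   union xnn_f32_minmax_params params;
//   params.scalar.min = 0.0f;  // hypothetical clamp bounds (ReLU6-style)
//   params.scalar.max = 6.0f;
//   xnn_f32_dwconv_minmax_ukernel_up2x9__scalar(
//       channels, output_width, indirection_buffer, packed_weights, output,
//       input_stride, output_increment, input_offset, zero_buffer, &params);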