// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

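// Depthwise convolution micro-kernel for WAsm: "up2x9" processes channels in
// groups of up to 2 with 9 kernel taps per output pixel, and "acc2" keeps two
// partial accumulators per channel that are summed before clamping and store.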
void xnn_f32_dwconv_minmax_ukernel_up2x9__wasm_acc2(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  const float vmin = params->scalar.min;
  const float vmax = params->scalar.max;
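  // Each iteration of the outer loop produces one output pixel: all `channels`
  // outputs computed from the 9 input rows addressed by the current `input` entry.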
  do {
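    // Gather the 9 input row pointers for this output pixel. A pointer equal to
    // `zero` refers to the shared zero (padding) buffer and is left untouched;
    // every other pointer is shifted by `input_offset` bytes.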
    const float* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
    }
    const float* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
    }
    const float* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
    }
    const float* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
    }
    const float* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
    }
    const float* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
    }
    const float* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
    }
    const float* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const float*) ((uintptr_t) i7 + input_offset);
    }
    const float* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const float*) ((uintptr_t) i8 + input_offset);
    }
    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
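    // Main loop: two channels per iteration. For each pair, `w` holds 2 biases
    // followed by 2 interleaved weights for each of the 9 taps, i.e. 20 floats
    // consumed per iteration (see `w += 20` below).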
    for (; c >= 2; c -= 2) {
      float vacc0p0 = w[0];
      float vacc1p0 = w[1];


      const float vi0x0 = i0[0];
      const float vi0x1 = i0[1];
      i0 += 2;

      const float vk0x0 = w[2];
      vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
      const float vk0x1 = w[3];
      vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);

      const float vi1x0 = i1[0];
      const float vi1x1 = i1[1];
      i1 += 2;

      const float vk1x0 = w[4];
      float vacc0p1 = vi1x0 * vk1x0;
      const float vk1x1 = w[5];
      float vacc1p1 = vi1x1 * vk1x1;

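      // vacc0p1/vacc1p1 above start the second accumulator chain (the "acc2" in
      // the kernel name); it runs independently of vacc0p0/vacc1p0 and is folded
      // back in after the last tap to shorten the accumulator dependency chain.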
      const float vi2x0 = i2[0];
      const float vi2x1 = i2[1];
      i2 += 2;

      const float vk2x0 = w[6];
      vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
      const float vk2x1 = w[7];
      vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);

      const float vi3x0 = i3[0];
      const float vi3x1 = i3[1];
      i3 += 2;

      const float vk3x0 = w[8];
      vacc0p1 = math_muladd_f32(vi3x0, vk3x0, vacc0p1);
      const float vk3x1 = w[9];
      vacc1p1 = math_muladd_f32(vi3x1, vk3x1, vacc1p1);

      const float vi4x0 = i4[0];
      const float vi4x1 = i4[1];
      i4 += 2;

      const float vk4x0 = w[10];
      vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
      const float vk4x1 = w[11];
      vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);

      const float vi5x0 = i5[0];
      const float vi5x1 = i5[1];
      i5 += 2;

      const float vk5x0 = w[12];
      vacc0p1 = math_muladd_f32(vi5x0, vk5x0, vacc0p1);
      const float vk5x1 = w[13];
      vacc1p1 = math_muladd_f32(vi5x1, vk5x1, vacc1p1);

      const float vi6x0 = i6[0];
      const float vi6x1 = i6[1];
      i6 += 2;

      const float vk6x0 = w[14];
      vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
      const float vk6x1 = w[15];
      vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);

      const float vi7x0 = i7[0];
      const float vi7x1 = i7[1];
      i7 += 2;

      const float vk7x0 = w[16];
      vacc0p1 = math_muladd_f32(vi7x0, vk7x0, vacc0p1);
      const float vk7x1 = w[17];
      vacc1p1 = math_muladd_f32(vi7x1, vk7x1, vacc1p1);

      const float vi8x0 = i8[0];
      const float vi8x1 = i8[1];
      i8 += 2;

      const float vk8x0 = w[18];
      vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
      const float vk8x1 = w[19];
      vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);

      w += 20;

      // Add up all accumulators to vacc0p0 and vacc1p0
      vacc0p0 = vacc0p0 + vacc0p1;
      vacc1p0 = vacc1p0 + vacc1p1;

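      // Clamp the results to the [vmin, vmax] output range.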
      float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
      float vacc1 = __builtin_wasm_max_f32(vacc1p0, vmin);

      vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
      vacc1 = __builtin_wasm_min_f32(vacc1, vmax);

      output[0] = vacc0;
      output[1] = vacc1;
      output += 2;
    }
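    // Remainder: at most one channel is left (the channel tile is 2). The weights
    // stay packed in channel pairs, so after skipping the bias with `*w++` the
    // channel-0 taps are read at the odd offsets w[1], w[3], ..., w[17].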
    for (; c >= 1; c -= 1) {
      float vacc0p0 = *w++;

      const float vi0 = *i0++;
      const float vk0 = w[1];
      vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
      const float vi1 = *i1++;
      const float vk1 = w[3];
      float vacc0p1 = vi1 * vk1;
      const float vi2 = *i2++;
      const float vk2 = w[5];
      vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
      const float vi3 = *i3++;
      const float vk3 = w[7];
      vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
      const float vi4 = *i4++;
      const float vk4 = w[9];
      vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
      const float vi5 = *i5++;
      const float vk5 = w[11];
      vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
      const float vi6 = *i6++;
      const float vk6 = w[13];
      vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
      const float vi7 = *i7++;
      const float vk7 = w[15];
      vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
      const float vi8 = *i8++;
      const float vk8 = w[17];
      vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);

      // Add up all accumulators to vacc0p0
      vacc0p0 = vacc0p0 + vacc0p1;

      float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
      vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
      *output++ = vacc0;
    }

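    // Advance to the next output pixel; `output_increment` covers any gap left
    // after the `channels` floats written above.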
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}