// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

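// Scalar depthwise convolution microkernel for WebAssembly: unipass, 9 kernel
// taps (e.g. a 3x3 window), processing up to 2 channels per inner-loop
// iteration ("up2x9"). Requantization uses the fp32 path with the "fmagic"
// float-to-int trick and per-channel (qc8) scales.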
void xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

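  // Requantization parameters: the output clamping bounds are pre-offset by the
  // output zero point, and the magic-bias constants drive the float-to-int
  // conversion trick used at the end of each channel group.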
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
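    // One output pixel per outer-loop iteration: load the 9 input row pointers.
    // Rows equal to the `zero` pointer reference the zero-padding buffer and
    // are deliberately not shifted by input_offset.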
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
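    // Per 2-channel group the packed weights hold: 2 int32 biases, then 18 int8
    // filter taps interleaved tap-by-tap for the two channels, then 2 float
    // per-channel scales. The main loop below consumes 2 channels per iteration.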
    for (; c >= 2; c -= 2) {
      int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
      int32_t vacc1 = unaligned_indexed_load_s32(w, 1);


      const int32_t vi0x0 = (int32_t) i0[0];
      const int32_t vi0x1 = (int32_t) i0[1];
      i0 += 2;

      const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
      const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];

      vacc0 += vi0x0 * vk0x0;
      vacc1 += vi0x1 * vk0x1;

      const int32_t vi1x0 = (int32_t) i1[0];
      const int32_t vi1x1 = (int32_t) i1[1];
      i1 += 2;

      const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
      const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];

      vacc0 += vi1x0 * vk1x0;
      vacc1 += vi1x1 * vk1x1;

      const int32_t vi2x0 = (int32_t) i2[0];
      const int32_t vi2x1 = (int32_t) i2[1];
      i2 += 2;

      const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
      const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];

      vacc0 += vi2x0 * vk2x0;
      vacc1 += vi2x1 * vk2x1;

      const int32_t vi3x0 = (int32_t) i3[0];
      const int32_t vi3x1 = (int32_t) i3[1];
      i3 += 2;

      const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
      const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];

      vacc0 += vi3x0 * vk3x0;
      vacc1 += vi3x1 * vk3x1;

      const int32_t vi4x0 = (int32_t) i4[0];
      const int32_t vi4x1 = (int32_t) i4[1];
      i4 += 2;

      const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
      const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];

      vacc0 += vi4x0 * vk4x0;
      vacc1 += vi4x1 * vk4x1;

      const int32_t vi5x0 = (int32_t) i5[0];
      const int32_t vi5x1 = (int32_t) i5[1];
      i5 += 2;

      const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
      const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];

      vacc0 += vi5x0 * vk5x0;
      vacc1 += vi5x1 * vk5x1;

      const int32_t vi6x0 = (int32_t) i6[0];
      const int32_t vi6x1 = (int32_t) i6[1];
      i6 += 2;

      const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
      const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];

      vacc0 += vi6x0 * vk6x0;
      vacc1 += vi6x1 * vk6x1;

      const int32_t vi7x0 = (int32_t) i7[0];
      const int32_t vi7x1 = (int32_t) i7[1];
      i7 += 2;

      const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
      const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];

      vacc0 += vi7x0 * vk7x0;
      vacc1 += vi7x1 * vk7x1;

      const int32_t vi8x0 = (int32_t) i8[0];
      const int32_t vi8x1 = (int32_t) i8[1];
      i8 += 2;

      const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
      const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17];

      vacc0 += vi8x0 * vk8x0;
      vacc1 += vi8x1 * vk8x1;

      w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t));

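      // Requantize: convert the int32 accumulators to float and scale each
      // channel by its own quantization scale (qc8 = per-channel quantization).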
      float vfpacc0 = (float) vacc0;
      float vfpacc1 = (float) vacc1;

      const float vscale0 = unaligned_indexed_load_f32(w, 0);
      const float vscale1 = unaligned_indexed_load_f32(w, 1);
      w = (const void*) ((const float*) w + 2);

      vfpacc0 *= vscale0;
      vfpacc1 *= vscale1;

      vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
      vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);

      vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
      vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);

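      // "fmagic" conversion: adding the magic bias rounds the value and places
      // the integer result in the low mantissa bits; reinterpreting the bits and
      // subtracting magic_bias_less_output_zero_point recovers the rounded
      // integer with the output zero point already added.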
      vfpacc0 += vmagic_bias;
      vfpacc1 += vmagic_bias;

      int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
      int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;

      output[0] = (int8_t) vout0;
      output[1] = (int8_t) vout1;
      output += 2;
    }
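    // Remainder path: an odd channel count leaves one channel, handled with the
    // same accumulate / scale / clamp / fmagic sequence as the main loop.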
    if XNN_UNLIKELY(c != 0) {
      int32_t vacc = unaligned_load_s32(w);

      const int32_t vi0 = (int32_t) *i0;
      const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
      vacc += vi0 * vk0;
      const int32_t vi1 = (int32_t) *i1;
      const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
      vacc += vi1 * vk1;
      const int32_t vi2 = (int32_t) *i2;
      const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
      vacc += vi2 * vk2;
      const int32_t vi3 = (int32_t) *i3;
      const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
      vacc += vi3 * vk3;
      const int32_t vi4 = (int32_t) *i4;
      const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
      vacc += vi4 * vk4;
      const int32_t vi5 = (int32_t) *i5;
      const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
      vacc += vi5 * vk5;
      const int32_t vi6 = (int32_t) *i6;
      const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
      vacc += vi6 * vk6;
      const int32_t vi7 = (int32_t) *i7;
      const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
      vacc += vi7 * vk7;
      const int32_t vi8 = (int32_t) *i8;
      const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
      vacc += vi8 * vk8;

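      // The scale for the leftover channel is read from just past the (presumably
      // tile-padded) 2-channel bias and weight block, since `w` was not advanced
      // on this path.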
      typedef XNN_UNALIGNED float unaligned_float;
      const float vscale = *((const unaligned_float*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t)));
      float vfpacc = (float) vacc * vscale;

      vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;

      *output++ = (int8_t) vout;
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}