1 // Copyright (c) Facebook, Inc. and its affiliates.
2 // All rights reserved.
3 //
4 // Copyright 2019 Google LLC
5 //
6 // This source code is licensed under the BSD-style license found in the
7 // LICENSE file in the root directory of this source tree.
8
9 #include <assert.h>
10 #include <math.h>
11 #include <stdbool.h>
12 #include <stddef.h>
13 #include <stdint.h>
14 #include <stdlib.h>
15 #include <string.h>
16
17 #include <fp16.h>
18
19 #include <xnnpack.h>
20 #include <xnnpack/allocator.h>
21 #include <xnnpack/common.h>
22 #include <xnnpack/compute.h>
23 #include <xnnpack/indirection.h>
24 #include <xnnpack/log.h>
25 #include <xnnpack/math.h>
26 #include <xnnpack/operator.h>
27 #include <xnnpack/pack.h>
28 #include <xnnpack/params.h>
29 #include <xnnpack/params-init.h>
30
31
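// Standard convolution output size: doz() clamps (padded_input - effective_kernel) at zero,
// so the result is ((padded_input - effective_kernel) / stride) + 1, where the effective
// kernel extent accounts for dilation: (kernel - 1) * dilation + 1.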
32 static inline size_t compute_output_dimension(
33 size_t padded_input_dimension,
34 size_t kernel_dimension,
35 size_t dilation_dimension,
36 size_t subsampling_dimension)
37 {
38 const size_t effective_kernel_dimension = (kernel_dimension - 1) * dilation_dimension + 1;
39 return doz(padded_input_dimension, effective_kernel_dimension) / subsampling_dimension + 1;
40 }
41
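// TensorFlow SAME padding: the output size depends only on the input size and stride,
// output = ceil(input / stride); the implied padding amounts are computed later during setup.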
42 static inline size_t compute_output_dimension_with_tf_same_padding(
43 size_t input_dimension,
44 size_t subsampling_dimension)
45 {
46 return divide_round_up(input_dimension, subsampling_dimension);
47 }
48
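// Linear search of a depthwise microkernel table for an entry whose primary tile matches the
// kernel size exactly (e.g. 9 for a 3x3 kernel); returns NULL when no entry matches.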
49 static inline const struct dwconv_parameters* find_dwconv_ukernel(
50 size_t kernel_size,
51 const struct dwconv_parameters* ukernel,
52 size_t num_ukernels)
53 {
54 while (num_ukernels-- != 0) {
55 if (ukernel->primary_tile == kernel_size) {
56 return ukernel;
57 }
58 ukernel++;
59 }
60 return NULL;
61 }
62
63 static enum xnn_status create_convolution2d_nhwc(
64 uint32_t input_padding_top,
65 uint32_t input_padding_right,
66 uint32_t input_padding_bottom,
67 uint32_t input_padding_left,
68 uint32_t kernel_height,
69 uint32_t kernel_width,
70 uint32_t subsampling_height,
71 uint32_t subsampling_width,
72 uint32_t dilation_height,
73 uint32_t dilation_width,
74 uint32_t groups,
75 size_t group_input_channels,
76 size_t group_output_channels,
77 size_t input_channel_stride,
78 size_t output_channel_stride,
79 const void* kernel,
80 const void* bias,
81 uint32_t flags,
82 uint32_t log2_input_element_size,
83 uint32_t log2_filter_element_size,
84 uint32_t bias_element_size,
85 xnn_pack_vmulcaddc_w_function pack_vmulcaddc_w,
86 xnn_pack_dwconv_hwg_w_function pack_dwconv_hwg_w,
87 xnn_pack_dwconv_ghw_w_function pack_dwconv_ghw_w,
88 xnn_pack_gemm_goi_w_function pack_gemm_goi_w,
89 xnn_pack_conv_kgo_w_function pack_conv_kgo_w,
90 xnn_pack_conv_goki_w_function pack_conv_goki_w,
91 const void* packing_params,
92 int input_padding_byte,
93 int packed_weights_padding_byte,
94 size_t extra_weights_bytes,
95 xnn_init_qc8_scale_params_fn init_scale_params,
96 const float* scale_params,
97 const void* gemm_params,
98 size_t gemm_params_size,
99 const void* dwconv_params,
100 size_t dwconv_params_size,
101 const void* vmulcaddc_params,
102 size_t vmulcaddc_params_size,
103 const struct gemm_parameters* gemm_parameters,
104 const struct dwconv_parameters* dwconv_ukernel,
105 const struct vmulcaddc_parameters* vmulcaddc_parameters,
106 struct jit_gemm_params* jit_gemm_params,
107 bool linear_activation,
108 bool relu_activation,
109 uint32_t datatype_init_flags,
110 enum xnn_operator_type operator_type,
111 xnn_operator_t* convolution_op_out)
112 {
113 xnn_operator_t convolution_op = NULL;
114 enum xnn_status status = xnn_status_uninitialized;
115
116 if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
117 xnn_log_error(
118 "failed to create %s operator: XNNPACK is not initialized",
119 xnn_operator_type_to_string(operator_type));
120 goto error;
121 }
122
123 status = xnn_status_unsupported_hardware;
124
125 if ((xnn_params.init_flags & datatype_init_flags) != datatype_init_flags) {
126 xnn_log_error(
127 "failed to create %s operator: operations on data type are not supported",
128 xnn_operator_type_to_string(operator_type));
129 goto error;
130 }
131
132 status = xnn_status_invalid_parameter;
133
134 if (kernel_width == 0 || kernel_height == 0) {
135 xnn_log_error(
136 "failed to create %s operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
137 xnn_operator_type_to_string(operator_type), kernel_width, kernel_height);
138 goto error;
139 }
140
141 if (subsampling_width == 0 || subsampling_height == 0) {
142 xnn_log_error(
143 "failed to create %s operator with %" PRIu32 "x%" PRIu32 " subsampling: subsampling dimensions must be non-zero",
144 xnn_operator_type_to_string(operator_type), subsampling_width, subsampling_height);
145 goto error;
146 }
147
148 if (dilation_width == 0 || dilation_height == 0) {
149 xnn_log_error(
150 "failed to create %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
151 xnn_operator_type_to_string(operator_type), dilation_width, dilation_height);
152 goto error;
153 }
154
155 if (groups == 0) {
156 xnn_log_error(
157 "failed to create %s operator with %" PRIu32 " groups: number of groups must be non-zero",
158 xnn_operator_type_to_string(operator_type), groups);
159 goto error;
160 }
161
162 if (group_input_channels == 0) {
163 xnn_log_error(
164 "failed to create %s operator with %zu input channels per group: number of channels must be non-zero",
165 xnn_operator_type_to_string(operator_type), group_input_channels);
166 goto error;
167 }
168
169 if (group_output_channels == 0) {
170 xnn_log_error(
171 "failed to create %s operator with %zu output channels per group: number of channels must be non-zero",
172 xnn_operator_type_to_string(operator_type), group_output_channels);
173 goto error;
174 }
175
176 const size_t input_channels = groups * group_input_channels;
177 if (input_channel_stride < input_channels) {
178 xnn_log_error(
179 "failed to create %s operator with input channel stride of %zu: "
180 "stride must be at least as large as the number of input channels (%" PRIu32 "x%zu)",
181 xnn_operator_type_to_string(operator_type),
182 input_channel_stride, groups, group_input_channels);
183 goto error;
184 }
185
186 const size_t output_channels = groups * group_output_channels;
187 if (output_channel_stride < output_channels) {
188 xnn_log_error(
189 "failed to create %s operator with output channel stride of %zu: "
190 "stride must be at least as large as the number of output channels (%" PRIu32 "x%zu)",
191 xnn_operator_type_to_string(operator_type),
192 output_channel_stride, groups, group_output_channels);
193 goto error;
194 }
195
196 if ((flags & XNN_FLAG_DEPTHWISE_CONVOLUTION) != 0 && group_input_channels != 1) {
197 xnn_log_error(
198 "failed to create depthwise %s operator with %zu input channels per group: "
199 "depthwise convolution must have exactly 1 input channel per group",
200 xnn_operator_type_to_string(operator_type), group_input_channels);
201 goto error;
202 }
203
204 const bool any_padding = (input_padding_left | input_padding_top | input_padding_right | input_padding_bottom) != 0;
205 if ((flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
206 if (any_padding) {
207 xnn_log_error(
208 "failed to create %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
209 "TensorFlow SAME padding can't be combined with explicit padding specification",
210 xnn_operator_type_to_string(operator_type),
211 input_padding_top, input_padding_left, input_padding_bottom, input_padding_right);
212 goto error;
213 }
214 }
215
216 status = xnn_status_out_of_memory;
217
218 convolution_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
219 if (convolution_op == NULL) {
220 xnn_log_error(
221 "failed to allocate %zu bytes for %s operator descriptor",
222 sizeof(struct xnn_operator), xnn_operator_type_to_string(operator_type));
223 goto error;
224 }
225
226 const size_t kernel_size = kernel_height * kernel_width;
227
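// Microkernel selection: a 1x1, unit-stride, unpadded convolution with one input and one
// output channel per group is a channelwise multiply-add (vmulcaddc); other cases with one
// input and one output channel per group use a depthwise microkernel when one matches the
// kernel size; a 1x1, unit-stride, unpadded convolution maps directly to GEMM; everything
// else goes through indirect GEMM (IGEMM).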
228 enum xnn_ukernel_type ukernel_type = xnn_ukernel_type_default;
229 const bool unit_subsampling = (subsampling_width | subsampling_height) == 1;
230 if (group_input_channels == 1 && group_output_channels == 1 && kernel_size == 1 && unit_subsampling && !any_padding && vmulcaddc_parameters != NULL) {
231 ukernel_type = xnn_ukernel_type_vmulcaddc;
232 } else if (group_input_channels == 1 && group_output_channels == 1 && dwconv_ukernel != NULL)
233 {
234 ukernel_type = xnn_ukernel_type_dwconv;
235 } else if (kernel_size == 1 && unit_subsampling && !any_padding) {
236 ukernel_type = xnn_ukernel_type_gemm;
237 } else {
238 ukernel_type = xnn_ukernel_type_igemm;
239 }
240 assert(ukernel_type != xnn_ukernel_type_default);
241
242 size_t zero_size = 0;
243 switch (ukernel_type) {
244 case xnn_ukernel_type_vmulcaddc:
245 {
246 assert(vmulcaddc_parameters != NULL);
247 assert(vmulcaddc_params != NULL);
248
249 const size_t c_stride = round_up_po2(groups, vmulcaddc_parameters->channel_tile);
250 const size_t packed_weights_size = ((UINT32_C(1) << log2_filter_element_size) + bias_element_size) * c_stride;
251 convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
252 if (convolution_op->packed_weights == NULL) {
253 xnn_log_error(
254 "failed to allocate %zu bytes for %s operator packed weights",
255 packed_weights_size, xnn_operator_type_to_string(operator_type));
256 goto error;
257 }
258
259 pack_vmulcaddc_w(
260 groups, vmulcaddc_parameters->channel_tile,
261 kernel, bias, convolution_op->packed_weights, packing_params);
262
263 memcpy(&convolution_op->params, vmulcaddc_params, vmulcaddc_params_size);
264
265 convolution_op->ukernel.vmulcaddc = (struct xnn_ukernel_vmulcaddc) {
266 .function = vmulcaddc_parameters->ukernel,
267 .mr = vmulcaddc_parameters->row_tile,
268 };
269 break;
270 }
271 case xnn_ukernel_type_dwconv:
272 {
273 assert(dwconv_ukernel != NULL);
274 assert(dwconv_ukernel->primary_tile == kernel_size);
275
276 const size_t c_stride = round_up_po2(groups, dwconv_ukernel->channel_tile);
277 const size_t packed_weights_size = ((kernel_size << log2_filter_element_size) + bias_element_size + extra_weights_bytes) * c_stride;
278 convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
279 if (convolution_op->packed_weights == NULL) {
280 xnn_log_error(
281 "failed to allocate %zu bytes for %s operator packed weights",
282 packed_weights_size, xnn_operator_type_to_string(operator_type));
283 goto error;
284 }
285 memset(convolution_op->packed_weights, packed_weights_padding_byte, packed_weights_size);
286 memcpy(&convolution_op->params, dwconv_params, dwconv_params_size);
287
288 if (flags & XNN_FLAG_DEPTHWISE_CONVOLUTION) {
289 pack_dwconv_hwg_w(
290 kernel_height, kernel_width,
291 groups, dwconv_ukernel->channel_tile,
292 kernel, bias, convolution_op->packed_weights,
293 dwconv_ukernel->channel_tile * extra_weights_bytes,
294 packing_params);
295 } else {
296 pack_dwconv_ghw_w(
297 kernel_height, kernel_width,
298 groups, dwconv_ukernel->channel_tile,
299 kernel, bias, convolution_op->packed_weights,
300 dwconv_ukernel->channel_tile * extra_weights_bytes,
301 packing_params);
302 }
303
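// For channelwise-quantized (QC8) weights, per-channel scale values are written into the
// extra bytes reserved after the bias and kernel weights within each channel tile of the
// packed weights.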
304 if (scale_params != NULL) {
305 assert(init_scale_params != NULL);
306
307 init_scale_params(
308 groups, dwconv_ukernel->channel_tile,
309 dwconv_ukernel->channel_tile * ((kernel_size << log2_filter_element_size) + bias_element_size + extra_weights_bytes),
310 scale_params,
311 (void*) ((uintptr_t) convolution_op->packed_weights + dwconv_ukernel->channel_tile * ((kernel_size << log2_filter_element_size) + bias_element_size)));
312 }
313
314 const union dwconv_fused_ukernels* ukernels = &dwconv_ukernel->minmax;
315 if (linear_activation && dwconv_ukernel->linear.unipass != NULL) {
316 ukernels = &dwconv_ukernel->linear;
317 }
318 convolution_op->ukernel.dwconv = (struct xnn_ukernel_dwconv) {
319 .unipass_function = ukernels->unipass,
320 .primary_tile = dwconv_ukernel->primary_tile,
321 .incremental_tile = dwconv_ukernel->incremental_tile,
322 };
323
324 zero_size = XNN_EXTRA_BYTES + (c_stride << log2_input_element_size);
325 break;
326 }
327 case xnn_ukernel_type_gemm:
328 case xnn_ukernel_type_igemm:
329 {
330 const uint32_t nr = gemm_parameters->nr;
331 const uint32_t kr = UINT32_C(1) << gemm_parameters->log2_kr;
332 const uint32_t sr = UINT32_C(1) << gemm_parameters->log2_sr;
333 const size_t n_stride = round_up(group_output_channels, nr);
334 const size_t k_stride = round_up_po2(group_input_channels, kr * sr);
335
336 const size_t packed_group_weights_size = ((kernel_size * k_stride << log2_filter_element_size) + bias_element_size + extra_weights_bytes) * n_stride;
337 convolution_op->packed_weights = xnn_allocate_simd_memory(packed_group_weights_size * groups);
338 if (convolution_op->packed_weights == NULL) {
339 xnn_log_error(
340 "failed to allocate %zu bytes for %s operator packed weights",
341 packed_group_weights_size * groups, xnn_operator_type_to_string(operator_type));
342 goto error;
343 }
344 memset(convolution_op->packed_weights, packed_weights_padding_byte, packed_group_weights_size * groups);
345 memcpy(&convolution_op->params, gemm_params, gemm_params_size);
346
347 const struct gemm_fused_ukernels* gemm_ukernels = &gemm_parameters->minmax;
348 if (linear_activation && gemm_parameters->linear.gemm.function[XNN_UARCH_DEFAULT] != NULL) {
349 gemm_ukernels = &gemm_parameters->linear;
350 } else if (relu_activation && gemm_parameters->relu.gemm.function[XNN_UARCH_DEFAULT] != NULL) {
351 gemm_ukernels = &gemm_parameters->relu;
352 }
353 switch (ukernel_type) {
354 case xnn_ukernel_type_gemm:
355 pack_gemm_goi_w(
356 groups, group_output_channels, group_input_channels,
357 nr, kr, sr,
358 kernel, bias, convolution_op->packed_weights, gemm_parameters->nr * extra_weights_bytes, packing_params);
359 convolution_op->ukernel.gemm = (struct xnn_ukernel_gemm) {
360 .mr = gemm_parameters->mr,
361 .nr = nr,
362 .kr = kr,
363 .sr = sr,
364 .general_case = gemm_ukernels->gemm,
365 .mr1_case = gemm_ukernels->gemm1,
366 };
367
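// On platforms with JIT support, attempt to generate a specialized GEMM microkernel at
// operator creation time; if code generation fails, the statically selected microkernel
// assigned above is kept.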
368 #if XNN_PLATFORM_JIT
369 if (gemm_parameters->generator.gemm.function[XNN_UARCH_DEFAULT] != NULL) {
370 struct xnn_code_buffer* code_buffer = &convolution_op->ukernel.gemm.general_code_buffer;
371 if (xnn_status_success != xnn_allocate_code_memory(code_buffer, XNN_DEFAULT_CODE_BUFFER_SIZE)) {
372 break;
373 }
374 const xnn_jit_gemm_code_generator_function gen = gemm_parameters->generator.gemm.function[XNN_UARCH_DEFAULT];
375 if (xnn_status_success == gen(code_buffer, 0, 0, (void*) jit_gemm_params)) {
376 const xnn_gemm_ukernel_function generated_gemm = (xnn_gemm_ukernel_function)code_buffer->code;
377 convolution_op->ukernel.gemm.general_case.function[XNN_UARCH_DEFAULT] = generated_gemm;
378 } else {
379 xnn_release_code_memory(code_buffer);
380 }
381 }
382 #endif // XNN_PLATFORM_JIT
383
384 break;
385 case xnn_ukernel_type_igemm:
386 if (flags & XNN_FLAG_DEPTHWISE_CONVOLUTION) {
387 pack_conv_kgo_w(
388 groups, group_output_channels, kernel_size,
389 nr, kr, sr,
390 kernel, bias, convolution_op->packed_weights, gemm_parameters->nr * extra_weights_bytes, packing_params);
391 } else {
392 pack_conv_goki_w(
393 groups, group_output_channels, kernel_size, group_input_channels,
394 nr, kr, sr,
395 kernel, bias, convolution_op->packed_weights, gemm_parameters->nr * extra_weights_bytes, packing_params);
396 }
397 convolution_op->ukernel.igemm = (struct xnn_ukernel_igemm) {
398 .mr = gemm_parameters->mr,
399 .nr = nr,
400 .kr = kr,
401 .sr = sr,
402 .general_case = gemm_ukernels->igemm,
403 .mr1_case = gemm_ukernels->igemm1,
404 };
405 break;
406 default:
407 XNN_UNREACHABLE;
408 }
409
410 if (scale_params != NULL) {
411 assert(init_scale_params != NULL);
412
413 void* group_weights = (void*)
414 ((uintptr_t) convolution_op->packed_weights + gemm_parameters->nr * ((kernel_size * k_stride << log2_filter_element_size) + bias_element_size));
415 const size_t weights_stride = (kernel_size * k_stride << log2_filter_element_size) + bias_element_size + extra_weights_bytes;
416 for (uint32_t group = 0; group < groups; group++) {
417 init_scale_params(
418 group_output_channels, gemm_parameters->nr,
419 gemm_parameters->nr * weights_stride,
420 scale_params, group_weights);
421 scale_params += group_output_channels;
422 group_weights = (void*) ((uintptr_t) group_weights + n_stride * weights_stride);
423 }
424 }
425
426 zero_size = XNN_EXTRA_BYTES + (k_stride << log2_input_element_size);
427 break;
428 }
429 default:
430 XNN_UNREACHABLE;
431 }
432
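// The zero buffer supplies the values read for out-of-bounds (padded) input positions via
// the indirection buffer; it is filled with the input padding byte, which is the input
// zero point for quantized operators.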
433 const bool tf_same_padding = (flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0 && kernel_size != 1;
434 if (any_padding || tf_same_padding) {
435 convolution_op->zero_buffer = xnn_allocate_simd_memory(zero_size);
436 if (convolution_op->zero_buffer == NULL) {
437 xnn_log_error(
438 "failed to allocate %zu bytes for %s operator zero padding",
439 zero_size, xnn_operator_type_to_string(operator_type));
440 goto error;
441 }
442 memset(convolution_op->zero_buffer, input_padding_byte, zero_size);
443 }
444
445 convolution_op->padding_top = input_padding_top;
446 convolution_op->padding_right = input_padding_right;
447 convolution_op->padding_bottom = input_padding_bottom;
448 convolution_op->padding_left = input_padding_left;
449
450 convolution_op->kernel_height = kernel_height;
451 convolution_op->kernel_width = kernel_width;
452 convolution_op->stride_height = subsampling_height;
453 convolution_op->stride_width = subsampling_width;
454 convolution_op->dilation_height = dilation_height;
455 convolution_op->dilation_width = dilation_width;
456 convolution_op->groups = groups;
457 convolution_op->group_input_channels = group_input_channels;
458 convolution_op->group_output_channels = group_output_channels;
459 convolution_op->input_pixel_stride = input_channel_stride;
460 convolution_op->output_pixel_stride = output_channel_stride;
461
462 convolution_op->type = operator_type;
463 convolution_op->ukernel.type = ukernel_type;
464 convolution_op->flags = flags & ~XNN_FLAG_TENSORFLOW_SAME_PADDING;
465 if (tf_same_padding) {
466 convolution_op->flags |= XNN_FLAG_TENSORFLOW_SAME_PADDING;
467 }
468
469 convolution_op->state = xnn_run_state_invalid;
470
471 *convolution_op_out = convolution_op;
472 return xnn_status_success;
473
474 error:
475 xnn_delete_operator(convolution_op);
476 return status;
477 }
478
479 enum xnn_status xnn_create_convolution2d_nhwc_qu8(
480 uint32_t input_padding_top,
481 uint32_t input_padding_right,
482 uint32_t input_padding_bottom,
483 uint32_t input_padding_left,
484 uint32_t kernel_height,
485 uint32_t kernel_width,
486 uint32_t subsampling_height,
487 uint32_t subsampling_width,
488 uint32_t dilation_height,
489 uint32_t dilation_width,
490 uint32_t groups,
491 size_t group_input_channels,
492 size_t group_output_channels,
493 size_t input_channel_stride,
494 size_t output_channel_stride,
495 uint8_t input_zero_point,
496 float input_scale,
497 uint8_t kernel_zero_point,
498 float kernel_scale,
499 const uint8_t* kernel,
500 const int32_t* bias,
501 uint8_t output_zero_point,
502 float output_scale,
503 uint8_t output_min,
504 uint8_t output_max,
505 uint32_t flags,
506 xnn_operator_t* convolution_op_out)
507 {
508 if (input_scale <= 0.0f || !isnormal(input_scale)) {
509 xnn_log_error(
510 "failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
511 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qu8), input_scale);
512 return xnn_status_invalid_parameter;
513 }
514
515 if (kernel_scale <= 0.0f || !isnormal(kernel_scale)) {
516 xnn_log_error(
517 "failed to create %s operator with %.7g kernel scale: scale must be finite, normalized, and positive",
518 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qu8), kernel_scale);
519 return xnn_status_invalid_parameter;
520 }
521
522 if (output_scale <= 0.0f || !isnormal(output_scale)) {
523 xnn_log_error(
524 "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
525 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qu8), output_scale);
526 return xnn_status_invalid_parameter;
527 }
528
529 if (output_min >= output_max) {
530 xnn_log_error(
531 "failed to create %s operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
532 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qu8), output_min, output_max);
533 return xnn_status_invalid_parameter;
534 }
535
536 const float requantization_scale = input_scale * kernel_scale / output_scale;
537 if (requantization_scale >= 256.0f) {
538 xnn_log_error(
539 "failed to create %s operator with %.7g input scale, %.7g kernel scale, and %.7g output scale: "
540 "requantization scale %.7g is greater or equal to 256.0",
541 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qu8),
542 input_scale, kernel_scale, output_scale, requantization_scale);
543 return xnn_status_unsupported_parameter;
544 }
545
546 const struct xnn_qu8_packing_params packing_params = {
547 .input_zero_point = input_zero_point,
548 .kernel_zero_point = kernel_zero_point,
549 };
550
551
552 union xnn_qu8_conv_minmax_params gemm_params;
553 if XNN_LIKELY(xnn_params.qu8.gemm.init.qu8 != NULL) {
554 xnn_params.qu8.gemm.init.qu8(&gemm_params,
555 kernel_zero_point, requantization_scale, output_zero_point, output_min, output_max);
556 }
557
558 union xnn_qu8_conv_minmax_params dwconv_params;
559 const struct dwconv_parameters* dwconv_ukernel =
560 find_dwconv_ukernel(kernel_height * kernel_width, xnn_params.qu8.dwconv, XNN_MAX_QU8_DWCONV_UKERNELS);
561 if XNN_LIKELY(dwconv_ukernel != NULL) {
562 dwconv_ukernel->init.qu8(&dwconv_params,
563 kernel_zero_point, requantization_scale, output_zero_point, output_min, output_max);
564 }
565
566 return create_convolution2d_nhwc(
567 input_padding_top, input_padding_right, input_padding_bottom, input_padding_left,
568 kernel_height, kernel_width,
569 subsampling_height, subsampling_width,
570 dilation_height, dilation_width,
571 groups, group_input_channels, group_output_channels,
572 input_channel_stride, output_channel_stride,
573 kernel, bias, flags,
574 0 /* log2(sizeof(input element)) = log2(sizeof(uint8_t)) */,
575 0 /* log2(sizeof(filter element)) = log2(sizeof(uint8_t)) */,
576 sizeof(int32_t) /* sizeof(bias element) */,
577 (xnn_pack_vmulcaddc_w_function) NULL,
578 (xnn_pack_dwconv_hwg_w_function) xnn_pack_qu8_dwconv_hwg_w,
579 (xnn_pack_dwconv_ghw_w_function) xnn_pack_qu8_dwconv_ghw_w,
580 (xnn_pack_gemm_goi_w_function) xnn_pack_qu8_gemm_goi_w,
581 (xnn_pack_conv_kgo_w_function) xnn_pack_qu8_conv_kgo_w,
582 (xnn_pack_conv_goki_w_function) xnn_pack_qu8_conv_goki_w,
583 &packing_params, input_zero_point /* input padding byte */, kernel_zero_point /* packed weights padding byte */,
584 0 /* extra weights bytes */, NULL /* init scale params fn */, NULL /* scale params */,
585 &gemm_params, sizeof(gemm_params),
586 &dwconv_params, sizeof(dwconv_params),
587 NULL /* vmulcaddc params */, 0,
588 &xnn_params.qu8.gemm, dwconv_ukernel, NULL /* vmulcaddc parameters */,
589 NULL /* jit_gemm_params */,
590 false /* linear activation */, false /* relu activation */, XNN_INIT_FLAG_QU8,
591 xnn_operator_type_convolution_nhwc_qu8,
592 convolution_op_out);
593 }
594
595 enum xnn_status xnn_create_convolution2d_nhwc_qs8(
596 uint32_t input_padding_top,
597 uint32_t input_padding_right,
598 uint32_t input_padding_bottom,
599 uint32_t input_padding_left,
600 uint32_t kernel_height,
601 uint32_t kernel_width,
602 uint32_t subsampling_height,
603 uint32_t subsampling_width,
604 uint32_t dilation_height,
605 uint32_t dilation_width,
606 uint32_t groups,
607 size_t group_input_channels,
608 size_t group_output_channels,
609 size_t input_channel_stride,
610 size_t output_channel_stride,
611 int8_t input_zero_point,
612 float input_scale,
613 float kernel_scale,
614 const int8_t* kernel,
615 const int32_t* bias,
616 int8_t output_zero_point,
617 float output_scale,
618 int8_t output_min,
619 int8_t output_max,
620 uint32_t flags,
621 xnn_operator_t* convolution_op_out)
622 {
623 if (input_scale <= 0.0f || !isnormal(input_scale)) {
624 xnn_log_error(
625 "failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
626 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qs8), input_scale);
627 return xnn_status_invalid_parameter;
628 }
629
630 if (kernel_scale <= 0.0f || !isnormal(kernel_scale)) {
631 xnn_log_error(
632 "failed to create %s operator with %.7g kernel scale: scale must be finite, normalized, and positive",
633 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qs8), kernel_scale);
634 return xnn_status_invalid_parameter;
635 }
636
637 if (output_scale <= 0.0f || !isnormal(output_scale)) {
638 xnn_log_error(
639 "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
640 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qs8), output_scale);
641 return xnn_status_invalid_parameter;
642 }
643
644 if (output_min >= output_max) {
645 xnn_log_error(
646 "failed to create %s operator with [%" PRId8 ", %" PRId8 "] output range: range min must be below range max",
647 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qs8), output_min, output_max);
648 return xnn_status_invalid_parameter;
649 }
650
651 const float requantization_scale = input_scale * kernel_scale / output_scale;
652 if (requantization_scale >= 256.0f) {
653 xnn_log_error(
654 "failed to create %s operator with %.7g input scale, %.7g kernel scale, and %.7g output scale: "
655 "requantization scale %.7g is greater or equal to 256.0",
656 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qs8),
657 input_scale, kernel_scale, output_scale, requantization_scale);
658 return xnn_status_unsupported_parameter;
659 }
660
661 const struct xnn_qs8_packing_params packing_params = { .input_zero_point = input_zero_point, };
662
663 union xnn_qs8_conv_minmax_params gemm_params;
664 if XNN_LIKELY(xnn_params.qs8.gemm.init.qs8 != NULL) {
665 xnn_params.qs8.gemm.init.qs8(&gemm_params,
666 requantization_scale, output_zero_point, output_min, output_max);
667 }
668
669 union xnn_qs8_conv_minmax_params dwconv_params;
670 const struct dwconv_parameters* dwconv_ukernel =
671 find_dwconv_ukernel(kernel_height * kernel_width, xnn_params.qs8.dwconv, XNN_MAX_QS8_DWCONV_UKERNELS);
672 if XNN_LIKELY(dwconv_ukernel != NULL) {
673 dwconv_ukernel->init.qs8(&dwconv_params,
674 requantization_scale, output_zero_point, output_min, output_max);
675 }
676
677 return create_convolution2d_nhwc(
678 input_padding_top, input_padding_right, input_padding_bottom, input_padding_left,
679 kernel_height, kernel_width,
680 subsampling_height, subsampling_width,
681 dilation_height, dilation_width,
682 groups, group_input_channels, group_output_channels,
683 input_channel_stride, output_channel_stride,
684 kernel, bias, flags,
685 0 /* log2(sizeof(input element)) = log2(sizeof(int8_t)) */,
686 0 /* log2(sizeof(filter element)) = log2(sizeof(int8_t)) */,
687 sizeof(int32_t) /* sizeof(bias element) */,
688 (xnn_pack_vmulcaddc_w_function) NULL,
689 (xnn_pack_dwconv_hwg_w_function) xnn_pack_qs8_dwconv_hwg_w,
690 (xnn_pack_dwconv_ghw_w_function) xnn_pack_qs8_dwconv_ghw_w,
691 (xnn_pack_gemm_goi_w_function) xnn_pack_qs8_gemm_goi_w,
692 (xnn_pack_conv_kgo_w_function) xnn_pack_qs8_conv_kgo_w,
693 (xnn_pack_conv_goki_w_function) xnn_pack_qs8_conv_goki_w,
694 &packing_params, input_zero_point /* input padding byte */, 0 /* packed weights padding byte */,
695 0 /* extra weights bytes */, NULL /* init scale params fn */, NULL /* scale params */,
696 &gemm_params, sizeof(gemm_params),
697 &dwconv_params, sizeof(dwconv_params),
698 NULL /* vmulcaddc params */, 0,
699 &xnn_params.qs8.gemm, dwconv_ukernel, NULL /* vmulcaddc parameters */,
700 NULL /* jit_gemm_params */,
701 false /* linear activation */, false /* relu activation */, XNN_INIT_FLAG_QS8,
702 xnn_operator_type_convolution_nhwc_qs8,
703 convolution_op_out);
704 }
705
706 enum xnn_status xnn_create_convolution2d_nhwc_qc8(
707 uint32_t input_padding_top,
708 uint32_t input_padding_right,
709 uint32_t input_padding_bottom,
710 uint32_t input_padding_left,
711 uint32_t kernel_height,
712 uint32_t kernel_width,
713 uint32_t subsampling_height,
714 uint32_t subsampling_width,
715 uint32_t dilation_height,
716 uint32_t dilation_width,
717 uint32_t groups,
718 size_t group_input_channels,
719 size_t group_output_channels,
720 size_t input_channel_stride,
721 size_t output_channel_stride,
722 int8_t input_zero_point,
723 float input_scale,
724 const float* kernel_scale,
725 const int8_t* kernel,
726 const int32_t* bias,
727 int8_t output_zero_point,
728 float output_scale,
729 int8_t output_min,
730 int8_t output_max,
731 uint32_t flags,
732 xnn_operator_t* convolution_op_out)
733 {
734 if (input_scale <= 0.0f || !isnormal(input_scale)) {
735 xnn_log_error(
736 "failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
737 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qc8), input_scale);
738 return xnn_status_invalid_parameter;
739 }
740
741 for (size_t output_channel = 0; output_channel < groups * group_output_channels; output_channel++) {
742 if (kernel_scale[output_channel] <= 0.0f || !isnormal(kernel_scale[output_channel])) {
743 xnn_log_error(
744 "failed to create %s operator with %.7g kernel scale in output channel #%zu: "
745 "scale must be finite, normalized, and positive",
746 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qc8), kernel_scale[output_channel],
747 output_channel);
748 return xnn_status_invalid_parameter;
749 }
750 }
751
752 if (output_scale <= 0.0f || !isnormal(output_scale)) {
753 xnn_log_error(
754 "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
755 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qc8), output_scale);
756 return xnn_status_invalid_parameter;
757 }
758
759 if (output_min >= output_max) {
760 xnn_log_error(
761 "failed to create %s operator with [%" PRId8 ", %" PRId8 "] output range: range min must be below range max",
762 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qc8), output_min, output_max);
763 return xnn_status_invalid_parameter;
764 }
765
766 float* requantization_scale = XNN_SIMD_ALLOCA(groups * group_output_channels * sizeof(float));
767 for (size_t output_channel = 0; output_channel < groups * group_output_channels; output_channel++) {
768 requantization_scale[output_channel] = input_scale * kernel_scale[output_channel] / output_scale;
769 if (requantization_scale[output_channel] >= 256.0f) {
770 xnn_log_error(
771 "failed to create %s operator with %.7g input scale, %.7g kernel scale, and %.7g output scale in output channel #%zu: "
772 "requantization scale %.7g is greater or equal to 256.0",
773 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qc8),
774 input_scale, kernel_scale[output_channel], output_scale,
775 output_channel, requantization_scale[output_channel]);
776 return xnn_status_unsupported_parameter;
777 }
778 }
779
780 const struct xnn_qs8_packing_params packing_params = { .input_zero_point = input_zero_point, };
781
782 union xnn_qs8_minmax_params gemm_params;
783 if XNN_LIKELY(xnn_params.qc8.gemm.init.qc8 != NULL) {
784 xnn_params.qc8.gemm.init.qc8(&gemm_params,
785 output_zero_point, output_min, output_max);
786 }
787
788 union xnn_qs8_minmax_params dwconv_params;
789 const struct dwconv_parameters* dwconv_ukernel =
790 find_dwconv_ukernel(kernel_height * kernel_width, xnn_params.qc8.dwconv, XNN_MAX_QC8_DWCONV_UKERNELS);
791 if XNN_LIKELY(dwconv_ukernel != NULL) {
792 dwconv_ukernel->init.qc8(&dwconv_params,
793 output_zero_point, output_min, output_max);
794 }
795
796 return create_convolution2d_nhwc(
797 input_padding_top, input_padding_right, input_padding_bottom, input_padding_left,
798 kernel_height, kernel_width,
799 subsampling_height, subsampling_width,
800 dilation_height, dilation_width,
801 groups, group_input_channels, group_output_channels,
802 input_channel_stride, output_channel_stride,
803 kernel, bias, flags,
804 0 /* log2(sizeof(input element)) = log2(sizeof(int8_t)) */,
805 0 /* log2(sizeof(filter element)) = log2(sizeof(int8_t)) */,
806 sizeof(int32_t) /* sizeof(bias element) */,
807 (xnn_pack_vmulcaddc_w_function) NULL,
808 (xnn_pack_dwconv_hwg_w_function) xnn_pack_qs8_dwconv_hwg_w,
809 (xnn_pack_dwconv_ghw_w_function) xnn_pack_qs8_dwconv_ghw_w,
810 (xnn_pack_gemm_goi_w_function) xnn_pack_qs8_gemm_goi_w,
811 (xnn_pack_conv_kgo_w_function) xnn_pack_qs8_conv_kgo_w,
812 (xnn_pack_conv_goki_w_function) xnn_pack_qs8_conv_goki_w,
813 &packing_params, input_zero_point /* input padding byte */, 0 /* packed weights padding byte */,
814 sizeof(float) /* extra weights bytes */, xnn_init_qc8_scale_fp32_params, requantization_scale,
815 &gemm_params, sizeof(gemm_params),
816 &dwconv_params, sizeof(dwconv_params),
817 NULL /* vmulcaddc params */, 0,
818 &xnn_params.qc8.gemm, dwconv_ukernel, NULL /* vmulcaddc parameters */,
819 NULL /* jit_gemm_params */,
820 false /* linear activation */, false /* relu activation */, XNN_INIT_FLAG_QC8,
821 xnn_operator_type_convolution_nhwc_qc8,
822 convolution_op_out);
823 }
824
825 enum xnn_status xnn_create_convolution2d_nhwc_f16(
826 uint32_t input_padding_top,
827 uint32_t input_padding_right,
828 uint32_t input_padding_bottom,
829 uint32_t input_padding_left,
830 uint32_t kernel_height,
831 uint32_t kernel_width,
832 uint32_t subsampling_height,
833 uint32_t subsampling_width,
834 uint32_t dilation_height,
835 uint32_t dilation_width,
836 uint32_t groups,
837 size_t group_input_channels,
838 size_t group_output_channels,
839 size_t input_channel_stride,
840 size_t output_channel_stride,
841 const void* kernel,
842 const void* bias,
843 float output_min,
844 float output_max,
845 uint32_t flags,
846 xnn_operator_t* convolution_op_out)
847 {
848 if (isnan(output_min)) {
849 xnn_log_error(
850 "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
851 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f16));
852 return xnn_status_invalid_parameter;
853 }
854
855 if (isnan(output_max)) {
856 xnn_log_error(
857 "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
858 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f16));
859 return xnn_status_invalid_parameter;
860 }
861
862 const uint16_t fp16_output_min = fp16_ieee_from_fp32_value(output_min);
863 const uint16_t fp16_output_max = fp16_ieee_from_fp32_value(output_max);
864 const float rounded_output_min = fp16_ieee_to_fp32_value(fp16_output_min);
865 const float rounded_output_max = fp16_ieee_to_fp32_value(fp16_output_max);
866 if (rounded_output_min >= rounded_output_max) {
867 xnn_log_error(
868 "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
869 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f16), rounded_output_min, rounded_output_max);
870 return xnn_status_invalid_parameter;
871 }
872
873 union xnn_f16_scaleminmax_params gemm_params;
874 if XNN_LIKELY(xnn_params.f16.gemm.init.f16 != NULL) {
875 xnn_params.f16.gemm.init.f16(&gemm_params,
876 UINT16_C(0x3C00) /* 1.0 */, fp16_output_min, fp16_output_max);
877 }
878
879 union xnn_f16_minmax_params dwconv_params;
880 const struct dwconv_parameters* dwconv_ukernel =
881 find_dwconv_ukernel(kernel_height * kernel_width, xnn_params.f16.dwconv, XNN_MAX_F16_DWCONV_UKERNELS);
882 if XNN_LIKELY(dwconv_ukernel != NULL) {
883 dwconv_ukernel->init.f16(&dwconv_params, fp16_output_min, fp16_output_max);
884 }
885
886 union xnn_f16_minmax_params vmulcaddc_params;
887 if XNN_LIKELY(xnn_params.f16.vmulcaddc.init.f16 != NULL) {
888 xnn_params.f16.vmulcaddc.init.f16(&vmulcaddc_params, fp16_output_min, fp16_output_max);
889 }
890
891 xnn_pack_vmulcaddc_w_function pack_vmulcaddc_w = (xnn_pack_vmulcaddc_w_function) xnn_pack_f16_vmulcaddc_w;
892 xnn_pack_dwconv_hwg_w_function pack_dwconv_hwg_w = (xnn_pack_dwconv_hwg_w_function) xnn_pack_f16_dwconv_hwg_w;
893 xnn_pack_dwconv_ghw_w_function pack_dwconv_ghw_w = (xnn_pack_dwconv_ghw_w_function) xnn_pack_f16_dwconv_ghw_w;
894 xnn_pack_gemm_goi_w_function pack_gemm_goi_w = (xnn_pack_gemm_goi_w_function) xnn_pack_f16_gemm_goi_w;
895 xnn_pack_conv_kgo_w_function pack_conv_kgo_w = (xnn_pack_conv_kgo_w_function) xnn_pack_f16_conv_kgo_w;
896 xnn_pack_conv_goki_w_function pack_conv_goki_w = (xnn_pack_conv_goki_w_function) xnn_pack_f16_conv_goki_w;
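// With XNN_FLAG_FP32_STATIC_WEIGHTS the caller provides fp32 weights and bias, so packing
// functions that convert them to fp16 on the fly are substituted.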
897 if (flags & XNN_FLAG_FP32_STATIC_WEIGHTS) {
898 pack_vmulcaddc_w = (xnn_pack_vmulcaddc_w_function) xnn_pack_f32_to_f16_vmulcaddc_w;
899 pack_dwconv_hwg_w = (xnn_pack_dwconv_hwg_w_function) xnn_pack_f32_to_f16_dwconv_hwg_w;
900 pack_dwconv_ghw_w = (xnn_pack_dwconv_ghw_w_function) xnn_pack_f32_to_f16_dwconv_ghw_w;
901 pack_gemm_goi_w = (xnn_pack_gemm_goi_w_function) xnn_pack_f32_to_f16_gemm_goi_w;
902 pack_conv_kgo_w = (xnn_pack_conv_kgo_w_function) xnn_pack_f32_to_f16_conv_kgo_w;
903 pack_conv_goki_w = (xnn_pack_conv_goki_w_function) xnn_pack_f32_to_f16_conv_goki_w;
904 }
905
906 return create_convolution2d_nhwc(
907 input_padding_top, input_padding_right, input_padding_bottom, input_padding_left,
908 kernel_height, kernel_width,
909 subsampling_height, subsampling_width,
910 dilation_height, dilation_width,
911 groups, group_input_channels, group_output_channels,
912 input_channel_stride, output_channel_stride,
913 kernel, bias, flags,
914 1 /* log2(sizeof(input element)) = log2(sizeof(uint16_t)) */,
915 1 /* log2(sizeof(filter element)) = log2(sizeof(uint16_t)) */,
916 sizeof(uint16_t) /* sizeof(bias element) */,
917 pack_vmulcaddc_w,
918 pack_dwconv_hwg_w,
919 pack_dwconv_ghw_w,
920 pack_gemm_goi_w,
921 pack_conv_kgo_w,
922 pack_conv_goki_w,
923 NULL /* packing params */, 0 /* input padding byte */, 0 /* packed weights padding byte */,
924 0 /* extra weights bytes */, NULL /* init scale params fn */, NULL /* scale params */,
925 &gemm_params, sizeof(gemm_params),
926 &dwconv_params, sizeof(dwconv_params),
927 &vmulcaddc_params, sizeof(vmulcaddc_params),
928 &xnn_params.f16.gemm, dwconv_ukernel, &xnn_params.f16.vmulcaddc,
929 NULL /* jit_gemm_params */,
930 false /* linear activation */, false /* relu activation */, XNN_INIT_FLAG_F16,
931 xnn_operator_type_convolution_nhwc_f16,
932 convolution_op_out);
933 }
934
935 enum xnn_status xnn_create_convolution2d_nhwc_f32(
936 uint32_t input_padding_top,
937 uint32_t input_padding_right,
938 uint32_t input_padding_bottom,
939 uint32_t input_padding_left,
940 uint32_t kernel_height,
941 uint32_t kernel_width,
942 uint32_t subsampling_height,
943 uint32_t subsampling_width,
944 uint32_t dilation_height,
945 uint32_t dilation_width,
946 uint32_t groups,
947 size_t group_input_channels,
948 size_t group_output_channels,
949 size_t input_channel_stride,
950 size_t output_channel_stride,
951 const float* kernel,
952 const float* bias,
953 float output_min,
954 float output_max,
955 uint32_t flags,
956 xnn_operator_t* convolution_op_out)
957 {
958 if (isnan(output_min)) {
959 xnn_log_error(
960 "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
961 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32));
962 return xnn_status_invalid_parameter;
963 }
964
965 if (isnan(output_max)) {
966 xnn_log_error(
967 "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
968 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32));
969 return xnn_status_invalid_parameter;
970 }
971
972 if (output_min >= output_max) {
973 xnn_log_error(
974 "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
975 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32), output_min, output_max);
976 return xnn_status_invalid_parameter;
977 }
978
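// Detect clamp-free cases: an output range of [-inf, +inf] needs no clamping (linear
// activation) and [0, +inf] is a plain ReLU, so specialized GEMM microkernels can be used
// when available.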
979 const bool linear_activation = (output_max == INFINITY) && (output_min == -output_max);
980 const bool relu_activation = (output_max == INFINITY) && (output_min == 0.0f);
981
982 union xnn_f32_minmax_params gemm_params;
983 if XNN_LIKELY(xnn_params.f32.gemm.init.f32 != NULL) {
984 xnn_params.f32.gemm.init.f32(&gemm_params, output_min, output_max);
985 }
986
987 struct jit_gemm_params jit_gemm_params = {
988 .f32_minmax = {
989 .min = output_min,
990 .max = output_max
991 }
992 };
993
994 union xnn_f32_minmax_params dwconv_params;
995 const struct dwconv_parameters* dwconv_ukernel =
996 find_dwconv_ukernel(kernel_height * kernel_width, xnn_params.f32.dwconv, XNN_MAX_F32_DWCONV_UKERNELS);
997 if XNN_LIKELY(dwconv_ukernel != NULL) {
998 dwconv_ukernel->init.f32(&dwconv_params, output_min, output_max);
999 }
1000
1001 union xnn_f32_minmax_params vmulcaddc_params;
1002 if XNN_LIKELY(xnn_params.f32.vmulcaddc.init.f32 != NULL) {
1003 xnn_params.f32.vmulcaddc.init.f32(&vmulcaddc_params, output_min, output_max);
1004 }
1005
1006 return create_convolution2d_nhwc(
1007 input_padding_top, input_padding_right, input_padding_bottom, input_padding_left,
1008 kernel_height, kernel_width,
1009 subsampling_height, subsampling_width,
1010 dilation_height, dilation_width,
1011 groups, group_input_channels, group_output_channels,
1012 input_channel_stride, output_channel_stride,
1013 kernel, bias, flags,
1014 2 /* log2(sizeof(input element)) = log2(sizeof(float)) */,
1015 2 /* log2(sizeof(filter element)) = log2(sizeof(float)) */,
1016 sizeof(float) /* sizeof(bias element) */,
1017 (xnn_pack_vmulcaddc_w_function) xnn_pack_f32_vmulcaddc_w,
1018 (xnn_pack_dwconv_hwg_w_function) xnn_pack_f32_dwconv_hwg_w,
1019 (xnn_pack_dwconv_ghw_w_function) xnn_pack_f32_dwconv_ghw_w,
1020 (xnn_pack_gemm_goi_w_function) xnn_pack_f32_gemm_goi_w,
1021 (xnn_pack_conv_kgo_w_function) xnn_pack_f32_conv_kgo_w,
1022 (xnn_pack_conv_goki_w_function) xnn_pack_f32_conv_goki_w,
1023 NULL /* packing params */, 0 /* input padding byte */, 0 /* packed weights padding byte */,
1024 0 /* extra weights bytes */, NULL /* init scale params fn */, NULL /* scale params */,
1025 &gemm_params, sizeof(gemm_params),
1026 &dwconv_params, sizeof(dwconv_params),
1027 &vmulcaddc_params, sizeof(vmulcaddc_params),
1028 &xnn_params.f32.gemm, dwconv_ukernel, &xnn_params.f32.vmulcaddc,
1029 &jit_gemm_params,
1030 linear_activation, relu_activation, XNN_INIT_FLAG_F32,
1031 xnn_operator_type_convolution_nhwc_f32,
1032 convolution_op_out);
1033 }
1034
1035 static enum xnn_status setup_convolution2d_nhwc(
1036 xnn_operator_t convolution_op,
1037 size_t batch_size,
1038 size_t input_height,
1039 size_t input_width,
1040 const void* input,
1041 void* output,
1042 uint32_t datatype_init_flags,
1043 uint32_t log2_input_element_size,
1044 uint32_t log2_filter_element_size,
1045 uint32_t extra_weights_elements_size,
1046 uint32_t log2_output_element_size,
1047 size_t num_threads)
1048 {
1049 convolution_op->state = xnn_run_state_invalid;
1050
1051 if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
1052 xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
1053 xnn_operator_type_to_string(convolution_op->type));
1054 return xnn_status_uninitialized;
1055 }
1056
1057 if ((xnn_params.init_flags & datatype_init_flags) != datatype_init_flags) {
1058 xnn_log_error(
1059 "failed to create %s operator: operations on data type are not supported",
1060 xnn_operator_type_to_string(convolution_op->type));
1061 return xnn_status_unsupported_hardware;
1062 }
1063
1064 if (input_width == 0 || input_height == 0) {
1065 xnn_log_error(
1066 "failed to setup %s operator with %zux%zu input: input dimensions must be non-zero",
1067 xnn_operator_type_to_string(convolution_op->type), input_width, input_height);
1068 return xnn_status_invalid_parameter;
1069 }
1070
1071 if (batch_size == 0) {
1072 convolution_op->state = xnn_run_state_skip;
1073 return xnn_status_success;
1074 }
1075
1076 convolution_op->batch_size = batch_size;
1077 convolution_op->input_height = input_height;
1078 convolution_op->input_width = input_width;
1079 convolution_op->input = input;
1080
1081 if (convolution_op->flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) {
1082 convolution_op->output_height = compute_output_dimension_with_tf_same_padding(
1083 input_height, convolution_op->stride_height);
1084 convolution_op->output_width = compute_output_dimension_with_tf_same_padding(
1085 input_width, convolution_op->stride_width);
1086
1087 const uint32_t effective_kernel_height = (convolution_op->kernel_height - 1) * convolution_op->dilation_height + 1;
1088 const uint32_t effective_kernel_width = (convolution_op->kernel_width - 1) * convolution_op->dilation_width + 1;
1089 const size_t total_padding_height =
1090 (convolution_op->output_height - 1) * convolution_op->stride_height + effective_kernel_height - input_height;
1091 const size_t total_padding_width =
1092 (convolution_op->output_width - 1) * convolution_op->stride_width + effective_kernel_width - input_width;
1093 convolution_op->padding_top = total_padding_height / 2;
1094 convolution_op->padding_left = total_padding_width / 2;
1095 convolution_op->padding_bottom = total_padding_height - convolution_op->padding_top;
1096 convolution_op->padding_right = total_padding_width - convolution_op->padding_left;
1097 } else {
1098 convolution_op->output_height = compute_output_dimension(
1099 convolution_op->padding_top + input_height + convolution_op->padding_bottom,
1100 convolution_op->kernel_height,
1101 convolution_op->dilation_height,
1102 convolution_op->stride_height);
1103 convolution_op->output_width = compute_output_dimension(
1104 convolution_op->padding_left + input_width + convolution_op->padding_right,
1105 convolution_op->kernel_width,
1106 convolution_op->dilation_width,
1107 convolution_op->stride_width);
1108 }
1109 convolution_op->output = output;
1110
1111 switch (convolution_op->ukernel.type) {
1112 case xnn_ukernel_type_gemm:
1113 {
1114 // Convolution maps directly to GEMM and doesn't use indirection buffer.
1115
1116 const size_t output_height = convolution_op->output_height;
1117 const size_t output_width = convolution_op->output_width;
1118 const size_t output_size = output_height * output_width;
1119 const size_t batch_output_size = batch_size * output_size;
1120
1121 const size_t groups = convolution_op->groups;
1122 const size_t group_input_channels = convolution_op->group_input_channels;
1123 const size_t w_stride = extra_weights_elements_size +
1124 (round_up_po2(group_input_channels, convolution_op->ukernel.gemm.kr * convolution_op->ukernel.gemm.sr) << log2_filter_element_size);
1125 const size_t group_output_channels = convolution_op->group_output_channels;
1126
1127 uint32_t mr = convolution_op->ukernel.gemm.mr;
1128 const uint32_t nr = convolution_op->ukernel.gemm.nr;
1129 struct xnn_hmp_gemm_ukernel gemm_ukernel = convolution_op->ukernel.gemm.general_case;
1130 if (batch_output_size == 1 && convolution_op->ukernel.gemm.mr1_case.function[XNN_UARCH_DEFAULT] != NULL) {
1131 mr = 1;
1132 gemm_ukernel = convolution_op->ukernel.gemm.mr1_case;
1133 }
1134
1135 convolution_op->context.gemm = (struct gemm_context) {
1136 .k_scaled = group_input_channels << log2_input_element_size,
1137 .a = input,
1138 .a_stride = convolution_op->input_pixel_stride << log2_input_element_size,
1139 .packed_w = convolution_op->packed_weights,
1140 .w_stride = w_stride,
1141 .wg_stride = w_stride * round_up(group_output_channels, nr),
1142 .c = output,
1143 .cm_stride = convolution_op->output_pixel_stride << log2_output_element_size,
1144 .cn_stride = nr << log2_output_element_size,
1145 .cg_stride = group_output_channels << log2_output_element_size,
1146 .log2_csize = log2_output_element_size,
1147 .ukernel = gemm_ukernel,
1148 };
1149 memcpy(&convolution_op->context.gemm.params, &convolution_op->params, sizeof(convolution_op->context.gemm.params));
1150
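// When running multi-threaded, shrink the output-channel tile so the total number of tiles
// is roughly target_tiles_per_thread times the number of threads, rounded to a multiple of nr.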
1151 size_t nc = group_output_channels;
1152 if (num_threads > 1) {
1153 const size_t num_other_tiles = groups * divide_round_up(batch_output_size, mr);
1154 const size_t target_tiles_per_thread = 5;
1155 const size_t max_nc = divide_round_up(group_output_channels * num_other_tiles, num_threads * target_tiles_per_thread);
1156 if (max_nc < nc) {
1157 nc = min(nc, divide_round_up(nc, max_nc * nr) * nr);
1158 }
1159 }
1160 if (groups == 1) {
1161 #if XNN_MAX_UARCH_TYPES > 1
1162 if (xnn_is_hmp_gemm_ukernel(gemm_ukernel)) {
1163 convolution_op->compute.type = xnn_parallelization_type_2d_tile_2d_with_uarch;
1164 convolution_op->compute.task_2d_tile_2d_with_id = (pthreadpool_task_2d_tile_2d_with_id_t) xnn_compute_hmp_gemm;
1165 } else {
1166 convolution_op->compute.type = xnn_parallelization_type_2d_tile_2d;
1167 convolution_op->compute.task_2d_tile_2d = (pthreadpool_task_2d_tile_2d_t) xnn_compute_gemm;
1168 }
1169 #else
1170 convolution_op->compute.type = xnn_parallelization_type_2d_tile_2d;
1171 convolution_op->compute.task_2d_tile_2d = (pthreadpool_task_2d_tile_2d_t) xnn_compute_gemm;
1172 #endif
1173 convolution_op->compute.range[0] = batch_output_size;
1174 convolution_op->compute.range[1] = group_output_channels;
1175 convolution_op->compute.tile[0] = mr;
1176 convolution_op->compute.tile[1] = nc;
1177 } else {
1178 #if XNN_MAX_UARCH_TYPES > 1
1179 if (xnn_is_hmp_gemm_ukernel(gemm_ukernel)) {
1180 convolution_op->compute.type = xnn_parallelization_type_3d_tile_2d_with_uarch;
1181 convolution_op->compute.task_3d_tile_2d_with_id = (pthreadpool_task_3d_tile_2d_with_id_t) xnn_compute_hmp_grouped_gemm;
1182 } else {
1183 convolution_op->compute.type = xnn_parallelization_type_3d_tile_2d;
1184 convolution_op->compute.task_3d_tile_2d = (pthreadpool_task_3d_tile_2d_t) xnn_compute_grouped_gemm;
1185 }
1186 #else
1187 convolution_op->compute.type = xnn_parallelization_type_3d_tile_2d;
1188 convolution_op->compute.task_3d_tile_2d = (pthreadpool_task_3d_tile_2d_t) xnn_compute_grouped_gemm;
1189 #endif
1190 convolution_op->compute.range[0] = groups;
1191 convolution_op->compute.range[1] = batch_output_size;
1192 convolution_op->compute.range[2] = group_output_channels;
1193 convolution_op->compute.tile[0] = mr;
1194 convolution_op->compute.tile[1] = nc;
1195 }
1196 convolution_op->state = xnn_run_state_ready;
1197
1198 return xnn_status_success;
1199 }
1200 case xnn_ukernel_type_igemm:
1201 {
1202 const size_t groups = convolution_op->groups;
1203 const size_t kernel_height = convolution_op->kernel_height;
1204 const size_t kernel_width = convolution_op->kernel_width;
1205 const size_t kernel_size = kernel_height * kernel_width;
1206 const size_t output_height = convolution_op->output_height;
1207 const size_t output_width = convolution_op->output_width;
1208 const size_t output_size = output_height * output_width;
1209
1210 uint32_t mr = convolution_op->ukernel.igemm.mr;
1211 const uint32_t nr = convolution_op->ukernel.igemm.nr;
1212 struct xnn_hmp_igemm_ukernel igemm_ukernel = convolution_op->ukernel.igemm.general_case;
1213 if (output_size == 1 && convolution_op->ukernel.igemm.mr1_case.function[XNN_UARCH_DEFAULT] != NULL) {
1214 mr = 1;
1215 igemm_ukernel = convolution_op->ukernel.igemm.mr1_case;
1216 }
1217
1218 const size_t tiled_output_size = round_up(output_size, mr);
1219 const size_t indirection_buffer_size = sizeof(void*) * kernel_size * tiled_output_size;
1220
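// The indirection buffer holds one input-pixel pointer per (output pixel, kernel element)
// pair (pointing at the zero buffer for padded positions) and only needs to be rebuilt when
// the input spatial dimensions change.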
1221 if (input_height != convolution_op->last_input_height ||
1222 input_width != convolution_op->last_input_width)
1223 {
1224 const void** indirection_buffer = (const void**) xnn_reallocate_memory((void*) convolution_op->indirection_buffer, indirection_buffer_size);
1225 if (indirection_buffer == NULL) {
1226 xnn_log_error(
1227 "failed to allocate %zu bytes for %s operator indirection buffer",
1228 indirection_buffer_size, xnn_operator_type_to_string(convolution_op->type));
1229 return xnn_status_out_of_memory;
1230 }
1231 convolution_op->indirection_buffer = indirection_buffer;
1232 convolution_op->last_input = input;
1233 convolution_op->last_input_height = input_height;
1234 convolution_op->last_input_width = input_width;
1235
1236 xnn_indirection_init_conv2d(convolution_op, mr, log2_input_element_size);
1237 }
1238
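      /* Descriptive note (added): w_stride is the distance between the packed weights of
         consecutive output-channel tiles: the per-channel extra weights (bias etc.) followed by
         kernel_size filter taps over the input channels rounded up to a multiple of kr * sr. */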
1239 const size_t group_input_channels = convolution_op->group_input_channels;
1240 const size_t w_stride = extra_weights_elements_size +
1241 (round_up_po2(group_input_channels, convolution_op->ukernel.igemm.kr * convolution_op->ukernel.igemm.sr) * kernel_size << log2_filter_element_size);
1242 const size_t group_output_channels = convolution_op->group_output_channels;
1243 convolution_op->context.igemm = (struct igemm_context) {
1244 .ks = kernel_size,
1245 .ks_scaled = kernel_size * mr * sizeof(void*),
1246 .kc = group_input_channels << log2_input_element_size,
1247 .w_stride = w_stride,
1248 .indirect_a = convolution_op->indirection_buffer,
1249 .a_offset = (size_t) ((uintptr_t) input - (uintptr_t) convolution_op->last_input),
1250 .zero = convolution_op->zero_buffer,
1251 .packed_w = convolution_op->packed_weights,
1252 .c = convolution_op->output,
1253 .cm_stride = convolution_op->output_pixel_stride << log2_output_element_size,
1254 .cn_stride = nr << log2_output_element_size,
1255 .ga_stride = group_input_channels << log2_input_element_size,
1256 .gw_stride = w_stride * round_up(group_output_channels, nr),
1257 .gc_stride = group_output_channels << log2_output_element_size,
1258 .ba_stride = input_height * input_width * convolution_op->input_pixel_stride << log2_input_element_size,
1259 .bc_stride = output_size * convolution_op->output_pixel_stride << log2_output_element_size,
1260 .log2_csize = log2_output_element_size,
1261 .ukernel = igemm_ukernel,
1262 };
1263 memcpy(&convolution_op->context.igemm.params, &convolution_op->params, sizeof(convolution_op->context.igemm.params));
1264
1265 size_t nc = group_output_channels;
1266 if (num_threads > 1) {
1267 const size_t num_other_tiles = groups * batch_size * divide_round_up(output_size, mr);
1268 const size_t target_tiles_per_thread = 5;
1269 const size_t max_nc = divide_round_up(group_output_channels * num_other_tiles, num_threads * target_tiles_per_thread);
1270 if (max_nc < nc) {
1271 nc = min(nc, divide_round_up(nc, max_nc * nr) * nr);
1272 }
1273 }
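      /* Descriptive note (added): parallelization geometry for IGEMM. The base case iterates
         over (output-pixel tile, output-channel tile); a batch dimension and/or a groups
         dimension is prepended when batch_size > 1 or groups > 1, which selects the 2D, 3D, or
         4D tiled task variants below. */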
1274 if (groups == 1) {
1275 #if XNN_MAX_UARCH_TYPES > 1
1276 if (xnn_is_hmp_igemm_ukernel(igemm_ukernel)) {
1277 if (batch_size > 1) {
1278 convolution_op->compute.type = xnn_parallelization_type_3d_tile_2d_with_uarch;
1279 convolution_op->compute.task_3d_tile_2d_with_id = (pthreadpool_task_3d_tile_2d_with_id_t) xnn_compute_batch_hmp_igemm;
1280 } else {
1281 convolution_op->compute.type = xnn_parallelization_type_2d_tile_2d_with_uarch;
1282 convolution_op->compute.task_2d_tile_2d_with_id = (pthreadpool_task_2d_tile_2d_with_id_t) xnn_compute_hmp_igemm;
1283 }
1284 } else {
1285 if (batch_size > 1) {
1286 convolution_op->compute.type = xnn_parallelization_type_3d_tile_2d;
1287 convolution_op->compute.task_3d_tile_2d = (pthreadpool_task_3d_tile_2d_t) xnn_compute_batch_igemm;
1288 } else {
1289 convolution_op->compute.type = xnn_parallelization_type_2d_tile_2d;
1290 convolution_op->compute.task_2d_tile_2d = (pthreadpool_task_2d_tile_2d_t) xnn_compute_igemm;
1291 }
1292 }
1293 #else
1294 if (batch_size > 1) {
1295 convolution_op->compute.type = xnn_parallelization_type_3d_tile_2d;
1296 convolution_op->compute.task_3d_tile_2d = (pthreadpool_task_3d_tile_2d_t) xnn_compute_batch_igemm;
1297 } else {
1298 convolution_op->compute.type = xnn_parallelization_type_2d_tile_2d;
1299 convolution_op->compute.task_2d_tile_2d = (pthreadpool_task_2d_tile_2d_t) xnn_compute_igemm;
1300 }
1301 #endif
1302 if (batch_size > 1) {
1303 convolution_op->compute.range[0] = batch_size;
1304 convolution_op->compute.range[1] = output_size;
1305 convolution_op->compute.range[2] = group_output_channels;
1306 } else {
1307 convolution_op->compute.range[0] = output_size;
1308 convolution_op->compute.range[1] = group_output_channels;
1309 }
1310 convolution_op->compute.tile[0] = mr;
1311 convolution_op->compute.tile[1] = nc;
1312 } else {
1313 #if XNN_MAX_UARCH_TYPES > 1
1314 if (xnn_is_hmp_igemm_ukernel(igemm_ukernel)) {
1315 if (batch_size > 1) {
1316 convolution_op->compute.type = xnn_parallelization_type_4d_tile_2d_with_uarch;
1317 convolution_op->compute.task_4d_tile_2d_with_id = (pthreadpool_task_4d_tile_2d_with_id_t) xnn_compute_hmp_grouped_batch_igemm;
1318 } else {
1319 convolution_op->compute.type = xnn_parallelization_type_3d_tile_2d_with_uarch;
1320 convolution_op->compute.task_3d_tile_2d_with_id = (pthreadpool_task_3d_tile_2d_with_id_t) xnn_compute_hmp_grouped_igemm;
1321 }
1322 } else {
1323 if (batch_size > 1) {
1324 convolution_op->compute.type = xnn_parallelization_type_4d_tile_2d;
1325 convolution_op->compute.task_4d_tile_2d = (pthreadpool_task_4d_tile_2d_t) xnn_compute_grouped_batch_igemm;
1326 } else {
1327 convolution_op->compute.type = xnn_parallelization_type_3d_tile_2d;
1328 convolution_op->compute.task_3d_tile_2d = (pthreadpool_task_3d_tile_2d_t) xnn_compute_grouped_igemm;
1329 }
1330 }
1331 #else
1332 if (batch_size > 1) {
1333 convolution_op->compute.type = xnn_parallelization_type_4d_tile_2d;
1334 convolution_op->compute.task_4d_tile_2d = (pthreadpool_task_4d_tile_2d_t) xnn_compute_grouped_batch_igemm;
1335 } else {
1336 convolution_op->compute.type = xnn_parallelization_type_3d_tile_2d;
1337 convolution_op->compute.task_3d_tile_2d = (pthreadpool_task_3d_tile_2d_t) xnn_compute_grouped_igemm;
1338 }
1339 #endif
1340 if (batch_size > 1) {
1341 convolution_op->compute.range[0] = batch_size;
1342 convolution_op->compute.range[1] = groups;
1343 convolution_op->compute.range[2] = output_size;
1344 convolution_op->compute.range[3] = group_output_channels;
1345 } else {
1346 convolution_op->compute.range[0] = groups;
1347 convolution_op->compute.range[1] = output_size;
1348 convolution_op->compute.range[2] = group_output_channels;
1349 }
1350 convolution_op->compute.tile[0] = mr;
1351 convolution_op->compute.tile[1] = nc;
1352 }
1353 convolution_op->state = xnn_run_state_ready;
1354
1355 return xnn_status_success;
1356 }
1357 case xnn_ukernel_type_dwconv:
1358 {
1359 const size_t kernel_height = convolution_op->kernel_height;
1360 const size_t kernel_width = convolution_op->kernel_width;
1361 const size_t kernel_size = kernel_height * kernel_width;
1362 const size_t output_height = convolution_op->output_height;
1363 const size_t output_width = convolution_op->output_width;
1364 const size_t step_width = convolution_op->dilation_width == 1 ? convolution_op->stride_width : kernel_width;
1365 const size_t step_height = kernel_size + (output_width - 1) * step_width * kernel_height;
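    /* Descriptive note (added): step_width is the pointer stride between adjacent output columns
       in the indirection buffer, and step_height is the number of pointers consumed per output
       row: one full kernel window plus (output_width - 1) column steps for each kernel row. */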
1366 if (input_height != convolution_op->last_input_height || input_width != convolution_op->last_input_width) {
1367 const size_t indirection_buffer_size = sizeof(void*) * output_height * step_height;
1368
1369 const void** indirection_buffer =
1370 (const void**) xnn_reallocate_memory(convolution_op->indirection_buffer, indirection_buffer_size);
1371 if (indirection_buffer == NULL) {
1372 xnn_log_error("failed to allocate %zu bytes for %s operator indirection buffer",
1373 indirection_buffer_size, xnn_operator_type_to_string(convolution_op->type));
1374 return xnn_status_out_of_memory;
1375 }
1376 convolution_op->indirection_buffer = indirection_buffer;
1377
1378 xnn_indirection_init_dwconv2d(convolution_op, step_height, step_width, log2_input_element_size);
1379
1380 convolution_op->last_input = input;
1381 convolution_op->last_input_height = input_height;
1382 convolution_op->last_input_width = input_width;
1383 }
1384
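    /* Descriptive note (added): the dwconv context walks the output row by row. output_increment
       is the gap between the last channel written for one pixel and the start of the next pixel
       (output_pixel_stride - groups elements), while indirect_input_width_stride and
       indirect_input_height_stride advance the indirection buffer by one output column and one
       output row, respectively. */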
1385 const size_t groups = convolution_op->groups;
1386 convolution_op->context.dwconv = (struct dwconv_context) {
1387 .indirect_input = convolution_op->indirection_buffer,
1388 .indirect_input_width_stride = kernel_height * step_width * sizeof(void*),
1389 .indirect_input_height_stride = step_height * sizeof(void*),
1390 .input_offset = (size_t) ((uintptr_t) input - (uintptr_t) convolution_op->last_input),
1391 .input_batch_stride = (input_height * input_width * convolution_op->input_pixel_stride) << log2_input_element_size,
1392 .packed_weights = convolution_op->packed_weights,
1393 .output = convolution_op->output,
1394 .output_batch_stride = (output_height * output_width * convolution_op->output_pixel_stride) << log2_output_element_size,
1395 .output_height_stride = (output_width * convolution_op->output_pixel_stride) << log2_output_element_size,
1396 .output_width = output_width,
1397 .groups = groups,
1398 .zero = convolution_op->zero_buffer,
1399 .output_increment = (convolution_op->output_pixel_stride - groups) << log2_output_element_size,
1400 .unipass_ukernel = convolution_op->ukernel.dwconv.unipass_function,
1401 };
1402 memcpy(&convolution_op->context.dwconv.params, &convolution_op->params, sizeof(convolution_op->context.dwconv.params));
1403
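    /* Descriptive note (added): depthwise convolution is parallelized over
       (batch element, output row); each task invocation runs the unipass ukernel across one
       output row of all channel groups. */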
1404 convolution_op->compute.type = xnn_parallelization_type_2d;
1405 convolution_op->compute.task_2d = (pthreadpool_task_2d_t) xnn_compute_dwconv_unipass;
1406 convolution_op->compute.range[0] = batch_size;
1407 convolution_op->compute.range[1] = output_height;
1408 convolution_op->state = xnn_run_state_ready;
1409
1410 return xnn_status_success;
1411 }
1412 case xnn_ukernel_type_vmulcaddc:
1413 {
1414 const size_t batch_output_size = batch_size * convolution_op->output_height * convolution_op->output_width;
1415
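    /* Descriptive note (added): this path applies a per-channel multiply-add (scale and bias
       taken from the packed weights) independently to every output pixel; work is tiled over the
       flattened batch * height * width dimension in chunks of mc pixels, chosen by the same
       tiles-per-thread heuristic used above. */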
1416 convolution_op->context.vmulcaddc = (struct vmulcaddc_context) {
1417 .n = convolution_op->groups << log2_input_element_size,
1418 .x = input,
1419 .x_stride = convolution_op->input_pixel_stride << log2_input_element_size,
1420 .w = convolution_op->packed_weights,
1421 .y = output,
1422 .y_stride = convolution_op->output_pixel_stride << log2_output_element_size,
1423 .ukernel = convolution_op->ukernel.vmulcaddc.function,
1424 };
1425 memcpy(&convolution_op->context.vmulcaddc.params, &convolution_op->params, sizeof(convolution_op->context.vmulcaddc.params));
1426
1427 size_t mc = batch_output_size;
1428 if (num_threads > 1) {
1429 const size_t target_tiles_per_thread = 5;
1430 const size_t max_mc = divide_round_up(batch_output_size, num_threads * target_tiles_per_thread);
1431 if (max_mc < mc) {
1432 const uint32_t mr = convolution_op->ukernel.vmulcaddc.mr;
1433 mc = min(mc, divide_round_up(mc, max_mc * mr) * mr);
1434 }
1435 }
1436 convolution_op->compute.type = xnn_parallelization_type_1d_tile_1d;
1437 convolution_op->compute.task_1d_tile_1d = (pthreadpool_task_1d_tile_1d_t) xnn_compute_vmulcaddc;
1438 convolution_op->compute.range[0] = batch_output_size;
1439 convolution_op->compute.tile[0] = mc;
1440 convolution_op->state = xnn_run_state_ready;
1441
1442 return xnn_status_success;
1443 }
1444 default:
1445 XNN_UNREACHABLE;
1446 }
1447 }
1448
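/*
 * Descriptive note (added): the public setup entry points below only validate the operator type
 * and then forward to setup_convolution2d_nhwc() with the datatype-specific element sizes.
 * A minimal usage sketch (illustrative only; creation arguments and error handling are elided,
 * and the operator is assumed to have been created with the matching create function):
 *
 *   xnn_operator_t op = NULL;
 *   // ... op created earlier with xnn_create_convolution2d_nhwc_f32(...) ...
 *   enum xnn_status status = xnn_setup_convolution2d_nhwc_f32(
 *       op, batch_size, input_height, input_width, input, output, threadpool);
 *   if (status == xnn_status_success) {
 *     status = xnn_run_operator(op, threadpool);
 *   }
 */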
1449 enum xnn_status xnn_setup_convolution2d_nhwc_qu8(
1450 xnn_operator_t convolution_op,
1451 size_t batch_size,
1452 size_t input_height,
1453 size_t input_width,
1454 const uint8_t* input,
1455 uint8_t* output,
1456 pthreadpool_t threadpool)
1457 {
1458 if (convolution_op->type != xnn_operator_type_convolution_nhwc_qu8) {
1459 xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
1460 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qu8),
1461 xnn_operator_type_to_string(convolution_op->type));
1462 return xnn_status_invalid_parameter;
1463 }
1464
1465 return setup_convolution2d_nhwc(
1466 convolution_op,
1467 batch_size, input_height, input_width,
1468 input, output,
1469 XNN_INIT_FLAG_QU8,
1470 0 /* log2(sizeof(input element)) = log2(sizeof(uint8_t)) */,
1471 0 /* log2(sizeof(filter element)) = log2(sizeof(uint8_t)) */,
1472 sizeof(int32_t) /* sizeof(extra weights elements) */,
1473 0 /* log2(sizeof(output element)) = log2(sizeof(uint8_t)) */,
1474 pthreadpool_get_threads_count(threadpool));
1475 }
1476
1477 enum xnn_status xnn_setup_convolution2d_nhwc_qs8(
1478 xnn_operator_t convolution_op,
1479 size_t batch_size,
1480 size_t input_height,
1481 size_t input_width,
1482 const int8_t* input,
1483 int8_t* output,
1484 pthreadpool_t threadpool)
1485 {
1486 if (convolution_op->type != xnn_operator_type_convolution_nhwc_qs8) {
1487 xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
1488 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qs8),
1489 xnn_operator_type_to_string(convolution_op->type));
1490 return xnn_status_invalid_parameter;
1491 }
1492
1493 return setup_convolution2d_nhwc(
1494 convolution_op,
1495 batch_size, input_height, input_width,
1496 input, output,
1497 XNN_INIT_FLAG_QS8,
1498 0 /* log2(sizeof(input element)) = log2(sizeof(int8_t)) */,
1499 0 /* log2(sizeof(filter element)) = log2(sizeof(int8_t)) */,
1500 sizeof(int32_t) /* sizeof(extra weights elements) */,
1501 0 /* log2(sizeof(output element)) = log2(sizeof(int8_t)) */,
1502 pthreadpool_get_threads_count(threadpool));
1503 }
1504
1505 enum xnn_status xnn_setup_convolution2d_nhwc_qc8(
1506 xnn_operator_t convolution_op,
1507 size_t batch_size,
1508 size_t input_height,
1509 size_t input_width,
1510 const int8_t* input,
1511 int8_t* output,
1512 pthreadpool_t threadpool)
1513 {
1514 if (convolution_op->type != xnn_operator_type_convolution_nhwc_qc8) {
1515 xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
1516 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_qc8),
1517 xnn_operator_type_to_string(convolution_op->type));
1518 return xnn_status_invalid_parameter;
1519 }
1520
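  /* Descriptive note (added): channelwise-quantized (QC8) weights pack a per-channel float
     requantization scale alongside the int32 bias, hence sizeof(int32_t) + sizeof(float) extra
     weight bytes per output channel instead of the sizeof(int32_t) used by QS8/QU8. */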
1521 return setup_convolution2d_nhwc(
1522 convolution_op,
1523 batch_size, input_height, input_width,
1524 input, output,
1525 XNN_INIT_FLAG_QC8,
1526 0 /* log2(sizeof(input element)) = log2(sizeof(int8_t)) */,
1527 0 /* log2(sizeof(filter element)) = log2(sizeof(int8_t)) */,
1528 sizeof(int32_t) + sizeof(float) /* sizeof(extra weights elements) */,
1529 0 /* log2(sizeof(output element)) = log2(sizeof(int8_t)) */,
1530 pthreadpool_get_threads_count(threadpool));
1531 }
1532
1533 enum xnn_status xnn_setup_convolution2d_nhwc_f16(
1534 xnn_operator_t convolution_op,
1535 size_t batch_size,
1536 size_t input_height,
1537 size_t input_width,
1538 const void* input,
1539 void* output,
1540 pthreadpool_t threadpool)
1541 {
1542 if (convolution_op->type != xnn_operator_type_convolution_nhwc_f16) {
1543 xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
1544 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f16),
1545 xnn_operator_type_to_string(convolution_op->type));
1546 return xnn_status_invalid_parameter;
1547 }
1548
1549 return setup_convolution2d_nhwc(
1550 convolution_op,
1551 batch_size, input_height, input_width,
1552 input, output,
1553 XNN_INIT_FLAG_F16,
1554 1 /* log2(sizeof(input element)) = log2(sizeof(uint16_t)) */,
1555 1 /* log2(sizeof(filter element)) = log2(sizeof(uint16_t)) */,
1556 sizeof(uint16_t) /* sizeof(extra weights elements) */,
1557 1 /* log2(sizeof(output element)) = log2(sizeof(uint16_t)) */,
1558 pthreadpool_get_threads_count(threadpool));
1559 }
1560
1561 enum xnn_status xnn_setup_convolution2d_nhwc_f32(
1562 xnn_operator_t convolution_op,
1563 size_t batch_size,
1564 size_t input_height,
1565 size_t input_width,
1566 const float* input,
1567 float* output,
1568 pthreadpool_t threadpool)
1569 {
1570 if (convolution_op->type != xnn_operator_type_convolution_nhwc_f32) {
1571 xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
1572 xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32),
1573 xnn_operator_type_to_string(convolution_op->type));
1574 return xnn_status_invalid_parameter;
1575 }
1576
1577 return setup_convolution2d_nhwc(
1578 convolution_op,
1579 batch_size, input_height, input_width,
1580 input, output,
1581 XNN_INIT_FLAG_F32,
1582 2 /* log2(sizeof(input element)) = log2(sizeof(float)) */,
1583 2 /* log2(sizeof(filter element)) = log2(sizeof(float)) */,
1584 sizeof(float) /* sizeof(extra weights elements) */,
1585 2 /* log2(sizeof(output element)) = log2(sizeof(float)) */,
1586 pthreadpool_get_threads_count(threadpool));
1587 }
1588