// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include <xnnpack.h>
#include <xnnpack/common.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>


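// Create callback for a Deconvolution2D node: instantiates the concrete NHWC
// deconvolution operator (FP32, QS8, or QU8) matching the node's compute type,
// using the static filter and (optional) bias data referenced by the node.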
static enum xnn_status create_deconvolution_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata)
{
  assert(node->num_inputs >= 2);
  assert(node->num_inputs <= 3);
  const bool use_bias = node->num_inputs >= 3;

  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);
  const uint32_t filter_id = node->inputs[1];
  assert(filter_id != XNN_INVALID_VALUE_ID);
  assert(filter_id < num_values);

  const void* bias_data = NULL;
  if (use_bias) {
    const uint32_t bias_id = node->inputs[2];
    assert(bias_id != XNN_INVALID_VALUE_ID);
    assert(bias_id < num_values);

    bias_data = values[bias_id].data;
    assert(bias_data != NULL);
  }

  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  const void* filter_data = values[filter_id].data;
  assert(filter_data != NULL);

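  // Select the operator variant from the node's compute type. The pixel
  // strides passed below are the full channel counts (groups * channels per
  // group), since subgraph Values are dense NHWC tensors.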
  enum xnn_status status = xnn_status_uninitialized;
  switch (node->compute_type) {
    case xnn_compute_type_fp32:
      status = xnn_create_deconvolution2d_nhwc_f32(
        node->params.deconvolution_2d.padding_top,
        node->params.deconvolution_2d.padding_right,
        node->params.deconvolution_2d.padding_bottom,
        node->params.deconvolution_2d.padding_left,
        node->params.deconvolution_2d.kernel_height,
        node->params.deconvolution_2d.kernel_width,
        node->params.deconvolution_2d.upsampling_height,
        node->params.deconvolution_2d.upsampling_width,
        node->params.deconvolution_2d.dilation_height,
        node->params.deconvolution_2d.dilation_width,
        node->params.deconvolution_2d.groups,
        node->params.deconvolution_2d.group_input_channels,
        node->params.deconvolution_2d.group_output_channels,
        node->params.deconvolution_2d.group_input_channels * node->params.deconvolution_2d.groups /* input_pixel_stride */,
        node->params.deconvolution_2d.group_output_channels * node->params.deconvolution_2d.groups /* output_pixel_stride */,
        filter_data,
        bias_data,
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_object);
      break;
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_compute_type_qs8:
    {
      const float output_scale = values[output_id].quantization.scale;
      const int32_t output_zero_point = values[output_id].quantization.zero_point;
      // Convert the floating-point activation bounds into quantized units and
      // clamp them to the representable int8 range.
      const int8_t output_min =
        (int8_t) lrintf(fminf(fmaxf(node->activation.output_min / output_scale + (float) output_zero_point, -128.0f), 127.0f));
      const int8_t output_max =
        (int8_t) lrintf(fminf(fmaxf(node->activation.output_max / output_scale + (float) output_zero_point, -128.0f), 127.0f));
      status = xnn_create_deconvolution2d_nhwc_qs8(
        node->params.deconvolution_2d.padding_top,
        node->params.deconvolution_2d.padding_right,
        node->params.deconvolution_2d.padding_bottom,
        node->params.deconvolution_2d.padding_left,
        node->params.deconvolution_2d.kernel_height,
        node->params.deconvolution_2d.kernel_width,
        node->params.deconvolution_2d.upsampling_height,
        node->params.deconvolution_2d.upsampling_width,
        node->params.deconvolution_2d.dilation_height,
        node->params.deconvolution_2d.dilation_width,
        node->params.deconvolution_2d.groups,
        node->params.deconvolution_2d.group_input_channels,
        node->params.deconvolution_2d.group_output_channels,
        node->params.deconvolution_2d.group_input_channels * node->params.deconvolution_2d.groups /* input_pixel_stride */,
        node->params.deconvolution_2d.group_output_channels * node->params.deconvolution_2d.groups /* output_pixel_stride */,
        (int8_t) values[input_id].quantization.zero_point,
        values[input_id].quantization.scale,
        values[filter_id].quantization.scale,
        filter_data,
        bias_data,
        output_zero_point,
        output_scale,
        output_min,
        output_max,
        node->flags,
        &opdata->operator_object);
      break;
    }
#endif  // !defined(XNN_NO_QS8_OPERATORS)
#ifndef XNN_NO_QU8_OPERATORS
    case xnn_compute_type_qu8:
    {
      const float output_scale = values[output_id].quantization.scale;
      const int32_t output_zero_point = values[output_id].quantization.zero_point;
      const uint8_t output_min =
        (uint8_t) lrintf(fminf(fmaxf(node->activation.output_min / output_scale + (float) output_zero_point, 0.0f), 255.0f));
      const uint8_t output_max =
        (uint8_t) lrintf(fminf(fmaxf(node->activation.output_max / output_scale + (float) output_zero_point, 0.0f), 255.0f));
      status = xnn_create_deconvolution2d_nhwc_qu8(
        node->params.deconvolution_2d.padding_top,
        node->params.deconvolution_2d.padding_right,
        node->params.deconvolution_2d.padding_bottom,
        node->params.deconvolution_2d.padding_left,
        node->params.deconvolution_2d.kernel_height,
        node->params.deconvolution_2d.kernel_width,
        node->params.deconvolution_2d.upsampling_height,
        node->params.deconvolution_2d.upsampling_width,
        node->params.deconvolution_2d.dilation_height,
        node->params.deconvolution_2d.dilation_width,
        node->params.deconvolution_2d.groups,
        node->params.deconvolution_2d.group_input_channels,
        node->params.deconvolution_2d.group_output_channels,
        node->params.deconvolution_2d.group_input_channels * node->params.deconvolution_2d.groups /* input_pixel_stride */,
        node->params.deconvolution_2d.group_output_channels * node->params.deconvolution_2d.groups /* output_pixel_stride */,
        (uint8_t) values[input_id].quantization.zero_point,
        values[input_id].quantization.scale,
        (uint8_t) values[filter_id].quantization.zero_point,
        values[filter_id].quantization.scale,
        filter_data,
        bias_data,
        output_zero_point,
        output_scale,
        output_min,
        output_max,
        node->flags,
        &opdata->operator_object);
      break;
    }
#endif  // !defined(XNN_NO_QU8_OPERATORS)
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = values[input_id].shape.dim[0];
    opdata->input_height = values[input_id].shape.dim[1];
    opdata->input_width = values[input_id].shape.dim[2];
    opdata->adjustment_height = node->params.deconvolution_2d.adjustment_height;
    opdata->adjustment_width = node->params.deconvolution_2d.adjustment_width;
    opdata->inputs[0] = input_id;
    opdata->outputs[0] = output_id;
  }
  return status;
}

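// Setup callback for a Deconvolution2D node: binds the runtime input/output
// blobs and the recorded batch/spatial dimensions to the created operator.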
static enum xnn_status setup_deconvolution_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_blob* blobs,
  size_t num_blobs,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_blobs);

  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_blobs);

  const struct xnn_blob* input_blob = blobs + input_id;
  const void* input_data = input_blob->data;
  assert(input_data != NULL);

  const struct xnn_blob* output_blob = blobs + output_id;
  void* output_data = output_blob->data;
  assert(output_data != NULL);

  switch (opdata->operator_object->type) {
    case xnn_operator_type_deconvolution_nhwc_f32:
      return xnn_setup_deconvolution2d_nhwc_f32(
        opdata->operator_object,
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        opdata->adjustment_height,
        opdata->adjustment_width,
        input_data,
        output_data,
        threadpool);
      break;
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_operator_type_deconvolution_nhwc_qs8:
      return xnn_setup_deconvolution2d_nhwc_qs8(
        opdata->operator_object,
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        opdata->adjustment_height,
        opdata->adjustment_width,
        input_data,
        output_data,
        threadpool);
      break;
#endif  // !defined(XNN_NO_QS8_OPERATORS)
#ifndef XNN_NO_QU8_OPERATORS
    case xnn_operator_type_deconvolution_nhwc_qu8:
      return xnn_setup_deconvolution2d_nhwc_qu8(
        opdata->operator_object,
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        opdata->adjustment_height,
        opdata->adjustment_width,
        input_data,
        output_data,
        threadpool);
      break;
#endif  // !defined(XNN_NO_QU8_OPERATORS)
    default:
      XNN_UNREACHABLE;
  }
}

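// Map the (input, filter, bias, output) datatype combination to a compute
// type; combinations not listed below are rejected as invalid.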
static inline enum xnn_compute_type validate_datatypes_with_bias(
  enum xnn_datatype input_datatype,
  enum xnn_datatype filter_datatype,
  enum xnn_datatype bias_datatype,
  enum xnn_datatype output_datatype)
{
  switch (filter_datatype) {
    case xnn_datatype_fp32:
      if (input_datatype == xnn_datatype_fp32 &&
          bias_datatype == xnn_datatype_fp32 &&
          output_datatype == xnn_datatype_fp32)
      {
        return xnn_compute_type_fp32;
      }
      break;
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_datatype_qint8:
      if (input_datatype == xnn_datatype_qint8 &&
          bias_datatype == xnn_datatype_qint32 &&
          output_datatype == xnn_datatype_qint8)
      {
        return xnn_compute_type_qs8;
      }
      break;
#endif  // !defined(XNN_NO_QS8_OPERATORS)
#ifndef XNN_NO_QU8_OPERATORS
    case xnn_datatype_quint8:
      if (input_datatype == xnn_datatype_quint8 &&
          bias_datatype == xnn_datatype_qint32 &&
          output_datatype == xnn_datatype_quint8)
      {
        return xnn_compute_type_qu8;
      }
      break;
#endif  // !defined(XNN_NO_QU8_OPERATORS)
    default:
      XNN_UNREACHABLE;
  }
  return xnn_compute_type_invalid;
}

static inline enum xnn_compute_type validate_datatypes_without_bias(
  enum xnn_datatype input_datatype,
  enum xnn_datatype filter_datatype,
  enum xnn_datatype output_datatype)
{
  switch (filter_datatype) {
    case xnn_datatype_fp32:
      if (input_datatype == xnn_datatype_fp32 && output_datatype == xnn_datatype_fp32) {
        return xnn_compute_type_fp32;
      }
      break;
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_datatype_qint8:
      if (input_datatype == xnn_datatype_qint8 && output_datatype == xnn_datatype_qint8) {
        return xnn_compute_type_qs8;
      }
      break;
#endif  // !defined(XNN_NO_QS8_OPERATORS)
#ifndef XNN_NO_QU8_OPERATORS
    case xnn_datatype_quint8:
      if (input_datatype == xnn_datatype_quint8 && output_datatype == xnn_datatype_quint8) {
        return xnn_compute_type_qu8;
      }
      break;
#endif  // !defined(XNN_NO_QU8_OPERATORS)
    default:
      XNN_UNREACHABLE;
  }
  return xnn_compute_type_invalid;
}

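// Public subgraph API: validates the deconvolution parameters and Value IDs,
// then records a Deconvolution2D node in the subgraph.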
enum xnn_status xnn_define_deconvolution_2d(
  xnn_subgraph_t subgraph,
  uint32_t padding_top,
  uint32_t padding_right,
  uint32_t padding_bottom,
  uint32_t padding_left,
  uint32_t adjustment_height,
  uint32_t adjustment_width,
  uint32_t kernel_height,
  uint32_t kernel_width,
  uint32_t upsampling_height,
  uint32_t upsampling_width,
  uint32_t dilation_height,
  uint32_t dilation_width,
  uint32_t groups,
  size_t group_input_channels,
  size_t group_output_channels,
  float output_min,
  float output_max,
  uint32_t input_id,
  uint32_t filter_id,
  uint32_t bias_id,
  uint32_t output_id,
  uint32_t flags)
{
  if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d));
    return xnn_status_uninitialized;
  }

  if (kernel_width == 0 || kernel_height == 0) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), kernel_width, kernel_height);
    return xnn_status_invalid_parameter;
  }

  if (upsampling_width == 0 || upsampling_height == 0) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " upsampling: upsampling dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), upsampling_width, upsampling_height);
    return xnn_status_invalid_parameter;
  }

  if (dilation_width == 0 || dilation_height == 0) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), dilation_width, dilation_height);
    return xnn_status_invalid_parameter;
  }

  if (groups == 0) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 " groups: number of groups must be non-zero",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), groups);
    return xnn_status_invalid_parameter;
  }

  if (group_input_channels == 0) {
    xnn_log_error(
      "failed to define %s operator with %zu input channels per group: number of channels must be non-zero",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), group_input_channels);
    return xnn_status_invalid_parameter;
  }

  if (group_output_channels == 0) {
    xnn_log_error(
      "failed to define %s operator with %zu output channels per group: number of channels must be non-zero",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), group_output_channels);
    return xnn_status_invalid_parameter;
  }

  if (isnan(output_min)) {
    xnn_log_error(
      "failed to define %s operator with NaN output lower bound: lower bound must be non-NaN",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d));
    return xnn_status_invalid_parameter;
  }

  if (isnan(output_max)) {
    xnn_log_error(
      "failed to define %s operator with NaN output upper bound: upper bound must be non-NaN",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d));
    return xnn_status_invalid_parameter;
  }

  if (output_min >= output_max) {
    xnn_log_error(
      "failed to define %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), output_min, output_max);
    return xnn_status_invalid_parameter;
  }

  if (input_id >= subgraph->num_values) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), input_id);
    return xnn_status_invalid_parameter;
  }

  const struct xnn_value* input_value = &subgraph->values[input_id];
  if (input_value->type != xnn_value_type_dense_tensor) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), input_id, input_value->type);
    return xnn_status_invalid_parameter;
  }

  switch (input_value->datatype) {
    case xnn_datatype_fp32:
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_datatype_qint8:
#endif  // !defined(XNN_NO_QS8_OPERATORS)
#ifndef XNN_NO_QU8_OPERATORS
    case xnn_datatype_quint8:
#endif  // !defined(XNN_NO_QU8_OPERATORS)
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_deconvolution_2d), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }

  if (filter_id >= subgraph->num_values) {
    xnn_log_error(
      "failed to define %s operator with filter ID #%" PRIu32 ": invalid Value ID",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), filter_id);
    return xnn_status_invalid_parameter;
  }

  const struct xnn_value* filter_value = &subgraph->values[filter_id];
  if (filter_value->type != xnn_value_type_dense_tensor) {
    xnn_log_error(
      "failed to define %s operator with filter ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), filter_id, filter_value->type);
    return xnn_status_invalid_parameter;
  }

  if (filter_value->data == NULL) {
    xnn_log_error(
      "failed to define %s operator with filter ID #%" PRIu32 ": non-static Value",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), filter_id);
    return xnn_status_invalid_parameter;
  }

  switch (filter_value->datatype) {
    case xnn_datatype_fp32:
      break;
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_datatype_qint8:
      if (filter_value->quantization.zero_point != 0) {
        xnn_log_error(
          "failed to define %s operator with filter ID #%" PRIu32 ": unsupported quantization zero point %" PRId32 " for datatype %s",
          xnn_node_type_to_string(xnn_node_type_deconvolution_2d), filter_id,
          filter_value->quantization.zero_point, xnn_datatype_to_string(filter_value->datatype));
        return xnn_status_invalid_parameter;
      }
      break;
#endif  // !defined(XNN_NO_QS8_OPERATORS)
#ifndef XNN_NO_QU8_OPERATORS
    case xnn_datatype_quint8:
      break;
#endif  // !defined(XNN_NO_QU8_OPERATORS)
    default:
      xnn_log_error(
        "failed to define %s operator with filter ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_deconvolution_2d), filter_id,
        xnn_datatype_to_string(filter_value->datatype), filter_value->datatype);
      return xnn_status_invalid_parameter;
  }

  const struct xnn_value* bias_value = NULL;

  if (bias_id != XNN_INVALID_VALUE_ID) {
    if (bias_id >= subgraph->num_values) {
      xnn_log_error(
        "failed to define %s operator with bias ID #%" PRIu32 ": invalid Value ID",
        xnn_node_type_to_string(xnn_node_type_deconvolution_2d), bias_id);
      return xnn_status_invalid_parameter;
    }

    bias_value = &subgraph->values[bias_id];
    if (bias_value->type != xnn_value_type_dense_tensor) {
      xnn_log_error(
        "failed to define %s operator with bias ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
        xnn_node_type_to_string(xnn_node_type_deconvolution_2d), bias_id, bias_value->type);
      return xnn_status_invalid_parameter;
    }

    if (bias_value->data == NULL) {
      xnn_log_error(
        "failed to define %s operator with bias ID #%" PRIu32 ": non-static Value",
        xnn_node_type_to_string(xnn_node_type_deconvolution_2d), bias_id);
      return xnn_status_invalid_parameter;
    }

    switch (bias_value->datatype) {
      case xnn_datatype_fp32:
#if !defined(XNN_NO_QS8_OPERATORS) || !defined(XNN_NO_QU8_OPERATORS)
      case xnn_datatype_qint32:
#endif  // !defined(XNN_NO_QS8_OPERATORS) || !defined(XNN_NO_QU8_OPERATORS)
        break;
      default:
        xnn_log_error(
          "failed to define %s operator with bias ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
          xnn_node_type_to_string(xnn_node_type_deconvolution_2d), bias_id,
          xnn_datatype_to_string(bias_value->datatype), bias_value->datatype);
        return xnn_status_invalid_parameter;
    }
  }

  if (output_id >= subgraph->num_values) {
    xnn_log_error(
      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), output_id);
    return xnn_status_invalid_parameter;
  }

  const struct xnn_value* output_value = &subgraph->values[output_id];
  if (output_value->type != xnn_value_type_dense_tensor) {
    xnn_log_error(
      "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), output_id, output_value->type);
    return xnn_status_invalid_parameter;
  }

  switch (output_value->datatype) {
    case xnn_datatype_fp32:
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_datatype_qint8:
#endif  // !defined(XNN_NO_QS8_OPERATORS)
#ifndef XNN_NO_QU8_OPERATORS
    case xnn_datatype_quint8:
#endif  // !defined(XNN_NO_QU8_OPERATORS)
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_deconvolution_2d), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }

  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  if (bias_value != NULL) {
    compute_type = validate_datatypes_with_bias(
      input_value->datatype, filter_value->datatype, bias_value->datatype, output_value->datatype);
    if (compute_type == xnn_compute_type_invalid) {
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ", filter ID #%" PRIu32 ", bias ID #%" PRIu32 ", and output ID #%" PRIu32
        ": mismatching datatypes across input (%s), filter (%s), bias (%s), and output (%s)",
        xnn_node_type_to_string(xnn_node_type_deconvolution_2d), input_id, filter_id, bias_id, output_id,
        xnn_datatype_to_string(input_value->datatype),
        xnn_datatype_to_string(filter_value->datatype),
        xnn_datatype_to_string(bias_value->datatype),
        xnn_datatype_to_string(output_value->datatype));
      return xnn_status_invalid_parameter;
    }
  } else {
    compute_type = validate_datatypes_without_bias(
      input_value->datatype, filter_value->datatype, output_value->datatype);
    if (compute_type == xnn_compute_type_invalid) {
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ", filter ID #%" PRIu32 ", and output ID #%" PRIu32
        ": mismatching datatypes across input (%s), filter (%s), and output (%s)",
        xnn_node_type_to_string(xnn_node_type_deconvolution_2d), input_id, filter_id, output_id,
        xnn_datatype_to_string(input_value->datatype),
        xnn_datatype_to_string(filter_value->datatype),
        xnn_datatype_to_string(output_value->datatype));
      return xnn_status_invalid_parameter;
    }
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }

  node->type = xnn_node_type_deconvolution_2d;
  node->compute_type = compute_type;
  node->params.deconvolution_2d.padding_top = padding_top;
  node->params.deconvolution_2d.padding_right = padding_right;
  node->params.deconvolution_2d.padding_bottom = padding_bottom;
  node->params.deconvolution_2d.padding_left = padding_left;
  node->params.deconvolution_2d.kernel_height = kernel_height;
  node->params.deconvolution_2d.kernel_width = kernel_width;
  node->params.deconvolution_2d.upsampling_height = upsampling_height;
  node->params.deconvolution_2d.upsampling_width = upsampling_width;
  node->params.deconvolution_2d.dilation_height = dilation_height;
  node->params.deconvolution_2d.dilation_width = dilation_width;
  node->params.deconvolution_2d.adjustment_height = adjustment_height;
  node->params.deconvolution_2d.adjustment_width = adjustment_width;
  node->params.deconvolution_2d.groups = groups;
  node->params.deconvolution_2d.group_input_channels = group_input_channels;
  node->params.deconvolution_2d.group_output_channels = group_output_channels;
  node->activation.output_min = output_min;
  node->activation.output_max = output_max;
  node->num_inputs = 2 + (size_t) (bias_value != NULL);
  node->inputs[0] = input_id;
  node->inputs[1] = filter_id;
  node->inputs[2] = bias_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;

  node->create = create_deconvolution_operator;
  node->setup = setup_deconvolution_operator;

  return xnn_status_success;
}