// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>

#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/math.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>

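// Creates the concrete resize-bilinear-2d operator for a subgraph node once
// tensor shapes and layouts are known, and records the geometry that
// setup_resize_bilinear_operator() needs later in opdata.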
static enum xnn_status create_resize_bilinear_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);

  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  const size_t channel_dim = values[input_id].shape.dim[3];
  assert(channel_dim == values[output_id].shape.dim[3]);

  enum xnn_status status;
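  // NCHW layout is only wired up for FP32; NHWC additionally supports the
  // signed (QS8) and unsigned (QU8) 8-bit quantized compute types.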
  if (values[input_id].layout == xnn_layout_type_nchw) {
    assert(values[output_id].layout == xnn_layout_type_nchw);
    assert(node->compute_type == xnn_compute_type_fp32);
    status = xnn_create_resize_bilinear2d_nchw_f32(
      channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
      node->flags,
      &opdata->operator_object);
  } else {
    assert(values[input_id].layout == xnn_layout_type_nhwc);
    assert(values[output_id].layout == xnn_layout_type_nhwc);
    switch (node->compute_type) {
      case xnn_compute_type_fp32:
        status = xnn_create_resize_bilinear2d_nhwc_f32(
          channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
          node->flags,
          &opdata->operator_object);
        break;
#ifndef XNN_NO_S8_OPERATORS
      case xnn_compute_type_qs8:
        status = xnn_create_resize_bilinear2d_nhwc_s8(
          channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
          node->flags,
          &opdata->operator_object);
        break;
#endif  // !defined(XNN_NO_S8_OPERATORS)
#ifndef XNN_NO_U8_OPERATORS
      case xnn_compute_type_qu8:
        status = xnn_create_resize_bilinear2d_nhwc_u8(
          channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
          node->flags,
          &opdata->operator_object);
        break;
#endif  // !defined(XNN_NO_U8_OPERATORS)
      default:
        XNN_UNREACHABLE;
    }
  }
  if (status == xnn_status_success) {
    opdata->batch_size = values[input_id].shape.dim[0];
    opdata->input_height = values[input_id].shape.dim[1];
    opdata->input_width = values[input_id].shape.dim[2];
    opdata->output_height = values[output_id].shape.dim[1];
    opdata->output_width = values[output_id].shape.dim[2];
    opdata->inputs[0] = input_id;
    opdata->outputs[0] = output_id;
  }
  return status;
}

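// Binds the operator created above to the runtime's input and output blobs and
// forwards the recorded batch and spatial dimensions to the layout- and
// datatype-specific setup entry point.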
static enum xnn_status setup_resize_bilinear_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_blob* blobs,
  size_t num_blobs,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_blobs);

  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_blobs);

  const struct xnn_blob* input_blob = blobs + input_id;
  const void* input_data = input_blob->data;
  assert(input_data != NULL);

  const struct xnn_blob* output_blob = blobs + output_id;
  void* output_data = output_blob->data;
  assert(output_data != NULL);

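  // Dispatch on the operator type chosen at create time.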
  switch (opdata->operator_object->type) {
    case xnn_operator_type_resize_bilinear_nchw_f32:
      return xnn_setup_resize_bilinear2d_nchw_f32(
        opdata->operator_object,
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        opdata->output_height,
        opdata->output_width,
        input_data,
        output_data,
        threadpool);
      break;
    case xnn_operator_type_resize_bilinear_nhwc_f32:
      return xnn_setup_resize_bilinear2d_nhwc_f32(
        opdata->operator_object,
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        opdata->output_height,
        opdata->output_width,
        input_data,
        output_data,
        threadpool);
      break;
#ifndef XNN_NO_S8_OPERATORS
    case xnn_operator_type_resize_bilinear_nhwc_s8:
      return xnn_setup_resize_bilinear2d_nhwc_s8(
        opdata->operator_object,
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        opdata->output_height,
        opdata->output_width,
        input_data,
        output_data,
        threadpool);
      break;
#endif  // !defined(XNN_NO_S8_OPERATORS)
#ifndef XNN_NO_U8_OPERATORS
    case xnn_operator_type_resize_bilinear_nhwc_u8:
      return xnn_setup_resize_bilinear2d_nhwc_u8(
        opdata->operator_object,
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        opdata->output_height,
        opdata->output_width,
        input_data,
        output_data,
        threadpool);
      break;
#endif  // !defined(XNN_NO_U8_OPERATORS)
    default:
      XNN_UNREACHABLE;
  }
}

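// Public subgraph API: validates the arguments, then appends a static
// resize-bilinear-2d node to the subgraph. The node is lowered to a concrete
// operator later through the create/setup callbacks installed below.
//
// Minimal usage sketch (assumes XNNPACK has been initialized and that
// `subgraph`, `input_id`, and `output_id` were set up beforehand, e.g. via
// xnn_create_subgraph() and xnn_define_tensor_value()):
//
//   enum xnn_status status = xnn_define_static_resize_bilinear_2d(
//     subgraph,
//     /*new_height=*/224, /*new_width=*/224,
//     input_id, output_id,
//     /*flags=*/0);
//   assert(status == xnn_status_success);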
enum xnn_status xnn_define_static_resize_bilinear_2d(
  xnn_subgraph_t subgraph,
  size_t new_height,
  size_t new_width,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
      xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d));
    return xnn_status_uninitialized;
  }

  if (new_width == 0 || new_height == 0) {
    xnn_log_error(
      "failed to define %s operator with %zux%zu output: output dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), new_width, new_height);
    return xnn_status_invalid_parameter;
  }

  if (max(new_width, new_height) >= 16777216) {
    xnn_log_error(
      "failed to define %s operator with %zux%zu output: output dimensions must be below 2**24",
      xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), new_width, new_height);
    return xnn_status_unsupported_parameter;
  }

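  // Only the two resize-mode flags are accepted, and at most one of them may
  // be set on a node.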
  const uint32_t supported_flags = XNN_FLAG_TENSORFLOW_LEGACY_MODE | XNN_FLAG_ALIGN_CORNERS;
  const uint32_t invalid_flags = flags & ~supported_flags;
  if (invalid_flags != 0) {
    xnn_log_error(
      "failed to define %s operator with 0x%08" PRIx32 " flags: invalid flags 0x%08" PRIx32,
      xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), flags, invalid_flags);
    return xnn_status_invalid_parameter;
  }

  const uint32_t exclusive_flags = XNN_FLAG_TENSORFLOW_LEGACY_MODE | XNN_FLAG_ALIGN_CORNERS;
  if ((flags & exclusive_flags) == exclusive_flags) {
    xnn_log_error(
      "failed to define %s operator with both XNN_FLAG_TENSORFLOW_LEGACY_MODE and XNN_FLAG_ALIGN_CORNERS flags: "
      "the two flags are mutually exclusive",
      xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d));
    return xnn_status_invalid_parameter;
  }

  if (input_id >= subgraph->num_values) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
      xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), input_id);
    return xnn_status_invalid_parameter;
  }

  const struct xnn_value* input_value = &subgraph->values[input_id];
  if (input_value->type != xnn_value_type_dense_tensor) {
    xnn_log_error(
      "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
      xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), input_id, input_value->type);
    return xnn_status_invalid_parameter;
  }

  switch (input_value->datatype) {
    case xnn_datatype_fp32:
#ifndef XNN_NO_S8_OPERATORS
    case xnn_datatype_qint8:
#endif  // !defined(XNN_NO_S8_OPERATORS)
#ifndef XNN_NO_U8_OPERATORS
    case xnn_datatype_quint8:
#endif  // !defined(XNN_NO_U8_OPERATORS)
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }

  if (output_id >= subgraph->num_values) {
    xnn_log_error(
      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
      xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), output_id);
    return xnn_status_invalid_parameter;
  }

  const struct xnn_value* output_value = &subgraph->values[output_id];
  if (output_value->type != xnn_value_type_dense_tensor) {
    xnn_log_error(
      "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value type %d (expected dense tensor)",
      xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), output_id, output_value->type);
    return xnn_status_invalid_parameter;
  }

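  // Map the output datatype to the compute type recorded on the node; the
  // quantized cases are only available when the corresponding operators are
  // compiled in.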
  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
#ifndef XNN_NO_S8_OPERATORS
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
#endif  // !defined(XNN_NO_S8_OPERATORS)
#ifndef XNN_NO_U8_OPERATORS
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
#endif  // !defined(XNN_NO_U8_OPERATORS)
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }

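  // For quantized datatypes, require identical quantization parameters
  // (zero point and scale) on the input and output tensors.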
#if !defined(XNN_NO_S8_OPERATORS) || !defined(XNN_NO_U8_OPERATORS)
  if (output_value->datatype == xnn_datatype_qint8 || output_value->datatype == xnn_datatype_quint8) {
    if (input_value->quantization.zero_point != output_value->quantization.zero_point) {
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
        ": mismatching zero point quantization parameter across input (%" PRId32 ") and output (%" PRId32 ")",
        xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), input_id, output_id,
        input_value->quantization.zero_point, output_value->quantization.zero_point);
      return xnn_status_invalid_parameter;
    }
    if (input_value->quantization.scale != output_value->quantization.scale) {
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 " and output ID #%" PRIu32
        ": mismatching scale quantization parameter across input (%.7g) and output (%.7g)",
        xnn_node_type_to_string(xnn_node_type_static_resize_bilinear_2d), input_id, output_id,
        input_value->quantization.scale, output_value->quantization.scale);
      return xnn_status_invalid_parameter;
    }
  }
#endif  // !defined(XNN_NO_S8_OPERATORS) || !defined(XNN_NO_U8_OPERATORS)

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }

  node->params.static_resize.new_height = new_height;
  node->params.static_resize.new_width = new_width;

  node->type = xnn_node_type_static_resize_bilinear_2d;
  node->compute_type = compute_type;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;

  node->create = create_resize_bilinear_operator;
  node->setup = setup_resize_bilinear_operator;

  return xnn_status_success;
}