// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>

#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>


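// Creates the underlying NHWC average-pooling operator (F16 or F32, depending
// on the node's compute type) and records the input/output value IDs and the
// input batch/height/width on opdata for later setup.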
static enum xnn_status create_average_pooling_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  const struct xnn_caches* caches)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);

  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

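  // Values use NHWC layout, so dim[3] is the channel count, which average
  // pooling preserves between input and output.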
  const size_t channel_dim = values[input_id].shape.dim[3];
  assert(channel_dim == values[output_id].shape.dim[3]);

  enum xnn_status status;
  switch (node->compute_type) {
#ifndef XNN_NO_F16_OPERATORS
    case xnn_compute_type_fp16:
      status = xnn_create_average_pooling2d_nhwc_f16(
        node->params.pooling_2d.padding_top,
        node->params.pooling_2d.padding_right,
        node->params.pooling_2d.padding_bottom,
        node->params.pooling_2d.padding_left,
        node->params.pooling_2d.pooling_height,
        node->params.pooling_2d.pooling_width,
        node->params.pooling_2d.stride_height,
        node->params.pooling_2d.stride_width,
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
#endif  // !defined(XNN_NO_F16_OPERATORS)
    case xnn_compute_type_fp32:
      status = xnn_create_average_pooling2d_nhwc_f32(
        node->params.pooling_2d.padding_top,
        node->params.pooling_2d.padding_right,
        node->params.pooling_2d.padding_bottom,
        node->params.pooling_2d.padding_left,
        node->params.pooling_2d.pooling_height,
        node->params.pooling_2d.pooling_width,
        node->params.pooling_2d.stride_height,
        node->params.pooling_2d.stride_width,
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->activation.output_min,
        node->activation.output_max,
        node->flags,
        &opdata->operator_objects[0]);
      break;
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = values[input_id].shape.dim[0];
    opdata->input_height = values[input_id].shape.dim[1];
    opdata->input_width = values[input_id].shape.dim[2];
    opdata->inputs[0] = input_id;
    opdata->outputs[0] = output_id;
  }
  return status;
}

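// Binds the created operator to the runtime's input and output blobs and
// configures it to run on the given threadpool.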
static enum xnn_status setup_average_pooling_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_blob* blobs,
  size_t num_blobs,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_blobs);

  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_blobs);

  const struct xnn_blob* input_blob = blobs + input_id;
  const void* input_data = input_blob->data;
  assert(input_data != NULL);

  const struct xnn_blob* output_blob = blobs + output_id;
  void* output_data = output_blob->data;
  assert(output_data != NULL);

  switch (opdata->operator_objects[0]->type) {
#ifndef XNN_NO_F16_OPERATORS
    case xnn_operator_type_average_pooling_nhwc_f16:
      return xnn_setup_average_pooling2d_nhwc_f16(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        input_data,
        output_data,
        threadpool);
#endif  // !defined(XNN_NO_F16_OPERATORS)
    case xnn_operator_type_average_pooling_nhwc_f32:
      return xnn_setup_average_pooling2d_nhwc_f32(
        opdata->operator_objects[0],
        opdata->batch_size,
        opdata->input_height,
        opdata->input_width,
        input_data,
        output_data,
        threadpool);
    default:
      XNN_UNREACHABLE;
  }
}

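// Public API: validates the pooling geometry, padding, activation range, and
// tensor IDs/datatypes, then appends an Average Pooling 2D node to the subgraph.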
enum xnn_status xnn_define_average_pooling_2d(
  xnn_subgraph_t subgraph,
  uint32_t input_padding_top,
  uint32_t input_padding_right,
  uint32_t input_padding_bottom,
  uint32_t input_padding_left,
  uint32_t pooling_height,
  uint32_t pooling_width,
  uint32_t stride_height,
  uint32_t stride_width,
  float output_min,
  float output_max,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_average_pooling_2d)) != xnn_status_success) {
    return status;
  }

  const uint32_t pooling_size = pooling_height * pooling_width;
  if (pooling_size == 0) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
      "pooling size dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_average_pooling_2d), pooling_width, pooling_height);
    return xnn_status_invalid_parameter;
  }

  if (pooling_size == 1) {
    xnn_log_error(
      "failed to define %s operator with 1 pooling element: 1x1 pooling is meaningless",
      xnn_node_type_to_string(xnn_node_type_average_pooling_2d));
    return xnn_status_invalid_parameter;
  }

  if (stride_height == 0 || stride_width == 0) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " stride: "
      "stride dimensions must be non-zero",
      xnn_node_type_to_string(xnn_node_type_average_pooling_2d), stride_width, stride_height);
    return xnn_status_invalid_parameter;
  }

  if (stride_height > pooling_height) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 " stride height: must not exceed pooling height %" PRIu32,
      xnn_node_type_to_string(xnn_node_type_average_pooling_2d), stride_height, pooling_height);
    return xnn_status_invalid_parameter;
  }

  if (stride_width > pooling_width) {
    xnn_log_error(
      "failed to define %s operator with %" PRIu32 " stride width: must not exceed pooling width %" PRIu32,
      xnn_node_type_to_string(xnn_node_type_average_pooling_2d), stride_width, pooling_width);
    return xnn_status_invalid_parameter;
  }

  status = xnn_subgraph_check_output_min_max(xnn_node_type_average_pooling_2d, output_min, output_max);
  if (status != xnn_status_success) {
    return status;
  }

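  // XNN_FLAG_TENSORFLOW_SAME_PADDING requests implicitly computed padding, so
  // it cannot be combined with a non-zero explicit padding specification.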
  const bool any_padding = (input_padding_left | input_padding_top | input_padding_right | input_padding_bottom) != 0;
  if ((flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
    if (any_padding) {
      xnn_log_error(
        "failed to define %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32 " padding: "
        "TensorFlow SAME padding can't be combined with explicit padding specification",
        xnn_node_type_to_string(xnn_node_type_average_pooling_2d),
        input_padding_top, input_padding_left, input_padding_bottom, input_padding_right);
      return xnn_status_invalid_parameter;
    }
  }

  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_average_pooling_2d, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }

  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_average_pooling_2d, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (input_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_average_pooling_2d), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }

  status = xnn_subgraph_check_output_node_id(xnn_node_type_average_pooling_2d, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }

  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_average_pooling_2d, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_average_pooling_2d), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }

  node->type = xnn_node_type_average_pooling_2d;
  node->compute_type = xnn_compute_type_fp32;
  node->params.pooling_2d.padding_top = input_padding_top;
  node->params.pooling_2d.padding_right = input_padding_right;
  node->params.pooling_2d.padding_bottom = input_padding_bottom;
  node->params.pooling_2d.padding_left = input_padding_left;
  node->params.pooling_2d.pooling_height = pooling_height;
  node->params.pooling_2d.pooling_width = pooling_width;
  node->params.pooling_2d.stride_height = stride_height;
  node->params.pooling_2d.stride_width = stride_width;
  node->activation.output_min = output_min;
  node->activation.output_max = output_max;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;

  node->create = create_average_pooling_operator;
  node->setup = setup_average_pooling_operator;

  return xnn_status_success;
}