/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_VALIDATE_HELPERS_H
#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_VALIDATE_HELPERS_H

#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/nodes/Nodes.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"

namespace arm_compute
{
namespace graph
{
namespace backends
{
namespace detail
{
/** Returns backing tensor info of a given tensor
 *
 * @param[in] tensor Tensor to extract the backing tensor from
 *
 * @return Backing tensor info if present, else nullptr
 */
inline arm_compute::ITensorInfo *get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
{
    return ((tensor == nullptr) || (tensor->handle() == nullptr)) ? nullptr : tensor->handle()->tensor().info();
}

/** Validates an ArgMinMax layer node
 *
 * @tparam ArgMinMaxLayer ArgMinMax layer function type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename ArgMinMaxLayer>
Status validate_arg_min_max_layer(ArgMinMaxLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ArgMinMaxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo *input  = detail::get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));

    // Validate function
    return ArgMinMaxLayer::validate(input, node.axis(), output, node.reduction_operation());
}

/** Validates a Bounding Box Transform layer node
 *
 * @tparam BoundingBoxTransformLayer Bounding Box Transform layer function type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename BoundingBoxTransformLayer>
Status validate_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating BoundingBoxTransformLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo      *input     = get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo      *deltas    = get_backing_tensor_info(node.input(1));
    arm_compute::ITensorInfo      *output    = get_backing_tensor_info(node.output(0));
    const BoundingBoxTransformInfo bbox_info = node.info();

    return BoundingBoxTransformLayer::validate(input, output, deltas, bbox_info);
}

/** Validates a Channel Shuffle layer node
 *
 * @tparam ChannelShuffleLayer Channel Shuffle layer function type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename ChannelShuffleLayer>
Status validate_channel_shuffle_layer(ChannelShuffleLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ChannelShuffle node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo *input      = get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output     = get_backing_tensor_info(node.output(0));
    const unsigned int        num_groups = node.num_groups();

    return ChannelShuffleLayer::validate(input, output, num_groups);
}

/** Validates a Convolution layer node
 *
 * @tparam ConvolutionLayer         Default Convolution layer function type
 * @tparam DirectConvolutionLayer   Direct Convolution layer function type
 * @tparam GEMMConvolutionLayer     GEMM Convolution layer function type
 * @tparam WinogradConvolutionLayer Winograd Convolution layer function type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename ConvolutionLayer, typename DirectConvolutionLayer, typename GEMMConvolutionLayer, typename WinogradConvolutionLayer>
Status validate_convolution_layer(ConvolutionLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo *input   = get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *weights = get_backing_tensor_info(node.input(1));
    arm_compute::ITensorInfo *biases  = get_backing_tensor_info(node.input(2));
    arm_compute::ITensorInfo *output  = get_backing_tensor_info(node.output(0));

    if(is_data_type_quantized_asymmetric(input->data_type()))
    {
        biases->set_data_type(DataType::S32);
    }

    const PadStrideInfo     conv_info      = node.convolution_info();
    const ConvolutionMethod conv_algorithm = node.convolution_method();
    const bool              fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const unsigned int      num_groups     = node.num_groups();

    // Validate function
    Status status{};
    switch(conv_algorithm)
    {
        case ConvolutionMethod::Direct:
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
            status = DirectConvolutionLayer::validate(input, weights, biases, output, conv_info);
            break;
        case ConvolutionMethod::GEMM:
            status = GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info,
                                                    WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), num_groups);
            break;
        case ConvolutionMethod::Winograd:
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
            status = WinogradConvolutionLayer::validate(input, weights, biases, output, conv_info, ActivationLayerInfo(), fast_math);
            break;
        case ConvolutionMethod::Default:
            status = ConvolutionLayer::validate(input, weights, biases, output, conv_info,
                                                WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), fast_math, num_groups);
            break;
        default:
            ARM_COMPUTE_RETURN_ERROR_MSG("Unsupported convolution method");
    }

    return status;
}
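
/* Example (illustrative sketch, not part of this header): a backend's node validator can
 * dispatch to the helper above by instantiating it with that backend's function types. The
 * NEON pairing below is only an assumed example; each backend supplies its own types.
 *
 *     case NodeType::ConvolutionLayer:
 *         return detail::validate_convolution_layer<NEConvolutionLayer,
 *                                                   NEDirectConvolutionLayer,
 *                                                   NEGEMMConvolutionLayer,
 *                                                   NEWinogradConvolutionLayer>(
 *             *polymorphic_downcast<ConvolutionLayerNode *>(node));
 */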

/** Validates a Depthwise Convolution layer node
 *
 * @tparam DepthwiseConvolutionLayer Default Depthwise Convolution layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename DepthwiseConvolutionLayer>
Status validate_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo *input   = detail::get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *weights = detail::get_backing_tensor_info(node.input(1));
    arm_compute::ITensorInfo *biases  = get_backing_tensor_info(node.input(2));
    arm_compute::ITensorInfo *output  = get_backing_tensor_info(node.output(0));

    const PadStrideInfo              conv_info        = node.convolution_info();
    const DepthwiseConvolutionMethod dwc_algorithm    = node.depthwise_convolution_method();
    const int                        depth_multiplier = node.depth_multiplier();

    // Validate function
    Status status{};
    switch(dwc_algorithm)
    {
        case DepthwiseConvolutionMethod::Default:
        case DepthwiseConvolutionMethod::Optimized3x3:
            status = DepthwiseConvolutionLayer::validate(input, weights, biases, output, conv_info, depth_multiplier);
            break;
        default:
            ARM_COMPUTE_RETURN_ERROR_MSG("Unsupported depthwise convolution method");
    }

    return status;
}
/** Validates a depth to space layer node
 *
 * @tparam DepthToSpaceLayer DepthToSpace layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename DepthToSpaceLayer>
Status validate_depth_to_space_layer(DepthToSpaceLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DepthToSpaceLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));

    return DepthToSpaceLayer::validate(input, output, node.block_shape());
}
/** Validates a dequantize layer node
 *
 * @tparam DequantizationLayer Dequantize layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename DequantizationLayer>
Status validate_dequantization_layer(DequantizationLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DequantizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));

    return DequantizationLayer::validate(input, output);
}
/** Validates a detection output layer node
 *
 * @tparam DetectionOutputLayer DetectionOutput layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename DetectionOutputLayer>
Status validate_detection_output_layer(DetectionOutputLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo      *input0      = get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo      *input1      = get_backing_tensor_info(node.input(1));
    arm_compute::ITensorInfo      *input2      = get_backing_tensor_info(node.input(2));
    arm_compute::ITensorInfo      *output      = get_backing_tensor_info(node.output(0));
    const DetectionOutputLayerInfo detect_info = node.detection_output_info();

    return DetectionOutputLayer::validate(input0, input1, input2, output, detect_info);
}
/** Validates a detection post process layer node
 *
 * @tparam DetectionPostProcessLayer DetectionPostProcess layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename DetectionPostProcessLayer>
Status validate_detection_post_process_layer(DetectionPostProcessLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionPostProcessLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 4);

    // Extract IO and info
    arm_compute::ITensorInfo           *input0      = get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo           *input1      = get_backing_tensor_info(node.input(1));
    arm_compute::ITensorInfo           *input2      = get_backing_tensor_info(node.input(2));
    arm_compute::ITensorInfo           *output0     = get_backing_tensor_info(node.output(0));
    arm_compute::ITensorInfo           *output1     = get_backing_tensor_info(node.output(1));
    arm_compute::ITensorInfo           *output2     = get_backing_tensor_info(node.output(2));
    arm_compute::ITensorInfo           *output3     = get_backing_tensor_info(node.output(3));
    const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();

    return DetectionPostProcessLayer::validate(input0, input1, input2, output0, output1, output2, output3, detect_info);
}

/** Validates a Generate Proposals layer node
 *
 * @tparam GenerateProposalsLayer Generate Proposals layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename GenerateProposalsLayer>
Status validate_generate_proposals_layer(GenerateProposalsLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating GenerateProposalsLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 3);

    // Extract IO and info
    arm_compute::ITensorInfo  *scores              = detail::get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo  *deltas              = detail::get_backing_tensor_info(node.input(1));
    arm_compute::ITensorInfo  *anchors             = detail::get_backing_tensor_info(node.input(2));
    arm_compute::ITensorInfo  *proposals           = get_backing_tensor_info(node.output(0));
    arm_compute::ITensorInfo  *scores_out          = get_backing_tensor_info(node.output(1));
    arm_compute::ITensorInfo  *num_valid_proposals = get_backing_tensor_info(node.output(2));
    const GenerateProposalsInfo info               = node.info();

    return GenerateProposalsLayer::validate(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
}

/** Validates an L2Normalization layer node
 *
 * @tparam L2NormalizeLayer L2Normalization layer function type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename L2NormalizeLayer>
Status validate_l2_normalize_layer(L2NormalizeLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating L2NormalizeLayerNode node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo *input   = detail::get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output  = get_backing_tensor_info(node.output(0));
    int                       axis    = node.axis();
    float                     epsilon = node.epsilon();

    // Validate function
    return L2NormalizeLayer::validate(input, output, axis, epsilon);
}

/** Validates a NormalizePlanarYUV layer node
 *
 * @tparam NormalizePlanarYUVLayer layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename NormalizePlanarYUVLayer>
Status validate_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating NormalizePlanarYUVLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo *input  = detail::get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *mean   = detail::get_backing_tensor_info(node.input(1));
    arm_compute::ITensorInfo *std    = detail::get_backing_tensor_info(node.input(2));
    arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));

    // Validate function
    return NormalizePlanarYUVLayer::validate(input, output, mean, std);
}

/** Validates a pad layer node
 *
 * @tparam PadLayer Pad layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename PadLayer>
Status validate_pad_layer(PadLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PadLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo *input   = get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output  = get_backing_tensor_info(node.output(0));
    const PaddingList        &padding = node.padding();

    return PadLayer::validate(input, output, padding);
}

/** Validates a permute layer node
 *
 * @tparam PermuteLayer Permute layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename PermuteLayer>
Status validate_permute_layer(PermuteLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PermuteLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
    const PermutationVector  &perm   = node.permutation_vector();

    return PermuteLayer::validate(input, output, perm);
}

/** Validates a PRelu layer node
 *
 * @tparam PReluLayer PRelu layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename PReluLayer>
Status validate_prelu_layer(PReluLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PRelu node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *alpha  = get_backing_tensor_info(node.input(1));
    arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));

    return PReluLayer::validate(input, alpha, output);
}

/** Validates a priorbox layer node
 *
 * @tparam PriorBoxLayer PriorBox layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename PriorBoxLayer>
Status validate_priorbox_layer(PriorBoxLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PriorBoxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo *input0     = get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *input1     = get_backing_tensor_info(node.input(1));
    arm_compute::ITensorInfo *output     = get_backing_tensor_info(node.output(0));
    const PriorBoxLayerInfo   prior_info = node.priorbox_info();

    return PriorBoxLayer::validate(input0, input1, output, prior_info);
}

/** Validates a Quantization layer node
 *
 * @tparam QuantizationLayer Quantization layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename QuantizationLayer>
Status validate_quantization_layer(QuantizationLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating QuantizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract input and output
    arm_compute::ITensorInfo *input  = detail::get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));

    // Validate function
    return QuantizationLayer::validate(input, output);
}

/** Validates a Reduction operation layer node
 *
 * @tparam ReductionLayer Reduction layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename ReductionLayer>
Status validate_reduction_operation_layer(ReductionLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReductionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);

    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract input and output
    arm_compute::ITensorInfo *input  = detail::get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));

    // Validate function
    return ReductionLayer::validate(input, output, node.axis(), node.op(), node.keep_dims());
}

/** Validates a Reorg layer node
 *
 * @tparam ReorgLayer Reorg layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename ReorgLayer>
Status validate_reorg_layer(ReorgLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReorgLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract input and output
    arm_compute::ITensorInfo *input  = detail::get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));

    // Validate function
    return ReorgLayer::validate(input, output, node.stride());
}

/** Validates a Reshape layer node
 *
 * @tparam ReshapeLayer Reshape layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename ReshapeLayer>
Status validate_reshape_layer(ReshapeLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReshapeLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract input and output
    arm_compute::ITensorInfo *input  = detail::get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output = detail::get_backing_tensor_info(node.output(0));

    // Validate function
    return ReshapeLayer::validate(input, output);
}

/** Validates a ROI Align layer node
 *
 * @tparam ROIAlignLayer ROIAlign layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename ROIAlignLayer>
Status validate_roi_align_layer(ROIAlignLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ROIAlignLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract input and output
    arm_compute::ITensorInfo *input      = detail::get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *rois       = detail::get_backing_tensor_info(node.input(1));
    arm_compute::ITensorInfo *output     = detail::get_backing_tensor_info(node.output(0));
    const ROIPoolingLayerInfo &pool_info = node.pooling_info();

    // Validate function
    return ROIAlignLayer::validate(input, rois, output, pool_info);
}

/** Validates a Slice layer node
 *
 * @tparam SliceLayer Slice layer function type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename SliceLayer>
Status validate_slice_layer(SliceLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating Slice node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
    const Coordinates         starts = node.starts();
    const Coordinates         ends   = node.ends();

    return SliceLayer::validate(input, output, starts, ends);
}

/** Validates a Strided Slice layer node
 *
 * @tparam StridedSliceLayer Strided Slice layer function type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename StridedSliceLayer>
Status validate_strided_slice_layer(StridedSliceLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating StridedSlice node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    arm_compute::ITensorInfo   *input   = get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo   *output  = get_backing_tensor_info(node.output(0));
    const Coordinates           starts  = node.starts();
    const Coordinates           ends    = node.ends();
    const BiStrides             strides = node.strides();
    const StridedSliceLayerInfo info    = node.strided_slice_info();

    return StridedSliceLayer::validate(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
}

/** Validates an Upsample layer node
 *
 * @tparam UpsampleLayer Upsample layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename UpsampleLayer>
Status validate_upsample_layer(UpsampleLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating UpsampleLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract input and output
    arm_compute::ITensorInfo *input  = detail::get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));

    // Validate function
    return UpsampleLayer::validate(input, output, node.info(), node.upsampling_policy());
}
/** Validates a YOLO layer node
 *
 * @tparam YOLOLayer YOLO layer type
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename YOLOLayer>
Status validate_yolo_layer(YOLOLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating YOLOLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract input and output
    arm_compute::ITensorInfo *input  = detail::get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));

    // Validate function
    return YOLOLayer::validate(input, output, node.activation_info(), node.num_classes());
}
/** Validates an element-wise layer node
 *
 * @tparam EltwiseLayerFunctions Element-wise layer function types
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename EltwiseLayerFunctions>
Status validate_eltwise_Layer(EltwiseLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract input and output
    const arm_compute::ITensorInfo *input1         = detail::get_backing_tensor_info(node.input(0));
    const arm_compute::ITensorInfo *input2         = detail::get_backing_tensor_info(node.input(1));
    const arm_compute::ITensorInfo *output         = get_backing_tensor_info(node.output(0));
    const EltwiseOperation          eltwise_op     = node.eltwise_operation();
    const ConvertPolicy             convert_policy = node.convert_policy();
    const RoundingPolicy            round_policy   = node.rounding_policy();
    const ActivationLayerInfo       act_info       = node.fused_activation();
    const QuantizationInfo          quant_info     = node.output_quant_info();

    // Validate function
    if(eltwise_op == EltwiseOperation::Add)
    {
        return EltwiseLayerFunctions::ArithmeticAddition::validate(input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Sub)
    {
        return EltwiseLayerFunctions::ArithmeticSubtraction::validate(input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Mul)
    {
        return EltwiseLayerFunctions::PixelWiseMultiplication::validate(input1, input2, output, 1.0f, convert_policy, round_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Max)
    {
        return EltwiseLayerFunctions::ElementwiseMax::validate(input1, input2, output, act_info);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
    }
    return Status{};
}
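
/* Example (illustrative sketch, not part of this header): the EltwiseLayerFunctions template
 * argument is expected to expose the backend's element-wise function types as nested aliases.
 * The NEON names below are only an assumed pairing used for illustration.
 *
 *     struct NEEltwiseLayerFunctions
 *     {
 *         using ArithmeticAddition      = NEArithmeticAddition;
 *         using ArithmeticSubtraction   = NEArithmeticSubtraction;
 *         using PixelWiseMultiplication = NEPixelWiseMultiplication;
 *         using ElementwiseMax          = NEElementwiseMax;
 *     };
 *
 *     Status status = detail::validate_eltwise_Layer<NEEltwiseLayerFunctions>(eltwise_node);
 */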
/** Validates a unary element-wise layer node
 *
 * @tparam UnaryEltwiseLayerFunctions Unary element-wise layer function types
 *
 * @param[in] node Node to validate
 *
 * @return Status
 */
template <typename UnaryEltwiseLayerFunctions>
Status validate_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating UnaryEltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);

    // Extract input and output
    arm_compute::ITensorInfo   *input      = detail::get_backing_tensor_info(node.input(0));
    arm_compute::ITensorInfo   *output     = get_backing_tensor_info(node.output(0));
    const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;

    // Validate function
    if(eltwise_op == UnaryEltwiseOperation::Exp)
    {
        return UnaryEltwiseLayerFunctions::ExpLayer::validate(input, output);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
    }

    return Status{};
}
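
/* Example (illustrative sketch, not part of this header): the UnaryEltwiseLayerFunctions
 * template argument is expected to expose an ExpLayer alias; the backend type name below is
 * only an assumption.
 *
 *     struct NEUnaryEltwiseLayerFunctions
 *     {
 *         using ExpLayer = NEExpLayer;
 *     };
 */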
} // namespace detail
} // namespace backends
} // namespace graph
} // namespace arm_compute

#endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_VALIDATE_HELPERS_H */