• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2018-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
25 #define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
26 
27 #include "arm_compute/graph/Logger.h"
28 #include "arm_compute/graph/Tensor.h"
29 #include "arm_compute/graph/TypePrinter.h"
30 #include "arm_compute/graph/Types.h"
31 #include "arm_compute/graph/Utils.h"
32 #include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
33 #include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
34 #include "arm_compute/graph/backends/Utils.h"
35 #include "arm_compute/graph/nodes/Nodes.h"
36 
37 #include "arm_compute/core/Error.h"
38 #include "arm_compute/core/Helpers.h"
39 #include "arm_compute/core/ITensorInfo.h"
40 #include "support/Cast.h"
41 
42 namespace arm_compute
43 {
44 namespace graph
45 {
46 namespace backends
47 {
48 namespace detail
49 {
50 // Address rule DR-9R5 (1579. Return by converting move constructor)
51 #if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5))
52 #define RETURN_UNIQUE_PTR(x) (x)
53 #else /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */
54 #define RETURN_UNIQUE_PTR(x) (std::move(x))
55 #endif /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */
56 
57 /** Returns backing tensor of a given tensor
58  *
59  * @tparam TargetInfo Target information
60  *
61  * @param[in] tensor Tensor to extract the backing tensor from
62  *
63  * @return Backing tensor if present else nullptr
64  */
65 template <typename TargetInfo>
get_backing_tensor(arm_compute::graph::Tensor * tensor)66 typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
67 {
68     typename TargetInfo::TensorType *backing_tensor = nullptr;
69     if(tensor != nullptr)
70     {
71         ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
72         // Get backing tensor handle
73         ITensorHandle *tensor_handle = tensor->handle();
74         // Get backing tensor
75         backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
76     }
77 
78     return backing_tensor;
79 }
80 
81 template <typename TargetInfo>
validate_node(const INode & node,size_t num_expected_inputs,size_t num_expected_outputs)82 void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
83 {
84     ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
85                                   << " Target: " << TargetInfo::TargetType
86                                   << " ID: " << node.id()
87                                   << node.name()
88                                   << std::endl);
89 
90     ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
91     ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
92     ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
93     ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
94 }
95 
96 /** Creates a backend activation layer function
97  *
98  * @tparam ActivationLayerFunction Backend activation function
99  * @tparam TargetInfo              Target-specific information
100  *
101  * @param[in] node Node to create the backend function for
102  *
103  * @return Backend activation layer function
104  */
105 template <typename ActivationLayerFunction, typename TargetInfo>
create_activation_layer(ActivationLayerNode & node)106 std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
107 {
108     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
109 
110     // Extract IO and info
111     typename TargetInfo::TensorType *input    = get_backing_tensor<TargetInfo>(node.input(0));
112     typename TargetInfo::TensorType *output   = get_backing_tensor<TargetInfo>(node.output(0));
113     const ActivationLayerInfo        act_info = node.activation_info();
114 
115     // Create function
116     auto func = support::cpp14::make_unique<ActivationLayerFunction>();
117     func->configure(input, output, act_info);
118 
119     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
120                                << node.name()
121                                << " Type: " << node.type()
122                                << " Target: " << TargetInfo::TargetType
123                                << " Data Type: " << input->info()->data_type()
124                                << " Shape: " << input->info()->tensor_shape()
125                                << " Activation function: " << act_info.activation()
126                                << " a: " << act_info.a()
127                                << " b: " << act_info.b()
128                                << " InPlace : " << is_in_place_operation(input, output)
129                                << std::endl);
130 
131     return RETURN_UNIQUE_PTR(func);
132 }
133 
134 /** Creates a backend argminmax layer function
135  *
136  * @tparam ArgMinMaxLayerFunction Backend activation function
137  * @tparam TargetInfo             Target-specific information
138  *
139  * @param[in] node Node to create the backend function for
140  *
141  * @return Backend argminmax layer function
142  */
143 template <typename ArgMinMaxLayerFunction, typename TargetInfo>
create_arg_min_max_layer(ArgMinMaxLayerNode & node)144 std::unique_ptr<IFunction> create_arg_min_max_layer(ArgMinMaxLayerNode &node)
145 {
146     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
147 
148     // Extract IO and info
149     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
150     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
151     const ReductionOperation         op     = node.reduction_operation();
152     unsigned int                     axis   = node.axis();
153 
154     // Create function
155     auto func = support::cpp14::make_unique<ArgMinMaxLayerFunction>();
156     func->configure(input, axis, output, op);
157 
158     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
159                                << node.name()
160                                << " Type: " << node.type()
161                                << " Target: " << TargetInfo::TargetType
162                                << " Data Type: " << input->info()->data_type()
163                                << " Shape: " << input->info()->tensor_shape()
164                                << " Reduction Operation: " << op
165                                << " axis: " << axis
166                                << std::endl);
167 
168     return RETURN_UNIQUE_PTR(func);
169 }
170 
171 /** Create a backend batch normalization layer function
172  *
173  * @tparam BatchNormalizationLayerFunction Backend batch normalization function
174  * @tparam TargetInfo                      Target-specific information
175  *
176  * @param[in] node Node to create the backend function for
177  *
178  * @return Backend batch normalization layer function
179  */
180 template <typename BatchNormalizationLayerFunction, typename TargetInfo>
create_batch_normalization_layer(BatchNormalizationLayerNode & node)181 std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
182 {
183     validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
184 
185     // Extract IO and info
186     typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
187     typename TargetInfo::TensorType *mean  = get_backing_tensor<TargetInfo>(node.input(1));
188     typename TargetInfo::TensorType *var   = get_backing_tensor<TargetInfo>(node.input(2));
189     typename TargetInfo::TensorType *beta  = get_backing_tensor<TargetInfo>(node.input(3));
190     typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
191 
192     typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
193     const float                      epsilon   = node.epsilon();
194     const ActivationLayerInfo        fused_act = node.fused_activation();
195 
196     // Create and configure function
197     auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
198     func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
199 
200     // Log info
201     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
202                                << node.name()
203                                << " Type: " << node.type()
204                                << " Target: " << TargetInfo::TargetType
205                                << " Data Type: " << input->info()->data_type()
206                                << " Shape: " << input->info()->tensor_shape()
207                                << " Epsilon: " << epsilon << " "
208                                << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
209                                << " InPlace: " << is_in_place_operation(input, output)
210                                << std::endl);
211 
212     return RETURN_UNIQUE_PTR(func);
213 }
214 
215 /** Create a backend batch normalization layer function
216  *
217  * @tparam BatchNormalizationLayerFunction Backend batch normalization function
218  * @tparam TargetInfo                      Target-specific information
219  *
220  * @param[in] node Node to create the backend function for
221  * @param[in] ctx  Graph context
222  *
223  * @return Backend batch normalization layer function
224  */
225 template <typename FusedLayerTypes, typename TargetInfo>
create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode & node,GraphContext & ctx)226 std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
227 {
228     validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
229 
230     // Extract IO and info
231     typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
232     typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
233     typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
234     typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
235     typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
236     typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
237     typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));
238 
239     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
240 
241     const PadStrideInfo       conv_info  = node.convolution_info();
242     const unsigned int        num_groups = node.num_groups();
243     const bool                fast_math  = node.fast_math_hint() == FastMathHint::Enabled;
244     const ActivationLayerInfo fused_act  = node.fused_activation();
245     const float               epsilon    = node.epsilon();
246 
247     // Create and configure function (we assume that functions have been validated before creation)
248     std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
249     std::unique_ptr<IFunction>      func;
250     std::string                     func_name;
251 
252     using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
253 
254     // Create and configure function
255     std::tie(func, func_name) = create_named_memory_managed_function<FType>(
256                                     std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
257 
258     // Log info
259     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
260                                << node.name()
261                                << " Type: " << node.type()
262                                << " Target: " << TargetInfo::TargetType
263                                << " Data Type: " << input->info()->data_type()
264                                << " Input shape: " << input->info()->tensor_shape()
265                                << " Weights shape: " << weights->info()->tensor_shape()
266                                << " Output shape: " << output->info()->tensor_shape()
267                                << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
268                                << std::endl);
269     return RETURN_UNIQUE_PTR(func);
270 }
271 
272 /** Create a backend fused depthwise convolution batch normalization layer function
273  *
274  * @tparam FusedLayerTypes             Fused layer types
275  * @tparam TargetInfo                  Target-specific information
276  *
277  * @param[in] node Node to create the backend function for
278  * @param[in] ctx  Graph context
279  *
280  * @return Backend fused depthwise convolution batch normalization layer function
281  */
282 template <typename FusedLayerTypes, typename TargetInfo>
create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode & node,GraphContext & ctx)283 std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
284 {
285     validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
286 
287     // Extract IO and info
288     typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
289     typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
290     typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
291     typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
292     typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
293     typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
294     typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));
295 
296     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
297 
298     const PadStrideInfo       conv_info        = node.convolution_info();
299     const unsigned int        depth_multiplier = node.depth_multiplier();
300     const ActivationLayerInfo fused_act        = node.fused_activation();
301     const float               epsilon          = node.epsilon();
302 
303     // Create and configure function (we assume that functions have been validated before creation)
304     std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
305     std::unique_ptr<IFunction>      func;
306     std::string                     func_name;
307 
308     using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
309 
310     // Create and configure function
311     std::tie(func, func_name) = create_named_memory_managed_function<FType>(
312                                     std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
313 
314     // Log info
315     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
316                                << node.name()
317                                << " Type: " << node.type()
318                                << " Target: " << TargetInfo::TargetType
319                                << " Data Type: " << input->info()->data_type()
320                                << " Input shape: " << input->info()->tensor_shape()
321                                << " Weights shape: " << weights->info()->tensor_shape()
322                                << " Output shape: " << output->info()->tensor_shape()
323                                << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
324                                << std::endl);
325     return RETURN_UNIQUE_PTR(func);
326 }
327 
328 /** Create a backend bounding box transform layer function
329  *
330  * @tparam BoundingBoxTransformLayerFunction    Backend bounding box transform function
331  * @tparam TargetInfo                           Target-specific information
332  *
333  * @param[in] node Node to create the backend function for
334  *
335  * @return Backend bounding box transform layer function
336  */
337 template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
create_bounding_box_transform_layer(BoundingBoxTransformLayerNode & node)338 std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
339 {
340     validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
341 
342     // Extract IO and info
343     typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
344     typename TargetInfo::TensorType *deltas    = get_backing_tensor<TargetInfo>(node.input(1));
345     typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
346     const BoundingBoxTransformInfo   bbox_info = node.info();
347 
348     // Create and configure function
349     auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
350     func->configure(input, output, deltas, bbox_info);
351 
352     // Log info
353     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
354                                << node.name()
355                                << " Type: " << node.type()
356                                << " Target: " << TargetInfo::TargetType
357                                << " Data Type: " << input->info()->data_type()
358                                << " Shape: " << input->info()->tensor_shape()
359                                << " BoundingBox Info img W: " << bbox_info.img_width() << " "
360                                << " BoundingBox Info img H: " << bbox_info.img_height() << " "
361                                << std::endl);
362 
363     return RETURN_UNIQUE_PTR(func);
364 }
365 
366 /** Create a backend channel shuffle layer function
367  *
368  * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
369  * @tparam TargetInfo                  Target-specific information
370  *
371  * @param[in] node Node to create the backend function for
372  *
373  * @return Backend channel shuffle layer function
374  */
375 template <typename ChannelShuffleLayerFunction, typename TargetInfo>
create_channel_shuffle_layer(ChannelShuffleLayerNode & node)376 std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
377 {
378     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
379 
380     // Extract IO and info
381     typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
382     typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
383     const unsigned int               num_groups = node.num_groups();
384 
385     // Create function
386     auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
387     func->configure(input, output, num_groups);
388 
389     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
390                                << node.name()
391                                << " Type: " << node.type()
392                                << " Target: " << TargetInfo::TargetType
393                                << " Data Type: " << input->info()->data_type()
394                                << " Shape: " << input->info()->tensor_shape()
395                                << " Num groups: " << num_groups
396                                << std::endl);
397 
398     return RETURN_UNIQUE_PTR(func);
399 }
400 
401 /** Create a backend layer concatenate function
402  *
403  * @tparam ConcatenateLayerFunction Backend concatenate function
404  * @tparam TargetInfo               Target-specific information
405  *
406  * @param[in] node Node to create the backend function for
407  *
408  * @return Backend concatenate layer function
409  */
410 template <typename ConcatenateLayerFunction, typename TargetInfo>
create_concatenate_layer(ConcatenateLayerNode & node)411 std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
412 {
413     ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
414     ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
415 
416     // Return nullptr if depth concatenate is switched off
417     if(!node.is_enabled())
418     {
419         return nullptr;
420     }
421 
422     // Extract IO and info
423     std::vector<typename TargetInfo::SrcTensorType *> inputs;
424     for(unsigned int i = 0; i < node.num_inputs(); ++i)
425     {
426         inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
427     }
428     typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
429     const DataLayout                 data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
430     const size_t                     concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
431 
432     // Create and configure function
433     auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
434     func->configure(inputs, output, concat_axis);
435 
436     // Log info
437     const bool         is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
438     std::ostringstream qss;
439     if(is_quantized)
440     {
441         qss << " Output QuantInfo: " << output->info()->quantization_info();
442     }
443     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
444                                << node.name()
445                                << " Type: " << node.type()
446                                << " Target: " << TargetInfo::TargetType
447                                << " Data Type: " << output->info()->data_type()
448                                << " Shape: " << output->info()->tensor_shape()
449                                << " Num Inputs: " << inputs.size()
450                                << " Axis: " << concat_axis
451                                << qss.str()
452                                << std::endl);
453 
454     return RETURN_UNIQUE_PTR(func);
455 }
456 
457 /** Create a backend convolution layer function
458  *
459  * @tparam ConvolutionLayerFunctions Backend convolution functions
460  * @tparam TargetInfo              Target-specific information
461  *
462  * @param[in] node Node to create the backend function for
463  * @param[in] ctx  Graph context
464  *
465  * @return Backend convolution layer function
466  */
467 template <typename ConvolutionLayerFunctions, typename TargetInfo>
create_convolution_layer(ConvolutionLayerNode & node,GraphContext & ctx)468 std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
469 {
470     validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
471 
472     // Extract IO and info
473     typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
474     typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
475     typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
476     typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
477 
478     const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
479 
480     if(is_quantized)
481     {
482         biases->info()->set_data_type(DataType::S32);
483     }
484 
485     const PadStrideInfo       conv_info      = node.convolution_info();
486     const unsigned int        num_groups     = node.num_groups();
487     const ConvolutionMethod   conv_algorithm = node.convolution_method();
488     const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
489     const ActivationLayerInfo fused_act      = node.fused_activation();
490 
491     // Create and configure function (we assume that functions have been validated before creation)
492     std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
493     std::unique_ptr<IFunction>      func;
494     std::string                     func_name;
495 
496     if(conv_algorithm == ConvolutionMethod::Winograd)
497     {
498         ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
499         std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
500                                         std::string("WinogradConvolutionLayer"), mm,
501                                         input, weights, biases, output, conv_info, fused_act, fast_math);
502     }
503     else if(conv_algorithm == ConvolutionMethod::Direct)
504     {
505         ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
506         std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
507                                         std::string("DirectConvolutionLayer"),
508                                         input, weights, biases, output, conv_info, fused_act);
509     }
510     else if(conv_algorithm == ConvolutionMethod::GEMM)
511     {
512         std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
513                                         std::string("GEMMConvolutionLayer"), mm,
514                                         input, weights, biases, output, conv_info,
515                                         WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
516     }
517     else
518     {
519         std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
520                                         std::string("GenericConvolutionLayer"), mm,
521                                         input, weights, biases, output, conv_info,
522                                         WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
523     }
524 
525     // Log info
526     std::ostringstream qss;
527     if(is_quantized)
528     {
529         qss << " Input QuantInfo: " << input->info()->quantization_info()
530             << " Weights QuantInfo: " << weights->info()->quantization_info()
531             << " Output QuantInfo: " << output->info()->quantization_info();
532     }
533     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
534                                << node.name()
535                                << " Type: " << func_name
536                                << " Target: " << TargetInfo::TargetType
537                                << " Data Type: " << input->info()->data_type()
538                                << " Groups: " << num_groups
539                                << " Input shape: " << input->info()->tensor_shape()
540                                << " Weights shape: " << weights->info()->tensor_shape()
541                                << " Output shape: " << output->info()->tensor_shape()
542                                << qss.str()
543                                << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
544                                << std::endl);
545     return RETURN_UNIQUE_PTR(func);
546 }
547 
548 /** Create a backend deconvolution layer function
549  *
550  * @tparam DeconvolutionLayerFunction Backend deconvolution function
551  * @tparam TargetInfo                 Target-specific information
552  *
553  * @param[in] node Node to create the backend function for
554  * @param[in] ctx  Graph context
555  *
556  * @return Backend deconvolution layer function
557  */
558 template <typename DeconvolutionLayerFunction, typename TargetInfo>
create_deconvolution_layer(DeconvolutionLayerNode & node,GraphContext & ctx)559 std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
560 {
561     validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
562 
563     // Extract IO and info
564     typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
565     typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
566     typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
567     typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
568 
569     const PadStrideInfo deconv_info = node.deconvolution_info();
570 
571     // Create and configure function (we assume that functions have been validated before creation)
572     std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
573     std::unique_ptr<IFunction>      func;
574 
575     std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
576                                       std::string(), mm,
577                                       input, weights, biases, output, deconv_info);
578 
579     // Log info
580     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
581                                << node.name()
582                                << " Type: " << node.type()
583                                << " Target: " << TargetInfo::TargetType
584                                << " Data Type: " << input->info()->data_type()
585                                << " Input shape: " << input->info()->tensor_shape()
586                                << " Weights shape: " << weights->info()->tensor_shape()
587                                << " Output shape: " << output->info()->tensor_shape()
588                                << std::endl);
589     return func;
590 }
591 
592 /** Create a backend layer depth-wise convolution function
593  *
 * @tparam DepthwiseConvolutionLayer Backend depthwise convolution function
595  * @tparam TargetInfo                         Target-specific information
596  *
597  * @param[in] node Node to create the backend function for
598  *
599  * @return Backend depth-wise convolution layer function
600  */
template <typename DepthwiseConvolutionLayer, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // For asymmetric quantized inputs the bias tensor's declared data type is forced to S32
    // before configuration. NOTE(review): this mutates the backing tensor info in place and
    // must happen before create_named_function() below; also assumes biases is non-null in
    // the quantized path — TODO confirm.
    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;

    std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
                                    std::string("DepthwiseConvolutionLayer"),
                                    input, weights, biases, output, conv_info, depth_multiplier, fused_act);

    // Log info; quantization details are only appended for quantized graphs
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Depth multiplier: " << depth_multiplier
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}
653 
654 /** Create a backend depth to space layer function
655  *
 * @tparam DepthToSpaceLayerFunction Backend depth to space function
657  * @tparam TargetInfo            Target-specific information
658  *
659  * @param[in] node Node to create the backend function for
660  *
661  * @return Backend depth to space layer function
662  */
663 template <typename DepthToSpaceLayerFunction, typename TargetInfo>
create_depth_to_space_layer(DepthToSpaceLayerNode & node)664 std::unique_ptr<IFunction> create_depth_to_space_layer(DepthToSpaceLayerNode &node)
665 {
666     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
667 
668     // Extract IO and info
669     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
670     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
671 
672     ARM_COMPUTE_ERROR_ON(input == nullptr);
673     ARM_COMPUTE_ERROR_ON(output == nullptr);
674 
675     // Create and configure function
676     auto func = support::cpp14::make_unique<DepthToSpaceLayerFunction>();
677     func->configure(input, output, node.block_shape());
678 
679     // Log info
680     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
681                                << node.name()
682                                << " Type: " << node.type()
683                                << " Target: " << TargetInfo::TargetType
684                                << " Data Type: " << input->info()->data_type()
685                                << " Input shape: " << input->info()->tensor_shape()
686                                << " Block Size: " << node.block_shape()
687                                << " Output shape: " << output->info()->tensor_shape()
688                                << std::endl);
689 
690     return RETURN_UNIQUE_PTR(func);
691 }
692 
693 /** Create a backend dequantize layer function
694  *
 * @tparam DequantizationLayerFunction Backend dequantize function
696  * @tparam TargetInfo          Target-specific information
697  *
698  * @param[in] node Node to create the backend function for
699  *
700  * @return Backend dequantize layer function
701  */
702 template <typename DequantizationLayerFunction, typename TargetInfo>
create_dequantization_layer(DequantizationLayerNode & node)703 std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
704 {
705     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
706 
707     // Extract IO and info
708     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
709     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
710 
711     ARM_COMPUTE_ERROR_ON(input == nullptr);
712     ARM_COMPUTE_ERROR_ON(output == nullptr);
713 
714     // Create and configure function
715     auto func = support::cpp14::make_unique<DequantizationLayerFunction>();
716     func->configure(input, output);
717 
718     // Log info
719     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
720                                << node.name()
721                                << " Type: " << node.type()
722                                << " Target: " << TargetInfo::TargetType
723                                << " Data Type: " << input->info()->data_type()
724                                << " Input shape: " << input->info()->tensor_shape()
725                                << " Input quantization info: " << output->info()->quantization_info()
726                                << " Output shape: " << output->info()->tensor_shape()
727                                << std::endl);
728 
729     return RETURN_UNIQUE_PTR(func);
730 }
731 /** Create a backend detection output layer function
732  *
 * @tparam DetectionOutputLayerFunction Backend detection output function
734  * @tparam TargetInfo           Target-specific information
735  *
736  * @param[in] node Node to create the backend function for
737  *
738  * @return Backend detection output layer function
739  */
740 template <typename DetectionOutputLayerFunction, typename TargetInfo>
create_detection_output_layer(DetectionOutputLayerNode & node)741 std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
742 {
743     validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
744 
745     // Extract IO and info
746     typename TargetInfo::TensorType *input0      = get_backing_tensor<TargetInfo>(node.input(0));
747     typename TargetInfo::TensorType *input1      = get_backing_tensor<TargetInfo>(node.input(1));
748     typename TargetInfo::TensorType *input2      = get_backing_tensor<TargetInfo>(node.input(2));
749     typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
750     const DetectionOutputLayerInfo   detect_info = node.detection_output_info();
751 
752     ARM_COMPUTE_ERROR_ON(input0 == nullptr);
753     ARM_COMPUTE_ERROR_ON(input1 == nullptr);
754     ARM_COMPUTE_ERROR_ON(input2 == nullptr);
755     ARM_COMPUTE_ERROR_ON(output == nullptr);
756 
757     // Create and configure function
758     auto func = support::cpp14::make_unique<DetectionOutputLayerFunction>();
759     func->configure(input0, input1, input2, output, detect_info);
760 
761     // Log info
762     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
763                                << node.name()
764                                << " Type: " << node.type()
765                                << " Target: " << TargetInfo::TargetType
766                                << " Data Type: " << input0->info()->data_type()
767                                << " Input0 shape: " << input0->info()->tensor_shape()
768                                << " Input1 shape: " << input1->info()->tensor_shape()
769                                << " Input2 shape: " << input2->info()->tensor_shape()
770                                << " Output shape: " << output->info()->tensor_shape()
771                                << " DetectionOutputLayer info: " << detect_info
772                                << std::endl);
773 
774     return RETURN_UNIQUE_PTR(func);
775 }
776 
777 /** Create a backend detection post process layer function
778  *
779  * @tparam DetectionPostProcessLayerFunction Backend detection output function
780  * @tparam TargetInfo                        Target-specific information
781  *
782  * @param[in] node Node to create the backend function for
783  *
784  * @return Backend detection post process layer function
785  */
786 template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
create_detection_post_process_layer(DetectionPostProcessLayerNode & node)787 std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
788 {
789     validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
790 
791     // Extract IO and info
792     typename TargetInfo::TensorType    *input0      = get_backing_tensor<TargetInfo>(node.input(0));
793     typename TargetInfo::TensorType    *input1      = get_backing_tensor<TargetInfo>(node.input(1));
794     typename TargetInfo::TensorType    *input2      = get_backing_tensor<TargetInfo>(node.input(2));
795     typename TargetInfo::TensorType    *output0     = get_backing_tensor<TargetInfo>(node.output(0));
796     typename TargetInfo::TensorType    *output1     = get_backing_tensor<TargetInfo>(node.output(1));
797     typename TargetInfo::TensorType    *output2     = get_backing_tensor<TargetInfo>(node.output(2));
798     typename TargetInfo::TensorType    *output3     = get_backing_tensor<TargetInfo>(node.output(3));
799     const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
800 
801     ARM_COMPUTE_ERROR_ON(input0 == nullptr);
802     ARM_COMPUTE_ERROR_ON(input1 == nullptr);
803     ARM_COMPUTE_ERROR_ON(input2 == nullptr);
804     ARM_COMPUTE_ERROR_ON(output0 == nullptr);
805     ARM_COMPUTE_ERROR_ON(output1 == nullptr);
806     ARM_COMPUTE_ERROR_ON(output2 == nullptr);
807     ARM_COMPUTE_ERROR_ON(output3 == nullptr);
808 
809     // Create and configure function
810     auto func = support::cpp14::make_unique<DetectionPostProcessLayerFunction>();
811     func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
812 
813     // Log info
814     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
815                                << node.name()
816                                << " Type: " << node.type()
817                                << " Target: " << TargetInfo::TargetType
818                                << " Data Type: " << input0->info()->data_type()
819                                << " Input0 shape: " << input0->info()->tensor_shape()
820                                << " Input1 shape: " << input1->info()->tensor_shape()
821                                << " Input2 shape: " << input2->info()->tensor_shape()
822                                << " Output0 shape: " << output0->info()->tensor_shape()
823                                << " Output1 shape: " << output1->info()->tensor_shape()
824                                << " Output2 shape: " << output2->info()->tensor_shape()
825                                << " Output3 shape: " << output3->info()->tensor_shape()
826                                << " DetectionPostProcessLayer info: " << detect_info
827                                << std::endl);
828 
829     return RETURN_UNIQUE_PTR(func);
830 }
831 
832 /** Create a backend element-wise operation layer function
833  *
834  * @tparam EltwiseFunctions Backend element-wise function
835  * @tparam TargetInfo       Target-specific information
836  *
837  * @param[in] node Node to create the backend function for
838  *
839  * @return Backend element-wise operation layer function
840  */
841 template <typename EltwiseFunctions, typename TargetInfo>
create_eltwise_layer(EltwiseLayerNode & node)842 std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
843 {
844     validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
845 
846     // Extract IO and info
847     typename TargetInfo::TensorType *input1         = get_backing_tensor<TargetInfo>(node.input(0));
848     typename TargetInfo::TensorType *input2         = get_backing_tensor<TargetInfo>(node.input(1));
849     typename TargetInfo::TensorType *output         = get_backing_tensor<TargetInfo>(node.output(0));
850     const EltwiseOperation           eltwise_op     = node.eltwise_operation();
851     const ConvertPolicy              convert_policy = node.convert_policy();
852     const ActivationLayerInfo        act_info       = node.fused_activation();
853     ARM_COMPUTE_ERROR_ON(input1 == nullptr);
854     ARM_COMPUTE_ERROR_ON(input2 == nullptr);
855     ARM_COMPUTE_ERROR_ON(output == nullptr);
856 
857     std::unique_ptr<IFunction> func = nullptr;
858     std::string                func_name;
859     if(eltwise_op == EltwiseOperation::Add)
860     {
861         std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
862                                         std::string("ArithmeticAddition"),
863                                         input1, input2, output, convert_policy, act_info);
864     }
865     else if(eltwise_op == EltwiseOperation::Sub)
866     {
867         std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
868                                         std::string("ArithmeticSubtraction"),
869                                         input1, input2, output, convert_policy, act_info);
870     }
871     else if(eltwise_op == EltwiseOperation::Mul)
872     {
873         std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
874                                         std::string("PixelWiseMultiplication"),
875                                         input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
876     }
877     else if(eltwise_op == EltwiseOperation::Max)
878     {
879         std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
880                                         std::string("ElementwiseMaximum"),
881                                         input1, input2, output, act_info);
882     }
883     else
884     {
885         ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
886     }
887 
888     // Log info
889     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
890                                << node.name()
891                                << " Type: " << node.type()
892                                << " Target: " << TargetInfo::TargetType
893                                << " Operation: " << func_name
894                                << " Data Type: " << input1->info()->data_type()
895                                << " Shape: " << input1->info()->tensor_shape()
896                                << std::endl);
897 
898     return RETURN_UNIQUE_PTR(func);
899 }
900 
901 /** Create a backend unary element-wise operation layer function
902  *
903  * @tparam UnaryEltwiseFunctions Backend unary element-wise function
904  * @tparam TargetInfo       Target-specific information
905  *
906  * @param[in] node Node to create the backend function for
907  *
908  * @return Backend unary element-wise operation layer function
909  */
910 template <typename UnaryEltwiseFunctions, typename TargetInfo>
create_unary_eltwise_layer(UnaryEltwiseLayerNode & node)911 std::unique_ptr<IFunction> create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
912 {
913     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
914 
915     // Extract IO and info
916     typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
917     typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
918     const UnaryEltwiseOperation      eltwise_op = node.eltwise_descriptor().op;
919 
920     ARM_COMPUTE_ERROR_ON(input == nullptr);
921     ARM_COMPUTE_ERROR_ON(output == nullptr);
922 
923     std::unique_ptr<IFunction> func = nullptr;
924     std::string                func_name;
925     if(eltwise_op == UnaryEltwiseOperation::Exp)
926     {
927         std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
928                                         std::string("Exp"),
929                                         input, output);
930     }
931     else
932     {
933         ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
934     }
935 
936     // Log info
937     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
938                                << node.name()
939                                << " Type: " << node.type()
940                                << " Target: " << TargetInfo::TargetType
941                                << " Operation: " << func_name
942                                << " Data Type: " << input->info()->data_type()
943                                << " Shape: " << input->info()->tensor_shape()
944                                << std::endl);
945 
946     return RETURN_UNIQUE_PTR(func);
947 }
948 
949 /** Create a backend flatten layer function
950  *
951  * @tparam FlattenLayerFunction Backend flatten function
952  * @tparam TargetInfo           Target-specific information
953  *
954  * @param[in] node Node to create the backend function for
955  *
956  * @return Backend flatten layer function
957  */
958 template <typename FlattenLayerFunction, typename TargetInfo>
create_flatten_layer(FlattenLayerNode & node)959 std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
960 {
961     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
962 
963     // Extract IO and info
964     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
965     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
966 
967     ARM_COMPUTE_ERROR_ON(input == nullptr);
968     ARM_COMPUTE_ERROR_ON(output == nullptr);
969 
970     // Create and configure function
971     auto func = support::cpp14::make_unique<FlattenLayerFunction>();
972     func->configure(input, output);
973 
974     // Log info
975     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
976                                << node.name()
977                                << " Type: " << node.type()
978                                << " Target: " << TargetInfo::TargetType
979                                << " Data Type: " << input->info()->data_type()
980                                << " Input shape: " << input->info()->tensor_shape()
981                                << " Output shape: " << output->info()->tensor_shape()
982                                << std::endl);
983 
984     return RETURN_UNIQUE_PTR(func);
985 }
986 
987 /** Create a backend fully connected layer function
988  *
989  * @tparam FullyConnectedLayerFunction Backend fully-connected function
990  * @tparam TargetInfo                  Target-specific information
991  *
992  * @param[in] node Node to create the backend function for
993  * @param[in] ctx  Graph context
994  *
995  * @return Backend fully connected layer function
996  */
template <typename FullyConnectedLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    const FullyConnectedLayerInfo    fc_info = node.info();

    // biases is deliberately not asserted here — presumably it may be nullptr for
    // bias-less fully connected layers; verify against configure()'s contract.
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(weights == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function; the backend function takes both a memory manager
    // (for transient buffers) and a raw weights-manager pointer from the graph context.
    auto wm   = get_weights_manager(ctx, TargetInfo::TargetType);
    auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
    auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
    func->configure(input, weights, biases, output, fc_info);

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // Log info; quantization details are only appended for quantized graphs
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
1042 
1043 /** Create a backend generate proposals layer function
1044  *
1045  * @tparam GenerateProposalsLayerFunction Backend generate proposals function
1046  * @tparam TargetInfo                     Target-specific information
1047  *
1048  * @param[in] node Node to create the backend function for
1049  * @param[in] ctx  Graph context
1050  *
1051  * @return Backend generate proposals layer function
1052  */
1053 template <typename GenerateProposalsLayerFunction, typename TargetInfo>
create_generate_proposals_layer(GenerateProposalsLayerNode & node,GraphContext & ctx)1054 std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
1055 {
1056     validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);
1057 
1058     // Extract IO and info
1059     typename TargetInfo::TensorType *scores              = get_backing_tensor<TargetInfo>(node.input(0));
1060     typename TargetInfo::TensorType *deltas              = get_backing_tensor<TargetInfo>(node.input(1));
1061     typename TargetInfo::TensorType *anchors             = get_backing_tensor<TargetInfo>(node.input(2));
1062     typename TargetInfo::TensorType *proposals           = get_backing_tensor<TargetInfo>(node.output(0));
1063     typename TargetInfo::TensorType *scores_out          = get_backing_tensor<TargetInfo>(node.output(1));
1064     typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
1065     const GenerateProposalsInfo      info                = node.info();
1066 
1067     ARM_COMPUTE_ERROR_ON(scores == nullptr);
1068     ARM_COMPUTE_ERROR_ON(deltas == nullptr);
1069     ARM_COMPUTE_ERROR_ON(anchors == nullptr);
1070     ARM_COMPUTE_ERROR_ON(proposals == nullptr);
1071     ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
1072 
1073     // Create and configure function
1074     auto func = support::cpp14::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1075     func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
1076 
1077     // Log info
1078     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1079                                << " Target " << TargetInfo::TargetType
1080                                << " Data Type: " << scores->info()->data_type()
1081                                << " Scores shape: " << scores->info()->tensor_shape()
1082                                << " Deltas shape: " << deltas->info()->tensor_shape()
1083                                << " Anchors shape: " << anchors->info()->tensor_shape()
1084                                << " Proposals shape: " << proposals->info()->tensor_shape()
1085                                << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
1086                                << " Scores Out shape: " << scores_out->info()->tensor_shape()
1087                                << std::endl);
1088 
1089     return RETURN_UNIQUE_PTR(func);
1090 }
1091 
/** Create a backend l2 normalization layer function
 *
 * @tparam L2NormalizeLayerFunction Backend l2 normalization function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend l2 normalization layer function
1101  */
1102 template <typename L2NormalizeLayerFunction, typename TargetInfo>
create_l2_normalize_layer(L2NormalizeLayerNode & node,GraphContext & ctx)1103 std::unique_ptr<IFunction> create_l2_normalize_layer(L2NormalizeLayerNode &node, GraphContext &ctx)
1104 {
1105     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1106 
1107     // Extract IO and info
1108     typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
1109     typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
1110     int                              axis    = node.axis();
1111     float                            epsilon = node.epsilon();
1112 
1113     ARM_COMPUTE_ERROR_ON(input == nullptr);
1114     ARM_COMPUTE_ERROR_ON(output == nullptr);
1115 
1116     // Create and configure function
1117     auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
1118     auto func = support::cpp14::make_unique<L2NormalizeLayerFunction>(mm);
1119     func->configure(input, output, axis, epsilon);
1120 
1121     // Log info
1122     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1123                                << node.name()
1124                                << " Type: " << node.type()
1125                                << " Target: " << TargetInfo::TargetType
1126                                << " Data Type: " << input->info()->data_type()
1127                                << " Input shape: " << input->info()->tensor_shape()
1128                                << " Output shape: " << output->info()->tensor_shape()
1129                                << " Axis: " << axis
1130                                << " Epsilon: " << epsilon
1131                                << std::endl);
1132 
1133     return RETURN_UNIQUE_PTR(func);
1134 }
1135 
1136 /** Create a backend normalization layer function
1137  *
1138  * @tparam NormalizationLayerFunction Backend normalization function
1139  * @tparam TargetInfo                 Target-specific information
1140  *
1141  * @param[in] node Node to create the backend function for
1142  * @param[in] ctx  Graph context
1143  *
1144  * @return Backend normalization layer function
1145  */
1146 template <typename NormalizationLayerFunction, typename TargetInfo>
create_normalization_layer(NormalizationLayerNode & node,GraphContext & ctx)1147 std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
1148 {
1149     ARM_COMPUTE_UNUSED(ctx);
1150 
1151     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1152 
1153     // Extract IO and info
1154     typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
1155     typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
1156     const NormalizationLayerInfo     norm_info = node.normalization_info();
1157     ARM_COMPUTE_ERROR_ON(input == nullptr);
1158     ARM_COMPUTE_ERROR_ON(output == nullptr);
1159 
1160     // Create and configure function
1161     auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
1162     func->configure(input, output, norm_info);
1163 
1164     // Log info
1165     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1166                                << node.name()
1167                                << " Type: " << node.type()
1168                                << " Target: " << TargetInfo::TargetType
1169                                << " Data Type: " << input->info()->data_type()
1170                                << " Input shape: " << input->info()->tensor_shape()
1171                                << " Output shape: " << output->info()->tensor_shape()
1172                                << " Normalization info: " << norm_info.type()
1173                                << std::endl);
1174 
1175     return RETURN_UNIQUE_PTR(func);
1176 }
1177 
1178 /** Create a backend normalize planar YUV layer function
1179  *
1180  * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
1181  * @tparam TargetInfo                      Target-specific information
1182  *
1183  * @param[in] node Node to create the backend function for
1184  *
 * @return Backend normalize planar YUV layer function
1186  */
1187 template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode & node)1188 std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
1189 {
1190     validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1191 
1192     // Extract IO and info
1193     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
1194     typename TargetInfo::TensorType *mean   = get_backing_tensor<TargetInfo>(node.input(1));
1195     typename TargetInfo::TensorType *std    = get_backing_tensor<TargetInfo>(node.input(2));
1196     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1197     ARM_COMPUTE_ERROR_ON(input == nullptr);
1198     ARM_COMPUTE_ERROR_ON(mean == nullptr);
1199     ARM_COMPUTE_ERROR_ON(std == nullptr);
1200     ARM_COMPUTE_ERROR_ON(output == nullptr);
1201 
1202     // Create and configure function
1203     auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
1204     func->configure(input, output, mean, std);
1205 
1206     // Log info
1207     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1208                                << node.name()
1209                                << " Type: " << node.type()
1210                                << " Target: " << TargetInfo::TargetType
1211                                << " Data Type: " << input->info()->data_type()
1212                                << " Shape: " << input->info()->tensor_shape()
1213                                << std::endl);
1214 
1215     return RETURN_UNIQUE_PTR(func);
1216 }
1217 
1218 /** Create a backend pad layer function
1219  *
1220  * @tparam PadLayerFunction Backend pad function
1221  * @tparam TargetInfo       Target-specific information
1222  *
1223  * @param[in] node Node to create the backend function for
1224  *
1225  * @return Backend pad layer function
1226  */
1227 template <typename PadLayerFunction, typename TargetInfo>
create_pad_layer(PadLayerNode & node)1228 std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
1229 {
1230     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1231 
1232     // Extract IO and info
1233     typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
1234     typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
1235     const PaddingList               &padding   = node.padding();
1236     const PixelValue                 pad_value = node.pad_value();
1237     ARM_COMPUTE_ERROR_ON(input == nullptr);
1238     ARM_COMPUTE_ERROR_ON(output == nullptr);
1239 
1240     // Create and configure function
1241     auto func = support::cpp14::make_unique<PadLayerFunction>();
1242     func->configure(input, output, padding, pad_value);
1243 
1244     // Log info
1245     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1246                                << node.name()
1247                                << " Type: " << node.type()
1248                                << " Target: " << TargetInfo::TargetType
1249                                << " Data Type: " << input->info()->data_type()
1250                                << " Input shape: " << input->info()->tensor_shape()
1251                                << " Output shape: " << output->info()->tensor_shape()
1252                                << std::endl);
1253 
1254     return RETURN_UNIQUE_PTR(func);
1255 }
1256 
1257 /** Create a backend permute layer function
1258  *
1259  * @tparam PermuteLayerFunction Backend permute function
1260  * @tparam TargetInfo           Target-specific information
1261  *
1262  * @param[in] node Node to create the backend function for
1263  *
1264  * @return Backend permute layer function
1265  */
1266 template <typename PermuteLayerFunction, typename TargetInfo>
create_permute_layer(PermuteLayerNode & node)1267 std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
1268 {
1269     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1270 
1271     // Extract IO and info
1272     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
1273     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1274     const PermutationVector         &perm   = node.permutation_vector();
1275     ARM_COMPUTE_ERROR_ON(input == nullptr);
1276     ARM_COMPUTE_ERROR_ON(output == nullptr);
1277 
1278     // Create and configure function
1279     auto func = support::cpp14::make_unique<PermuteLayerFunction>();
1280     func->configure(input, output, perm);
1281 
1282     // Log info
1283     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1284                                << node.name()
1285                                << " Type: " << node.type()
1286                                << " Target: " << TargetInfo::TargetType
1287                                << " Data Type: " << input->info()->data_type()
1288                                << " Input shape: " << input->info()->tensor_shape()
1289                                << " Output shape: " << output->info()->tensor_shape()
1290                                << " Permutation vector: " << perm
1291                                << std::endl);
1292 
1293     return RETURN_UNIQUE_PTR(func);
1294 }
1295 
1296 /** Create a backend pooling layer function
1297  *
1298  * @tparam PoolingLayerFunction Backend pooling function
1299  * @tparam TargetInfo           Target-specific information
1300  *
1301  * @param[in] node Node to create the backend function for
1302  *
1303  * @return Backend pooling layer function
1304  */
1305 template <typename PoolingLayerFunction, typename TargetInfo>
create_pooling_layer(PoolingLayerNode & node)1306 std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
1307 {
1308     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1309 
1310     // Extract IO and info
1311     typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
1312     typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
1313     const PoolingLayerInfo           pool_info = node.pooling_info();
1314     ARM_COMPUTE_ERROR_ON(input == nullptr);
1315     ARM_COMPUTE_ERROR_ON(output == nullptr);
1316 
1317     // Create and configure function
1318     auto func = support::cpp14::make_unique<PoolingLayerFunction>();
1319     func->configure(input, output, pool_info);
1320 
1321     // Log info
1322     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1323                                << node.name()
1324                                << " Type: " << node.type()
1325                                << " Target: " << TargetInfo::TargetType
1326                                << " Data Type: " << input->info()->data_type()
1327                                << " Input shape: " << input->info()->tensor_shape()
1328                                << " Output shape: " << output->info()->tensor_shape()
1329                                << " Pooling info: " << pool_info.pool_type
1330                                << std::endl);
1331 
1332     return RETURN_UNIQUE_PTR(func);
1333 }
1334 
1335 /** Create a backend PRelu layer function
1336  *
1337  * @tparam PReluFunction Backend PRelu function
1338  * @tparam TargetInfo    Target-specific information
1339  *
1340  * @param[in] node Node to create the backend function for
1341  *
1342  * @return Backend PRelu layer function
1343  */
1344 template <typename PReluFunction, typename TargetInfo>
create_prelu_layer(PReluLayerNode & node)1345 std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
1346 {
1347     validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1348 
1349     // Extract IO and info
1350     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
1351     typename TargetInfo::TensorType *alpha  = get_backing_tensor<TargetInfo>(node.input(1));
1352     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1353     ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
1354     ARM_COMPUTE_ERROR_ON(output == nullptr);
1355 
1356     // Create and configure function
1357     auto func = support::cpp14::make_unique<PReluFunction>();
1358     func->configure(input, alpha, output);
1359 
1360     // Log info
1361     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1362                                << node.name()
1363                                << " Type: " << node.type()
1364                                << " Target: " << TargetInfo::TargetType
1365                                << " Data Type: " << input->info()->data_type()
1366                                << " Input shape: " << input->info()->tensor_shape()
1367                                << " Output shape: " << output->info()->tensor_shape()
1368                                << std::endl);
1369 
1370     return RETURN_UNIQUE_PTR(func);
1371 }
1372 
1373 /** Create a backend print layer function
1374  *
1375  * @tparam TargetInfo Target-specific information
1376  *
1377  * @param[in] node Node to create the backend function for
1378  *
1379  * @return Backend print layer function
1380  */
1381 template <typename TargetInfo>
create_print_layer(PrintLayerNode & node)1382 std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
1383 {
1384     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1385 
1386     typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1387     ARM_COMPUTE_ERROR_ON(input == nullptr);
1388     ARM_COMPUTE_UNUSED(input);
1389 
1390     // Log info
1391     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1392                                << node.name()
1393                                << " Type: " << node.type()
1394                                << " Target: " << TargetInfo::TargetType
1395                                << " Data Type: " << input->info()->data_type()
1396                                << " Input shape: " << input->info()->tensor_shape()
1397                                << std::endl);
1398 
1399     return nullptr;
1400 }
1401 
1402 /** Create a backend priorbox layer function
1403  *
1404  * @tparam PriorBoxLayerFunction Backend priorbox function
1405  * @tparam TargetInfo           Target-specific information
1406  *
1407  * @param[in] node Node to create the backend function for
1408  *
1409  * @return Backend priorbox layer function
1410  */
1411 template <typename PriorBoxLayerFunction, typename TargetInfo>
create_priorbox_layer(PriorBoxLayerNode & node)1412 std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
1413 {
1414     validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1415 
1416     // Extract IO and info
1417     typename TargetInfo::TensorType *input0     = get_backing_tensor<TargetInfo>(node.input(0));
1418     typename TargetInfo::TensorType *input1     = get_backing_tensor<TargetInfo>(node.input(1));
1419     typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
1420     const PriorBoxLayerInfo          prior_info = node.priorbox_info();
1421     ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1422     ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1423     ARM_COMPUTE_ERROR_ON(output == nullptr);
1424 
1425     // Create and configure function
1426     auto func = support::cpp14::make_unique<PriorBoxLayerFunction>();
1427     func->configure(input0, input1, output, prior_info);
1428 
1429     // Log info
1430     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1431                                << node.name()
1432                                << " Type: " << node.type()
1433                                << " Target: " << TargetInfo::TargetType
1434                                << " Data Type: " << input0->info()->data_type()
1435                                << " Input0 shape: " << input0->info()->tensor_shape()
1436                                << " Input1 shape: " << input1->info()->tensor_shape()
1437                                << " Output shape: " << output->info()->tensor_shape()
1438                                << " PriorBoxLayer info: " << prior_info
1439                                << std::endl);
1440 
1441     return RETURN_UNIQUE_PTR(func);
1442 }
1443 
1444 /** Create a backend quantization layer function
1445  *
1446  * @tparam QuantizationLayerFunction Backend quantization function
1447  * @tparam TargetInfo                Target-specific information
1448  *
1449  * @param[in] node Node to create the backend function for
1450  *
1451  * @return Backend quantization layer function
1452  */
1453 template <typename QuantizationLayerFunction, typename TargetInfo>
create_quantization_layer(QuantizationLayerNode & node)1454 std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
1455 {
1456     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1457 
1458     // Extract IO and info
1459     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
1460     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1461     ARM_COMPUTE_ERROR_ON(input == nullptr);
1462     ARM_COMPUTE_ERROR_ON(output == nullptr);
1463 
1464     // Create and configure function
1465     auto func = support::cpp14::make_unique<QuantizationLayerFunction>();
1466     func->configure(input, output);
1467 
1468     // Log info
1469     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1470                                << node.name()
1471                                << " Type: " << node.type()
1472                                << " Target: " << TargetInfo::TargetType
1473                                << " Data Type: " << input->info()->data_type()
1474                                << " Input shape: " << input->info()->tensor_shape()
1475                                << " Output shape: " << output->info()->tensor_shape()
1476                                << std::endl);
1477 
1478     return RETURN_UNIQUE_PTR(func);
1479 }
1480 
1481 /** Create a backend reduction operation layer function
1482  *
1483  * @tparam ReductionOperationFunction Backend reduction operation function
1484  * @tparam TargetInfo                 Target-specific information
1485  *
1486  * @param[in] node Node to create the backend function for
1487  * @param[in] ctx  Graph context
1488  *
1489  * @return Backend reduction sum layer function
1490  */
1491 template <typename ReductionOperationFunction, typename TargetInfo>
create_reduction_operation_layer(ReductionLayerNode & node,GraphContext & ctx)1492 std::unique_ptr<IFunction> create_reduction_operation_layer(ReductionLayerNode &node, GraphContext &ctx)
1493 {
1494     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1495 
1496     // Extract IO and info
1497     typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
1498     typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
1499     ReductionOperation               op        = node.op();
1500     int                              axis      = node.axis();
1501     bool                             keep_dims = node.keep_dims();
1502     ARM_COMPUTE_ERROR_ON(input == nullptr);
1503     ARM_COMPUTE_ERROR_ON(output == nullptr);
1504 
1505     // Create and configure function
1506     auto func = support::cpp14::make_unique<ReductionOperationFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1507     func->configure(input, output, axis, op, keep_dims);
1508 
1509     // Log info
1510     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1511                                << node.name()
1512                                << " Type: " << node.type()
1513                                << " Target: " << TargetInfo::TargetType
1514                                << " Data Type: " << input->info()->data_type()
1515                                << " Input shape: " << input->info()->tensor_shape()
1516                                << " Output shape: " << output->info()->tensor_shape()
1517                                << " Operation: " << op
1518                                << " Axis: " << axis
1519                                << " Keep dimensions:" << keep_dims
1520                                << std::endl);
1521 
1522     return RETURN_UNIQUE_PTR(func);
1523 }
1524 
1525 /** Create a backend reorg layer function
1526  *
1527  * @tparam ReorgLayerFunction Backend reorg function
1528  * @tparam TargetInfo         Target-specific information
1529  *
1530  * @param[in] node Node to create the backend function for
1531  *
1532  * @return Backend reshape layer function
1533  */
1534 template <typename ReorgLayerFunction, typename TargetInfo>
create_reorg_layer(ReorgLayerNode & node)1535 std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
1536 {
1537     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1538 
1539     // Extract IO and info
1540     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
1541     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1542     ARM_COMPUTE_ERROR_ON(input == nullptr);
1543     ARM_COMPUTE_ERROR_ON(output == nullptr);
1544 
1545     // Create and configure function
1546     auto func = support::cpp14::make_unique<ReorgLayerFunction>();
1547     func->configure(input, output, node.stride());
1548 
1549     // Log info
1550     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1551                                << node.name()
1552                                << " Type: " << node.type()
1553                                << " Target: " << TargetInfo::TargetType
1554                                << " Data Type: " << input->info()->data_type()
1555                                << " Input shape: " << input->info()->tensor_shape()
1556                                << " Output shape: " << output->info()->tensor_shape()
1557                                << std::endl);
1558 
1559     return RETURN_UNIQUE_PTR(func);
1560 }
1561 
1562 /** Create a backend reshape layer function
1563  *
1564  * @tparam ReshapeLayerFunction Backend reshape function
1565  * @tparam TargetInfo           Target-specific information
1566  *
1567  * @param[in] node Node to create the backend function for
1568  *
1569  * @return Backend reshape layer function
1570  */
1571 template <typename ReshapeLayerFunction, typename TargetInfo>
create_reshape_layer(ReshapeLayerNode & node)1572 std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
1573 {
1574     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1575 
1576     // Extract IO and info
1577     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
1578     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1579     ARM_COMPUTE_ERROR_ON(input == nullptr);
1580     ARM_COMPUTE_ERROR_ON(output == nullptr);
1581 
1582     // Create and configure function
1583     auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
1584     func->configure(input, output);
1585 
1586     // Log info
1587     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1588                                << node.name()
1589                                << " Type: " << node.type()
1590                                << " Target: " << TargetInfo::TargetType
1591                                << " Data Type: " << input->info()->data_type()
1592                                << " Input shape: " << input->info()->tensor_shape()
1593                                << " Output shape: " << output->info()->tensor_shape()
1594                                << std::endl);
1595 
1596     return RETURN_UNIQUE_PTR(func);
1597 }
1598 
1599 /** Create a backend resize layer function
1600  *
1601  * @tparam ResizeLayerFunction Backend resize function
1602  * @tparam TargetInfo          Target-specific information
1603  *
1604  * @param[in] node Node to create the backend function for
1605  *
1606  * @return Backend resize layer function
1607  */
1608 template <typename ResizeLayerFunction, typename TargetInfo>
create_resize_layer(ResizeLayerNode & node)1609 std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
1610 {
1611     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1612 
1613     // Extract IO and info
1614     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
1615     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1616     ARM_COMPUTE_ERROR_ON(input == nullptr);
1617     ARM_COMPUTE_ERROR_ON(output == nullptr);
1618     const InterpolationPolicy policy = node.policy();
1619 
1620     // Create and configure function
1621     auto func = support::cpp14::make_unique<ResizeLayerFunction>();
1622     func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT });
1623 
1624     // Log info
1625     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1626                                << node.name()
1627                                << " Type: " << node.type()
1628                                << " Target: " << TargetInfo::TargetType
1629                                << " Data Type: " << input->info()->data_type()
1630                                << " Input shape: " << input->info()->tensor_shape()
1631                                << " Output shape: " << output->info()->tensor_shape()
1632                                << " Interpolation: " << policy
1633                                << std::endl);
1634 
1635     return RETURN_UNIQUE_PTR(func);
1636 }
1637 
1638 /** Create a backend ROI align layer function
1639  *
1640  * @tparam ROIAlignLayerFunction    ROI Align function
1641  * @tparam TargetInfo               Target-specific information
1642  *
1643  * @param[in] node Node to create the backend function for
1644  *
1645  * @return ROI Align layer function
1646  */
1647 template <typename ROIAlignLayerFunction, typename TargetInfo>
create_roi_align_layer(ROIAlignLayerNode & node)1648 std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
1649 {
1650     validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1651 
1652     // Extract IO and info
1653     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
1654     typename TargetInfo::TensorType *rois   = get_backing_tensor<TargetInfo>(node.input(1));
1655     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1656     ARM_COMPUTE_ERROR_ON(input == nullptr);
1657     ARM_COMPUTE_ERROR_ON(output == nullptr);
1658     ARM_COMPUTE_ERROR_ON(rois == nullptr);
1659 
1660     const ROIPoolingLayerInfo pool_info = node.pooling_info();
1661 
1662     // Create and configure function
1663     auto func = support::cpp14::make_unique<ROIAlignLayerFunction>();
1664 
1665     func->configure(input, rois, output, pool_info);
1666 
1667     // Log info
1668     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1669                                << node.name()
1670                                << " Type: " << node.type()
1671                                << " Target: " << TargetInfo::TargetType
1672                                << " Data Type: " << input->info()->data_type()
1673                                << " Input shape: " << input->info()->tensor_shape()
1674                                << " Output shape: " << output->info()->tensor_shape()
1675                                << " ROIs shape: " << rois->info()->tensor_shape()
1676                                << " ROIPooling width: " << pool_info.pooled_width()
1677                                << " ROIPooling height: " << pool_info.pooled_height()
1678                                << std::endl);
1679 
1680     return RETURN_UNIQUE_PTR(func);
1681 }
1682 
1683 /** Create a backend slice layer function
1684  *
1685  * @tparam SliceLayerFunction Backend slice function
1686  * @tparam TargetInfo         Target-specific information
1687  *
1688  * @param[in] node Node to create the backend function for
1689  *
1690  * @return Backend slice layer function
1691  */
1692 template <typename SliceLayerFunction, typename TargetInfo>
create_slice_layer(SliceLayerNode & node)1693 std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
1694 {
1695     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1696 
1697     // Extract IO and info
1698     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
1699     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1700     ARM_COMPUTE_ERROR_ON(input == nullptr);
1701     ARM_COMPUTE_ERROR_ON(output == nullptr);
1702 
1703     // Create and configure function
1704     auto func = support::cpp14::make_unique<SliceLayerFunction>();
1705     func->configure(input, output, node.starts(), node.ends());
1706 
1707     // Log info
1708     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1709                                << node.name()
1710                                << " Type: " << node.type()
1711                                << " Target: " << TargetInfo::TargetType
1712                                << " Data Type: " << input->info()->data_type()
1713                                << " Input shape: " << input->info()->tensor_shape()
1714                                << " Output shape: " << output->info()->tensor_shape()
1715                                << std::endl);
1716 
1717     return RETURN_UNIQUE_PTR(func);
1718 }
1719 
1720 /** Create a backend softmax layer function
1721  *
1722  * @tparam SoftmaxLayerFunction Backend softmax function
1723  * @tparam TargetInfo           Target-specific information
1724  *
1725  * @param[in] node Node to create the backend function for
1726  * @param[in] ctx  Graph context
1727  *
1728  * @return Backend softmax layer function
1729  */
1730 template <typename SoftmaxLayerFunction, typename TargetInfo>
create_softmax_layer(SoftmaxLayerNode & node,GraphContext & ctx)1731 std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1732 {
1733     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1734 
1735     // Extract IO and info
1736     typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
1737     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1738     const float                      beta   = node.beta();
1739     ARM_COMPUTE_ERROR_ON(input == nullptr);
1740     ARM_COMPUTE_ERROR_ON(output == nullptr);
1741 
1742     // Create and configure function
1743     auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1744     func->configure(input, output, beta);
1745 
1746     // Log info
1747     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1748                                << node.name()
1749                                << " Type: " << node.type()
1750                                << " Target: " << TargetInfo::TargetType
1751                                << " Data Type: " << input->info()->data_type()
1752                                << " Input shape: " << input->info()->tensor_shape()
1753                                << " Output shape: " << output->info()->tensor_shape()
1754                                << std::endl);
1755 
1756     return RETURN_UNIQUE_PTR(func);
1757 }
1758 
1759 /** Create a backend layer stack function
1760  *
1761  * @tparam StackLayerFunction Backend stack function
1762  * @tparam TargetInfo         Target-specific information
1763  *
1764  * @param[in] node Node to create the backend function for
1765  *
1766  * @return Backend stack layer function
1767  */
1768 template <typename StackLayerFunction, typename TargetInfo>
create_stack_layer(StackLayerNode & node)1769 std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
1770 {
1771     ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1772     ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1773 
1774     // Extract IO and info
1775     std::vector<typename TargetInfo::TensorType *> inputs;
1776     for(unsigned int i = 0; i < node.num_inputs(); ++i)
1777     {
1778         inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1779     }
1780     typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1781     const int                        axis   = node.axis();
1782 
1783     // Create and configure function
1784     auto func = support::cpp14::make_unique<StackLayerFunction>();
1785     func->configure(inputs, axis, output);
1786 
1787     // Log info
1788     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1789                                << node.name()
1790                                << " Type: " << node.type()
1791                                << " Target: " << TargetInfo::TargetType
1792                                << " Data Type: " << output->info()->data_type()
1793                                << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1794                                << " Output shape: " << output->info()->tensor_shape()
1795                                << " Num Inputs: " << inputs.size()
1796                                << " Axis: " << axis
1797                                << std::endl);
1798 
1799     return RETURN_UNIQUE_PTR(func);
1800 }
1801 
1802 /** Create a backend slice layer function
1803  *
1804  * @tparam StridedSliceLayerFunction Backend strided slice function
1805  * @tparam TargetInfo                Target-specific information
1806  *
1807  * @param[in] node Node to create the backend function for
1808  *
1809  * @return Backend strided slice layer function
1810  */
1811 template <typename StridedSliceLayerFunction, typename TargetInfo>
create_strided_slice_layer(StridedSliceLayerNode & node)1812 std::unique_ptr<IFunction> create_strided_slice_layer(StridedSliceLayerNode &node)
1813 {
1814     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1815 
1816     // Extract IO and info
1817     typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
1818     typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
1819     Coordinates                      starts  = node.starts();
1820     Coordinates                      ends    = node.ends();
1821     BiStrides                        strides = node.strides();
1822     StridedSliceLayerInfo            info    = node.strided_slice_info();
1823 
1824     ARM_COMPUTE_ERROR_ON(input == nullptr);
1825     ARM_COMPUTE_ERROR_ON(output == nullptr);
1826 
1827     // Create and configure function
1828     auto func = support::cpp14::make_unique<StridedSliceLayerFunction>();
1829     func->configure(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
1830 
1831     // Log info
1832     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1833                                << node.name()
1834                                << " Type: " << node.type()
1835                                << " Target: " << TargetInfo::TargetType
1836                                << " Data Type: " << input->info()->data_type()
1837                                << " Input shape: " << input->info()->tensor_shape()
1838                                << " Output shape: " << output->info()->tensor_shape()
1839                                << std::endl);
1840 
1841     return RETURN_UNIQUE_PTR(func);
1842 }
1843 
1844 /** Create a backend Upsample layer function
1845  *
1846  * @tparam UpsampleLayerFunction Backend Upsample function
1847  * @tparam TargetInfo            Target-specific information
1848  *
1849  * @param[in] node Node to create the backend function for
1850  * @param[in] ctx  Graph context
1851  *
1852  * @return Backend Upsample layer function
1853  */
1854 template <typename UpsampleLayerFunction, typename TargetInfo>
create_upsample_layer(UpsampleLayerNode & node,GraphContext & ctx)1855 std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
1856 {
1857     ARM_COMPUTE_UNUSED(ctx);
1858     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1859 
1860     // Extract IO and info
1861     typename TargetInfo::TensorType *input             = get_backing_tensor<TargetInfo>(node.input(0));
1862     typename TargetInfo::TensorType *output            = get_backing_tensor<TargetInfo>(node.output(0));
1863     const Size2D                     info              = node.info();
1864     const InterpolationPolicy        upsampling_policy = node.upsampling_policy();
1865     ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
1866     ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
1867     ARM_COMPUTE_ERROR_ON(input == nullptr);
1868     ARM_COMPUTE_ERROR_ON(output == nullptr);
1869 
1870     // Create and configure function
1871     auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
1872     func->configure(input, output, info, upsampling_policy);
1873 
1874     // Log info
1875     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1876                                << node.name()
1877                                << " Type: " << node.type()
1878                                << " Target: " << TargetInfo::TargetType
1879                                << " Data Type: " << input->info()->data_type()
1880                                << " Input shape: " << input->info()->tensor_shape()
1881                                << " Output shape: " << output->info()->tensor_shape()
1882                                << " Strides: " << info
1883                                << " Upsampling policy: " << upsampling_policy
1884                                << std::endl);
1885 
1886     return RETURN_UNIQUE_PTR(func);
1887 }
1888 /** Create a backend YOLO layer function
1889  *
1890  * @tparam YoloLayerFunction Backend YOLO function
1891  * @tparam TargetInfo        Target-specific information
1892  *
1893  * @param[in] node Node to create the backend function for
1894  * @param[in] ctx  Graph context
1895  *
1896  * @return Backend YOLO layer function
1897  */
1898 template <typename YOLOlayerFunction, typename TargetInfo>
create_yolo_layer(YOLOLayerNode & node,GraphContext & ctx)1899 std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
1900 {
1901     ARM_COMPUTE_UNUSED(ctx);
1902     validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1903 
1904     // Extract IO and info
1905     typename TargetInfo::TensorType *input       = get_backing_tensor<TargetInfo>(node.input(0));
1906     typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
1907     const ActivationLayerInfo        act_info    = node.activation_info();
1908     const int32_t                    num_classes = node.num_classes();
1909     ARM_COMPUTE_ERROR_ON(num_classes <= 0);
1910     ARM_COMPUTE_ERROR_ON(input == nullptr);
1911     ARM_COMPUTE_ERROR_ON(output == nullptr);
1912 
1913     // Create and configure function
1914     auto func = support::cpp14::make_unique<YOLOlayerFunction>();
1915     func->configure(input, output, act_info, num_classes);
1916 
1917     // Log info
1918     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1919                                << node.name()
1920                                << " Type: " << node.type()
1921                                << " Target: " << TargetInfo::TargetType
1922                                << " Data Type: " << input->info()->data_type()
1923                                << " Input shape: " << input->info()->tensor_shape()
1924                                << " Output shape: " << output->info()->tensor_shape()
1925                                << " Activation function: " << act_info.activation()
1926                                << " Num classes: " << num_classes
1927                                << std::endl);
1928 
1929     return RETURN_UNIQUE_PTR(func);
1930 }
1931 } // namespace detail
1932 } // namespace backends
1933 } // namespace graph
1934 } // namespace arm_compute
1935 
1936 #endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */
1937