//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Utils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/ILayerSupport.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>

#include "1.0/FullyConnected.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <armnnUtils/FloatingPointComparison.hpp>

#include <log/log.h>
#include <vector>

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunneeded-internal-declaration"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wunused-variable"
#endif
namespace armnn_driver
{

///
/// Helper classes
///

#ifdef ARMNN_ANDROID_R
using OperandType = android::nn::OperandType;
#endif

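// Carries the state shared across the conversion of a model: the candidate
// backends, the Arm NN network under construction, the output slot recorded
// for each operand, and the memory pools that back constant operand data.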
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    , m_DynamicInputsEncountered(false)
    {}

    const std::vector<armnn::BackendId>       m_Backends;
    armnn::INetworkPtr                        m_Network;
    std::vector<armnn::IOutputSlot*>          m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    bool m_DynamicInputsEncountered;
};

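// Wraps the armnn::IOutputSlot that produces a given operation input,
// together with its TensorInfo and a validity flag, so that inputs can be
// connected to (or disconnected from) downstream layers.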
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool                m_Valid;
    armnn::TensorInfo   m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    // the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // Optional flag to indicate that an invalid tensor pin is not an error, but that the optional values
    // were simply not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}

// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
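
// Illustrative invocation only (it mirrors the calls made later in this file);
// the trailing arguments must match the parameters of the chosen Is*Supported
// overload, minus its final Optional<std::string&> reason argument:
//
//     bool isSupported = false;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                IsReshapeSupported,
//                                data.m_Backends,
//                                isSupported,
//                                inputInfo,
//                                outputInfo,
//                                reshapeDescriptor);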

template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32      ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL                           ||
           type == V1_2::OperandType::TENSOR_BOOL8                   ||
           type == V1_2::OperandType::TENSOR_FLOAT16                 ||
           type == V1_2::OperandType::TENSOR_FLOAT32                 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM            ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM             ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM            ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL                           ||
           type == V1_3::OperandType::TENSOR_BOOL8                   ||
           type == V1_3::OperandType::TENSOR_FLOAT16                 ||
           type == V1_3::OperandType::TENSOR_FLOAT32                 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM            ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED     ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM             ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM            ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif

template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

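// Prepares the two inputs of a binary elementwise layer for broadcasting.
// If the inputs have different ranks, the lower-rank input is reshaped to the
// higher rank by prepending degenerate (size 1) dimensions: e.g. broadcasting
// a [4] tensor against a [1, 2, 3, 4] tensor reshapes the former to [1, 1, 1, 4].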
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    ARMNN_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the numbers of dimensions do not match, we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    ARMNN_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}

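// Computes the explicit head and tail padding for one spatial dimension from
// the input size, kernel size, stride and the AndroidNN padding scheme
// (e.g. SAME or VALID), by delegating to the NN runtime's
// calculateExplicitPadding.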
void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif

Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = android::nn::OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = android::nn::OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match, so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
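// For example, applying SwapDim1And2 to a tensor of shape [N, C, H, W] yields
// shape [N, H, C, W]; the two rotations cycle the dimension order of a 3-D
// tensor in opposite directions, so one undoes the other.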

template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);

    ARMNN_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape>& inputShapes,
                               const armnn::TensorShape& outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                        "%s: Invalid output shape for dimension %d (%d != %d)",
                        __func__,
                        i,
                        outputShape[i],
                        outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}
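
// For example, concatenating inputs of shape [2, 3, 4] and [2, 5, 4] along
// concatDim = 1 is only valid for an output shape of [2, 8, 4]: sizes add up
// along the concatenation axis and every other axis must match.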

bool RequiresReshape(armnn::TensorShape& inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have an IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }
        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}

bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t& concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector>& permutationPair)
{
    bool needPermute = false;
    ARMNN_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}
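
// For example, a concatenation of 4-D tensors along dimension 2 cannot use
// subtensors directly, so the inputs are transposed with SwapDim1And2, the
// concatenation runs along dimension 1 instead, and the result is transposed
// back using the second element of the permutation pair.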

} // anonymous namespace

namespace armnn_driver
{

/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed-in AndroidNN activation function requires one.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

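// Looks up the operand referenced by the operation's inputIndex-th input in
// the model's main subgraph, returning nullptr (and optionally logging a
// failure) if the index is out of bounds.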
template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}

template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    HalOperandLifeTime lifetime = operand.lifetime;

    return lifetime == HalOperandLifeTime::CONSTANT_COPY ||
           lifetime == HalOperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == HalOperandLifeTime::NO_VALUE;
}

template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalModel     = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // The Android data layout may differ from the Arm NN data layout (e.g. for the kernel of a depthwise
    // convolution), so the quantization dimension may need to be remapped accordingly.
    if (tensorInfo.HasPerAxisQuantization())
    {
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}

template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data,
                    bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!optional && !operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (!optional && operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (!optional && operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!optional && !valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    if (!optional)
    {
        outValue = *(static_cast<const OutputType*>(valueAddress));
    }

    return true;
}
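
// Illustrative use only (the operand index depends on the operation being
// converted; index 1 and the axis parameter here are hypothetical):
//
//     int32_t axis = 0;
//     if (!GetInputScalar<HalPolicy>(operation, 1, HalPolicy::OperandType::INT32, axis, model, data))
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }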

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel       = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(HalOperandType::INT32).c_str(),
                    toString(HalOperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalPolicy,
         typename ConvolutionDescriptor,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
                                          uint32_t dilationXIndex,
                                          ConvolutionDescriptor& descriptor,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    bool success = true;
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationX,
                                             model,
                                             data);
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex + 1,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationY,
                                             model,
                                             data);
    }

    return success;
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model>
bool GetOptionalBool(const HalOperation& operation,
                     uint32_t inputIndex,
                     const HalModel& model,
                     const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return false;
    }

    if (!IsBool(*operand))
    {
        return false;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return false;
    }

    return *(static_cast<const bool*>(valueAddress));
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
bool GetTensorInt32Values(const HalOperand& operand,
                          std::vector<int32_t>& outValues,
                          const HalModel& model,
                          const ConversionData& data)
{
    if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__);
    }

    // Check number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

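// Resolves the inputIndex-th input of an operation to a LayerInputHandle:
// model inputs and temporaries are bound to the output slot already recorded
// for the operand, while constant operands are materialised as an ArmNN
// Constant layer. Returns an invalid handle on failure.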
1164 template<typename HalPolicy,
1165          typename HalOperation = typename HalPolicy::Operation,
1166          typename HalModel     = typename HalPolicy::Model>
ConvertToLayerInputHandle(const HalOperation & operation,uint32_t inputIndex,const HalModel & model,ConversionData & data)1167 LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
1168                                            uint32_t inputIndex,
1169                                            const HalModel& model,
1170                                            ConversionData& data)
1171 {
1172     using HalOperand         = typename HalPolicy::Operand;
1173     using HalOperandType     = typename HalPolicy::OperandType;
1174     using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
1175 
1176     const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1177     if (!operand)
1178     {
1179         Fail("%s: failed to get input operand %i", __func__, inputIndex);
1180         return LayerInputHandle();
1181     }
1182 
1183     if (!IsOperandTypeSupportedForTensors(operand->type))
1184     {
1185         Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
1186         return LayerInputHandle();
1187     }
1188 
1189     try
1190     {
1191         armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
1192         if (IsDynamicTensor(operandTensorInfo))
1193         {
1194             Fail("%s: dynamic input tensors are not supported", __func__);
1195             return LayerInputHandle();
1196         }
1197 
1198         switch (operand->lifetime)
1199         {
1200             case HalOperandLifeTime::MODEL_INPUT:
1201             {
1202                 // NOTE: We must check whether we can support the input tensor on at least one
1203                 // of the provided backends; otherwise we cannot convert the operation
1204                 bool isInputSupported = false;
1205                 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1206                                            IsInputSupported,
1207                                            data.m_Backends,
1208                                            isInputSupported,
1209                                            operandTensorInfo);
1210 
1211                 if (!isInputSupported)
1212                 {
1213                     Fail("%s: unsupported input tensor", __func__);
1214                     return LayerInputHandle();
1215                 }
1216 
1217                 [[clang::fallthrough]]; // intentional fallthrough
1218             }
1219             case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
1220             case HalOperandLifeTime::MODEL_OUTPUT:
1221             {
1222                 // The tensor is either an operand internal to the model, or a model input.
1223                 // It can be associated with an ArmNN output slot for an existing layer.
1224 
1225                 // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
1226                 const uint32_t operandIndex = operation.inputs[inputIndex];
1227                 return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
1228             }
1229             case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
1230             case HalOperandLifeTime::CONSTANT_REFERENCE:
1231             {
1232                 // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
1233                 ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
1234                 if (tensorPin.IsValid())
1235                 {
1236                     bool isSupported = false;
1237                     FORWARD_LAYER_SUPPORT_FUNC(__func__,
1238                                                IsConstantSupported,
1239                                                data.m_Backends,
1240                                                isSupported,
1241                                                tensorPin.GetConstTensor().GetInfo());
1242                     if (!isSupported)
1243                     {
1244                         return LayerInputHandle();
1245                     }
1246 
1247                     armnn::IConnectableLayer* constantLayer =
1248                                     data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
1249                     armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
1250                     outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
1251 
1252                     return LayerInputHandle(true, &outputSlot, operandTensorInfo);
1253                 }
1254                 else
1255                 {
1256                     Fail("%s: invalid operand tensor", __func__);
1257                     return LayerInputHandle();
1258                 }
1259                 break;
1260             }
1261             default:
1262             {
1263                 // Unsupported lifetime for an input tensor
1264                 Fail("%s: unsupported lifetime for input tensor: %s",
1265                      __func__, toString(operand->lifetime).c_str());
1266                 return LayerInputHandle();
1267             }
1268         }
1269     }
1270     catch (UnsupportedOperand<HalOperandType>& e)
1271     {
1272         Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
1273         return LayerInputHandle();
1274     }
1275 }
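// Illustrative call pattern for the converters below (a sketch; input index 0 is
// assumed to be the data tensor):
//     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
//     if (!input.IsValid())
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }
//     input.Connect(layer->GetInputSlot(0));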
1276 
1277 
1278 #ifdef ARMNN_ANDROID_NN_V1_3
1279 template<typename HalPolicy>
1280 LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
1281                                            uint32_t inputIndex,
1282                                            const ::android::hardware::neuralnetworks::V1_3::Model& model,
1283                                            ConversionData& data)
1284 {
1285     using HalOperand         = typename HalPolicy::Operand;
1286     using HalOperandType     = typename HalPolicy::OperandType;
1287     using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
1288 
1289     const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1290     if (!operand)
1291     {
1292         Fail("%s: failed to get input operand %i", __func__, inputIndex);
1293         return LayerInputHandle();
1294     }
1295 
1296     if (!IsOperandTypeSupportedForTensors(operand->type))
1297     {
1298         Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
1299         return LayerInputHandle();
1300     }
1301 
1302     try
1303     {
1304         armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
1305 
1306         if (IsDynamicTensor(operandTensorInfo))
1307         {
1308             data.m_DynamicInputsEncountered = true;
1309 
1310             const uint32_t operandIndex = operation.inputs[inputIndex];
1311 
1312             // Check whether the dynamic input tensor's shape has been inferred by a previous layer;
1313             // if not, we cannot support it
1314             if (data.m_OutputSlotForOperand.size() > operandIndex && data.m_OutputSlotForOperand[operandIndex])
1315             {
1316                 operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
1317             }
1318             else
1319             {
1320                 Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
1321                 return LayerInputHandle();
1322             }
1323         }
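        // NOTE (editorial): "Type 1" dynamic tensors have a shape that an earlier layer has
        // already inferred, so conversion proceeds with that shape; "Type 2" dynamic tensors
        // (no inferred shape available) are rejected above. Terminology taken from the Fail
        // message here and mirrored in SetupAndTrackLayerOutputSlot below.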
1324 
1325         switch (operand->lifetime)
1326         {
1327             case HalOperandLifeTime::SUBGRAPH_INPUT:
1328             {
1329                 // NOTE: We must check whether we can support the input tensor on at least one
1330                 // of the provided backends; otherwise we cannot convert the operation
1331                 bool isInputSupported = false;
1332                 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1333                                            IsInputSupported,
1334                                            data.m_Backends,
1335                                            isInputSupported,
1336                                            operandTensorInfo);
1337 
1338                 if (!isInputSupported)
1339                 {
1340                     Fail("%s: unsupported input tensor", __func__);
1341                     return LayerInputHandle();
1342                 }
1343 
1344                 [[clang::fallthrough]]; // intentional fallthrough
1345             }
1346             case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
1347             case HalOperandLifeTime::SUBGRAPH_OUTPUT:
1348             {
1349                 // The tensor is either an operand internal to the model, or a model input.
1350                 // It can be associated with an ArmNN output slot for an existing layer.
1351 
1352                 // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
1353                 const uint32_t operandIndex = operation.inputs[inputIndex];
1354                 return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
1355             }
1356             case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
1357             case HalOperandLifeTime::CONSTANT_REFERENCE:
1358             {
1359                 // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
1360                 ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
1361                 if (tensorPin.IsValid())
1362                 {
1363                     bool isSupported = false;
1364                     FORWARD_LAYER_SUPPORT_FUNC(__func__,
1365                                                IsConstantSupported,
1366                                                data.m_Backends,
1367                                                isSupported,
1368                                                tensorPin.GetConstTensor().GetInfo());
1369                     if (!isSupported)
1370                     {
1371                         return LayerInputHandle();
1372                     }
1373 
1374                     armnn::IConnectableLayer* constantLayer =
1375                         data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
1376                     armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
1377                     outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
1378 
1379                     return LayerInputHandle(true, &outputSlot, operandTensorInfo);
1380                 }
1381                 else
1382                 {
1383                     Fail("%s: invalid operand tensor", __func__);
1384                     return LayerInputHandle();
1385                 }
1386                 break;
1387             }
1388             default:
1389             {
1390                 // Unsupported lifetime for an input tensor
1391                 Fail("%s: unsupported lifetime for input tensor: %s",
1392                      __func__, toString(operand->lifetime).c_str());
1393                 return LayerInputHandle();
1394             }
1395         }
1396     }
1397     catch (UnsupportedOperand<HalOperandType>& e)
1398     {
1399         Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
1400         return LayerInputHandle();
1401     }
1402 }
1403 #endif
1404 
1405 template<typename HalPolicy,
1406          typename HalOperation = typename HalPolicy::Operation,
1407          typename HalModel     = typename HalPolicy::Model>
1408 bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1409                                   uint32_t operationOutputIndex,
1410                                   armnn::IConnectableLayer& layer,
1411                                   uint32_t layerOutputIndex,
1412                                   const HalModel& model,
1413                                   ConversionData& data,
1414                                   const armnn::TensorInfo* overrideOutputInfo = nullptr,
1415                                   const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
1416                                   const ActivationFn& activationFunction = ActivationFn::kActivationNone,
1417                                   bool inferOutputShapes = false)
1418 {
1419     using HalOperand = typename HalPolicy::Operand;
1420 
1421     const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
1422     if ((outputOperand == nullptr) || (layerOutputIndex >= layer.GetNumOutputSlots()))
1423     {
1424         return false;
1425     }
1426 
1427     armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1428     if (overrideOutputInfo == nullptr)
1429     {
1430         outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
1431     }
1432     else
1433     {
1434         outputSlot.SetTensorInfo(*overrideOutputInfo);
1435     }
1436 
1437     bool isSupported = false;
1438     if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
1439     {
1440         // Type 1 dynamic tensors require the previous layer's output shape for inference
1441         for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1442         {
1443             if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
1444             {
1445                 return false;
1446             }
1447         }
1448         // IsTensorInfoSet will infer the dynamic output shape
1449         outputSlot.IsTensorInfoSet();
1450         // Once the shape is inferred we can validate it
1451         validateFunc(outputSlot.GetTensorInfo(), isSupported);
1452 
1453         if (!isSupported)
1454         {
1455             for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1456             {
1457                 layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
1458             }
1459             return false;
1460         }
1461     }
1462 
1463     const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1464 
1465     if (activationFunction != ActivationFn::kActivationNone)
1466     {
1467         const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
1468         armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
1469                                                                      &layer, data);
1470 
1471         if (!endLayer)
1472         {
1473             return Fail("%s: ProcessActivation failed", __func__);
1474         }
1475 
1476         armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
1477         data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
1478     }
1479     else
1480     {
1481         data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1482     }
1483 
1484     return true;
1485 }
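// Typical use (a sketch based on the converters below): validateFunc lets the backend
// support check be re-run once a dynamic output shape has been inferred, e.g.
//     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
//     {
//         FORWARD_LAYER_SUPPORT_FUNC(__func__, IsActivationSupported, data.m_Backends,
//                                    isSupported, input.GetTensorInfo(), outInfo, activationDesc);
//     };
//     SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);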
1486 
1487 template<typename HalPolicy,
1488          typename HalOperation = typename HalPolicy::Operation,
1489          typename HalModel     = typename HalPolicy::Model>
1490 armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1491                                      uint32_t inputIndex,
1492                                      const HalModel& model,
1493                                      ConversionData& data)
1494 {
1495     using HalOperand = typename HalPolicy::Operand;
1496 
1497     const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1498     if (!operand)
1499     {
1500         return armnn::DataLayout::NHWC;
1501     }
1502 
1503     if (!IsBool(*operand))
1504     {
1505         return armnn::DataLayout::NHWC;
1506     }
1507 
1508     const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1509     if (!valueAddress)
1510     {
1511         return armnn::DataLayout::NHWC;
1512     }
1513 
1514     if (*(static_cast<const bool*>(valueAddress)))
1515     {
1516         return armnn::DataLayout::NCHW;
1517     }
1518     else
1519     {
1520         return armnn::DataLayout::NHWC;
1521     }
1522 }
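// Usage sketch (mirrors ConvertPooling2d below): an optional trailing BOOL operand selects
// NCHW when true; a missing or non-bool operand leaves the NHWC default in place.
//     desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);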
1523 
1524 template<typename HalPolicy,
1525          typename HalOperation = typename HalPolicy::Operation,
1526          typename HalModel     = typename HalPolicy::Model>
1527 bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1528                                   uint32_t outputIndex,
1529                                   armnn::IConnectableLayer& layer,
1530                                   const HalModel& model,
1531                                   ConversionData& data,
1532                                   const armnn::TensorInfo* overrideOutputInfo = nullptr,
1533                                   const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
1534                                   const ActivationFn& activationFunction = ActivationFn::kActivationNone)
1535 {
1536     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1537                                                    outputIndex,
1538                                                    layer,
1539                                                    outputIndex,
1540                                                    model,
1541                                                    data,
1542                                                    overrideOutputInfo,
1543                                                    validateFunc,
1544                                                    activationFunction);
1545 }
1546 
1547 template<typename HalPolicy,
1548          typename HalOperation = typename HalPolicy::Operation,
1549          typename HalModel     = typename HalPolicy::Model>
1550 bool ConvertToActivation(const HalOperation& operation,
1551                          const char* operationName,
1552                          const armnn::ActivationDescriptor& activationDesc,
1553                          const HalModel& model,
1554                          ConversionData& data)
1555 {
1556     using HalOperand = typename HalPolicy::Operand;
1557 
1558     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1559     if (!input.IsValid())
1560     {
1561         return Fail("%s: Input 0 is invalid", operationName);
1562     }
1563 
1564     const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
1565     if (!outputOperand)
1566     {
1567         return false;
1568     }
1569 
1570     const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1571 
1572     bool isSupported = false;
1573 
1574     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
1575     {
1576         FORWARD_LAYER_SUPPORT_FUNC(__func__,
1577                                    IsActivationSupported,
1578                                    data.m_Backends,
1579                                    isSupported,
1580                                    input.GetTensorInfo(),
1581                                    outInfo,
1582                                    activationDesc);
1583     };
1584 
1585     if (IsDynamicTensor(outInfo))
1586     {
1587         isSupported = AreDynamicTensorsSupported();
1588     }
1589     else
1590     {
1591         validateFunc(outInfo, isSupported);
1592     }
1593 
1594     if (!isSupported)
1595     {
1596         return false;
1597     }
1598 
1599     armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1600     ARMNN_ASSERT(layer != nullptr);
1601     input.Connect(layer->GetInputSlot(0));
1602 
1603     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
1604 }
1605 
1606 template<typename HalPolicy,
1607     typename HalOperation = typename HalPolicy::Operation,
1608     typename HalModel     = typename HalPolicy::Model>
1609 bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1610 {
1611     armnn::ActivationDescriptor desc;
1612     desc.m_Function = armnn::ActivationFunction::ReLu;
1613 
1614     return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1615 }
1616 
1617 template<typename HalPolicy,
1618     typename HalOperation = typename HalPolicy::Operation,
1619     typename HalModel     = typename HalPolicy::Model>
1620 bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1621 {
1622     armnn::ActivationDescriptor desc;
1623     desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1624     desc.m_A        = 1.0f;
1625     desc.m_B        = -1.0f;
1626 
1627     return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1628 }
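// Note: armnn::ActivationFunction::BoundedReLu computes min(m_A, max(m_B, x)), so the
// m_A = 1.0f, m_B = -1.0f pair above implements RELU1; ConvertReLu6 below relies on the
// same function with m_A = 6.0f (m_B defaults to 0.0f) for RELU6.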
1629 
1630 template<typename HalPolicy,
1631     typename HalOperation = typename HalPolicy::Operation,
1632     typename HalModel     = typename HalPolicy::Model>
1633 bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1634 {
1635     armnn::ActivationDescriptor desc;
1636     desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1637     desc.m_A        = 6.0f;
1638 
1639     return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1640 }
1641 
1642 template<typename HalPolicy,
1643     typename HalOperation = typename HalPolicy::Operation,
1644     typename HalModel     = typename HalPolicy::Model>
1645 bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1646 {
1647     armnn::ActivationDescriptor desc;
1648     desc.m_Function = armnn::ActivationFunction::TanH;
1649     desc.m_A = 1.0f; // Android NN does not support TanH parameters
1650     desc.m_B = 1.0f; // set to 1.0f for unity scaling
1651 
1652     return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1653 }
1654 
1655 template<typename HalPolicy,
1656          typename HalOperation   = typename HalPolicy::Operation,
1657          typename HalModel       = typename HalPolicy::Model>
1658 bool ConvertPaddings(const HalOperation& operation,
1659                      const HalModel& model,
1660                      ConversionData& data,
1661                      unsigned int rank,
1662                      armnn::PadDescriptor& padDescriptor)
1663 {
1664     using HalOperand = typename HalPolicy::Operand;
1665 
1666     const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1667     if (!paddingsOperand)
1668     {
1669         return Fail("%s: Could not read paddings operand", __func__);
1670     }
1671 
1672     armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1673     if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1674     {
1675         return Fail("%s: Operation has invalid paddings operand: expected shape [%u, 2]", __func__, rank);
1676     }
1677 
1678     std::vector<int32_t> paddings;
1679     if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1680     {
1681         return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1682     }
1683 
1684     // Add padding for each dimension of the input tensor.
1685     for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1686     {
1687         int paddingBeforeInput = paddings[i];
1688         int paddingAfterInput  = paddings[i + 1];
1689 
1690         if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1691         {
1692             return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1693         }
1694 
1695         padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1696     }
1697 
1698     return true;
1699 }
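// Worked example (illustrative): for a rank-2 input, a paddings operand of shape [2, 2]
// containing { {1, 2}, {0, 3} } produces m_PadList = { {1, 2}, {0, 3} }, i.e. one
// (before, after) pair per input dimension.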
1700 
1701 template<typename HalPolicy,
1702          typename HalOperation   = typename HalPolicy::Operation,
1703          typename HalModel       = typename HalPolicy::Model>
1704 bool ConvertPooling2d(const HalOperation& operation,
1705                       const char* operationName,
1706                       armnn::PoolingAlgorithm poolType,
1707                       const HalModel& model,
1708                       ConversionData& data)
1709 {
1710     using HalOperand     = typename HalPolicy::Operand;
1711     using HalOperandType = typename HalPolicy::OperandType;
1712 
1713     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1714     if (!input.IsValid())
1715     {
1716         return Fail("%s: Operation could not read input 0", operationName);
1717     }
1718 
1719     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1720     if (!output)
1721     {
1722         return Fail("%s: Could not read output 0", __func__);
1723     }
1724 
1725     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
1726     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1727 
1728     armnn::Pooling2dDescriptor desc;
1729     desc.m_PoolType = poolType;
1730     desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
1731     desc.m_DataLayout = armnn::DataLayout::NHWC;
1732 
1733     ActivationFn activation;
1734 
1735     auto inputSize = operation.inputs.size();
1736 
1737     if (inputSize >= 10)
1738     {
1739         // one input, 9 parameters (padding left/right/top/bottom, stride x/y, pool width/height, activation type)
1740         if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
1741             !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
1742             !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
1743             !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
1744             !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1745             !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1746             !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1747             !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1748             !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
1749         {
1750             return Fail("%s: Operation has invalid inputs", operationName);
1751         }
1752 
1753         if (Is12OrLaterOperand(*output))
1754         {
1755             desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
1756         }
1757     }
1758     else
1759     {
1760         // one input, 6 parameters (padding scheme, stride x/y, pool width/height, activation type)
1761         android::nn::PaddingScheme scheme;
1762         if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
1763             !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1764             !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1765             !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1766             !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1767             !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
1768         {
1769             return Fail("%s: Operation has invalid inputs", operationName);
1770         }
1771 
1772         if (Is12OrLaterOperand(*output))
1773         {
1774             desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
1775         }
1776 
1777         const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
1778         const unsigned int inputWidth  = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
1779         const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
1780 
1781         CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
1782         CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
1783     }
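    // At this point desc carries explicit padding: either read directly (the 10/11-input
    // form above) or derived by CalcPadding from the Android implicit padding scheme
    // (the 7/8-input form).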
1784 
1785     bool isSupported = false;
1786 
1787     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1788     {
1789         FORWARD_LAYER_SUPPORT_FUNC(__func__,
1790                                    IsPooling2dSupported,
1791                                    data.m_Backends,
1792                                    isSupported,
1793                                    inputInfo,
1794                                    outputInfo,
1795                                    desc);
1796 
1797     };
1798 
1799     if (IsDynamicTensor(outputInfo))
1800     {
1801         isSupported = AreDynamicTensorsSupported();
1802     }
1803     else
1804     {
1805         validateFunc(outputInfo, isSupported);
1806     }
1807 
1808     if (!isSupported)
1809     {
1810         return false;
1811     }
1812 
1813     armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
1814     if (!pooling2dLayer)
1815     {
1816         return Fail("%s: AddPooling2dLayer failed", __func__);
1817     }
1818 
1819     input.Connect(pooling2dLayer->GetInputSlot(0));
1820 
1826     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
1827                                                    data, nullptr, validateFunc, activation);
1828 }
1829 
1830 template<typename HalPolicy,
1831          typename HalOperation = typename HalPolicy::Operation,
1832          typename HalModel     = typename HalPolicy::Model>
1833 bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
1834 {
1835     using HalOperand = typename HalPolicy::Operand;
1836 
1837     LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1838     LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1839 
1840     if (!input0.IsValid() || !input1.IsValid())
1841     {
1842         return Fail("%s: Operation has invalid inputs", __func__);
1843     }
1844 
1845     // The FuseActivation parameter is always input index 2,
1846     // and it is optional
1847     ActivationFn activationFunction;
1848     if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1849     {
1850         return Fail("%s: Operation has invalid inputs", __func__);
1851     }
1852 
1853     const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
1854     if (!outputOperand)
1855     {
1856         return false;
1857     }
1858 
1859     const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1860     const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1861 
1862     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1863 
1864     bool isSupported = false;
1865     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1866     {
1867         FORWARD_LAYER_SUPPORT_FUNC(__func__,
1868                                    IsAdditionSupported,
1869                                    data.m_Backends,
1870                                    isSupported,
1871                                    inputInfo0,
1872                                    inputInfo1,
1873                                    outputInfo);
1874     };
1875 
1876     if (!IsDynamicTensor(outputInfo))
1877     {
1878         validateFunc(outputInfo, isSupported);
1879     }
1880     else
1881     {
1882         isSupported = AreDynamicTensorsSupported();
1883     }
1884 
1885     if (!isSupported)
1886     {
1887         return false;
1888     }
1889 
1890     armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1891 
1892     bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
1893     if (!isReshapeSupported)
1894     {
1895         return false;
1896     }
1897 
1898     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
1899                                                    data, nullptr, validateFunc, activationFunction);
1900 
1901 }
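// Note on broadcasting (editorial sketch): BroadcastTensor reshapes the lower-rank input
// so both inputs present the same rank to the addition layer, e.g. adding a [4] tensor to
// a [2, 3, 4] tensor first reshapes the former to [1, 1, 4], matching NNAPI's
// NumPy-style broadcasting rules.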
1902 
1903 template<typename HalPolicy,
1904          typename HalOperation = typename HalPolicy::Operation,
1905          typename HalModel     = typename HalPolicy::Model>
1906 bool ConvertArgMinMax(const HalOperation& operation,
1907                       const HalModel& model,
1908                       ConversionData& data,
1909                       armnn::ArgMinMaxFunction argMinMaxFunction)
1910 {
1911     ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1912 
1913     using HalOperand     = typename HalPolicy::Operand;
1914     using HalOperandType = typename HalPolicy::OperandType;
1915 
1916     LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1917 
1918     if (!input0.IsValid())
1919     {
1920         return Fail("%s: Operation has invalid inputs", __func__);
1921     }
1922 
1923     int32_t axis;
1924     if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1925     {
1926         return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1927     }
1928 
1929     const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1930     int rank = static_cast<int>(inputInfo.GetNumDimensions());
1931 
1932     if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1933     {
1934         // Square brackets denote an inclusive bound, parentheses an exclusive bound
1935         // E.g. a rank 4 tensor can have axis in range [-4, 4)
1936         // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1937         return Fail("%s: Axis must be in range [-n, n)", __func__);
1938     }
1939 
1940     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1941     if (!output)
1942     {
1943         return Fail("%s: Could not read output 0", __func__);
1944     }
1945 
1946     const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1947 
1948     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1949 
1950     armnn::ArgMinMaxDescriptor descriptor;
1951     descriptor.m_Function = argMinMaxFunction;
1952     descriptor.m_Axis     = axis;
1953 
1954     bool isSupported = false;
1955 
1956     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1957     {
1958         FORWARD_LAYER_SUPPORT_FUNC(__func__,
1959                                    IsArgMinMaxSupported,
1960                                    data.m_Backends,
1961                                    isSupported,
1962                                    inputInfo0,
1963                                    outputInfo,
1964                                    descriptor);
1965     };
1966 
1967     if (IsDynamicTensor(outputInfo))
1968     {
1969         isSupported = AreDynamicTensorsSupported();
1970     }
1971     else
1972     {
1973         validateFunc(outputInfo, isSupported);
1974     }
1975 
1976     if (!isSupported)
1977     {
1978         return false;
1979     }
1980 
1981     armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1982     ARMNN_ASSERT(layer != nullptr);
1983 
1984     input0.Connect(layer->GetInputSlot(0));
1985 
1986     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
1987 }
1988 
1989 template<typename HalPolicy,
1990          typename HalOperation = typename HalPolicy::Operation,
1991          typename HalModel     = typename HalPolicy::Model>
1992 bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
1993 {
1994     using HalOperand = typename HalPolicy::Operand;
1995     using HalOperandType = typename HalPolicy::OperandType;
1996 
1997     // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
1998     if (operation.inputs.size() <= 1)
1999     {
2000         return Fail("%s: Operation has insufficient arguments", __func__);
2001     }
2002 
2003     // Get inputs and outputs
2004     const std::size_t numInputTensors = operation.inputs.size() - 1;
2005 
2006     int32_t concatDim;
2007     if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
2008     {
2009         return Fail("%s: Operation has invalid inputs", __func__);
2010     }
2011 
2012     const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2013     if (!outputOperand)
2014     {
2015         return Fail("%s: Operation has no outputs", __func__);
2016     }
2017 
2018     armnn::TensorInfo  outputInfo      = GetTensorInfoForOperand(*outputOperand);
2019     armnn::TensorShape outputShape     = outputInfo.GetShape();
2020     const bool         isDynamicTensor = IsDynamicTensor(outputInfo);
2021     //
2022     // Handle negative concat dims along the lines of TensorFlow, as described here:
2023     //    https://www.tensorflow.org/api_docs/python/tf/concat
2024     // "negative axis refers to axis + rank(values)-th dimension"
2025     //
2026     if (concatDim < 0)
2027     {
2028         concatDim += outputShape.GetNumDimensions();
2029     }
2030 
2031     if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
2032     {
2033         return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
2034     }
2035 
2036     std::vector<LayerInputHandle>   inputHandles;
2037     std::vector<armnn::TensorShape> inputShapes;
2038 
2039     inputHandles.reserve(numInputTensors);
2040     inputShapes.reserve(numInputTensors);
2041 
2042     bool          inputsHaveBeenReshaped = false;
2043     unsigned int  tensorDimensionsAdded  = 0;
2044     for (uint32_t i = 0; i < numInputTensors; ++i)
2045     {
2046         const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
2047         if (!operand)
2048         {
2049             return Fail("%s: Operation has invalid inputs", __func__);
2050         }
2051 
2052         LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
2053         if (!operandInputHandle.IsValid())
2054         {
2055             return Fail("%s: Operation has invalid inputs", __func__);
2056         }
2057 
2058         armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
2059         if (operandShape.GetNumDimensions() == 0)
2060         {
2061             return Fail("%s: Operands with rank 0 are not supported", __func__);
2062         }
2063 
2064         if (RequiresReshape(operandShape))
2065         {
2066             inputsHaveBeenReshaped = true;
2067 
2068             armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
2069 
2070             // Expand the tensor to three dimensions
2071             if (operandShape.GetNumDimensions() == 2)
2072             {
2073                 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
2074                 tensorDimensionsAdded = 1;
2075             }
2076             else
2077             {
2078                 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
2079                 tensorDimensionsAdded = 2;
2080             }
2081 
2082             armnn::ReshapeDescriptor reshapeDescriptor;
2083             reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
2084 
2085             bool isSupported = false;
2086             FORWARD_LAYER_SUPPORT_FUNC(__func__,
2087                                        IsReshapeSupported,
2088                                        data.m_Backends,
2089                                        isSupported,
2090                                        operandInputHandle.GetTensorInfo(),
2091                                        reshapeInfo,
2092                                        reshapeDescriptor);
2093 
2094             if (!isSupported)
2095             {
2096                 return false;
2097             }
2098             armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
2099 
2100             // Point to the reshape operation rather than the input operation
2101             operandShape       = reshapeInfo.GetShape();
2102             operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
2103         }
2104 
2105         inputShapes.emplace_back(operandShape);
2106         inputHandles.emplace_back(operandInputHandle);
2107 
2108         if (!inputHandles.back().IsValid())
2109         {
2110             return Fail("%s: Operation has invalid inputs", __func__);
2111         }
2112     }
2113 
2114     ARMNN_ASSERT(inputShapes.size() == inputHandles.size());
2115 
2116     if (inputsHaveBeenReshaped)
2117     {
2118         // Adjust the concatenation dimension by the amount of dimensions added (if any)
2119         concatDim += tensorDimensionsAdded;
2120 
2121         // Add extra dimensions to the output shape to reflect the addition of the reshape layers
2122         if (tensorDimensionsAdded == 1)
2123         {
2124             if (IsDynamicTensor(outputInfo))
2125             {
2126                 outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
2127             }
2128             else
2129             {
2130                 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
2131             }
2132         }
2133         else if (tensorDimensionsAdded == 2)
2134         {
2135             if (IsDynamicTensor(outputInfo))
2136             {
2137                 outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
2138             }
2139             else
2140             {
2141                 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
2142             }
2143         }
2144     }
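    // Example (illustrative): concatenating two [2, 3] inputs along dim 1 reshapes each to
    // [1, 2, 3] (tensorDimensionsAdded == 1) and shifts concatDim from 1 to 2; the extra
    // leading dimension is removed again after the concat layer is added.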
2145 
2146     // Check if a permutation is required and get the pair of permutations needed for the concatenation.
2147     // A permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
2148     std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
2149         std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
2150     bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
2151                                                          concatDim,
2152                                                          permutationPair);
2153 
2154     // Only relevant to static tensors; dynamic output tensors will be transposed as a result of shape inference from the inputs
2155     if (!isDynamicTensor)
2156     {
2157         if (needPermute)
2158         {
2159             outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
2160         }
2161 
2162         outputInfo.SetShape(outputShape);
2163     }
2164     // This is a no-op for identity swizzles; otherwise it replaces both
2165     // the handles and shapes with the swizzled layer's output handles and shapes
2166     if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
2167     {
2168         return false;
2169     }
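    // (Editorial note) The permutation pair acts as swizzle/deswizzle: permutationPair.first
    // maps an unsupported concat axis onto a supported one before the concat layer, and
    // permutationPair.second restores the original layout afterwards (see transposeOutputShape
    // below).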
2170 
2171     // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
2172     armnn::OriginsDescriptor concatDescriptor;
2173 
2174     try
2175     {
2176         // The concat descriptor is always created across the only supported concat dimension
2177         // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
2178         concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
2179                                                                    inputShapes.end(),
2180                                                                    concatDim);
2181     } catch (std::exception& error)
2182     {
2183         return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
2184     }
2185 
2186     // Validate the output shape is correct given the input shapes based on the
2187     // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
2188     if (!isDynamicTensor)
2189     {
2190         if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
2191         {
2192             return Fail("%s: Error validating the output shape for concat", __func__);
2193         }
2194     }
2195 
2196     std::vector<const armnn::TensorInfo*> inputTensorInfos;
2197     std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
2198                    [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
2199 
2200     bool isSupported  = false;
2201     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
2202         FORWARD_LAYER_SUPPORT_FUNC(__func__, IsConcatSupported, data.m_Backends, isSupported, inputTensorInfos,
2203                                    outputInfo, concatDescriptor);
2204     };
2205 
2206     if (!isDynamicTensor)
2207     {
2208         validateFunc(outputInfo, isSupported);
2209     }
2210     else
2211     {
2212         isSupported = AreDynamicTensorsSupported();
2213     }
2214 
2215     if (!isSupported)
2216     {
2217         return false;
2218     }
2219 
2220     armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
2221     ARMNN_ASSERT(layer != nullptr);
2222     layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2223     // Connect inputs to the layer
2224     const int numInputSlots = layer->GetNumInputSlots();
2225     ARMNN_ASSERT(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
2226     for (int i = 0; i < numInputSlots; ++i)
2227     {
2228         // connect the input directly to the merge (concat) layer
2229         inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
2230     }
2231 
2232     // Transpose the output shape
2233     auto transposeOutputShape = [&](){
2234         armnn::TransposeDescriptor transposeDesc;
2235         transposeDesc.m_DimMappings = permutationPair.second;
2236         armnn::TensorInfo inputTransposeInfo  = layer->GetOutputSlot(0).GetTensorInfo();
2237         armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
2238                                                                                  permutationPair.second);
2239         isSupported = false;
2240         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2241                                    IsTransposeSupported,
2242                                    data.m_Backends,
2243                                    isSupported,
2244                                    inputTransposeInfo,
2245                                    outputTransposeInfo,
2246                                    transposeDesc);
2247         if (!isSupported)
2248         {
2249             return false;
2250         }
2251         // Add the permutation layer and connect the output to it; the permutation becomes the output layer
2252         armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
2253                                                                      permutationPair.second);
2254         layer = &deswizzleLayer;
2255 
2256         return true;
2257     };
2258 
2259     if (needPermute && !isDynamicTensor)
2260     {
2261         transposeOutputShape();
2262     }
2263 
2264     if (inputsHaveBeenReshaped)
2265     {
2266         if (isDynamicTensor)
2267         {
2268             // Infer the output shapes of concat if outputs are type 1 dynamic
2269             ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
2270             if (!ValidateConcatOutputShape(inputShapes,
2271                                            layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
2272                                            concatDim))
2273             {
2274                 return Fail("%s: Error validating the output shape for concat", __func__);
2275             }
2276             transposeOutputShape();
2277         }
2278 
2279         armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
2280         // Undo the reshape, knowing the number of dimensions added
2281         if (tensorDimensionsAdded == 1)
2282         {
2283             afterConcatInfo.SetShape(
2284                 armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
2285         }
2286         else if (tensorDimensionsAdded == 2)
2287         {
2288             afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
2289         }
2290 
2291         armnn::ReshapeDescriptor reshapeDescriptor;
2292         reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
2293         armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
2294 
2295         isSupported = false;
2296         auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
2297             FORWARD_LAYER_SUPPORT_FUNC(__func__,
2298                                        IsReshapeSupported,
2299                                        data.m_Backends,
2300                                        isSupported,
2301                                        concatInfo,
2302                                        afterConcatInfo,
2303                                        reshapeDescriptor);
2304         };
2305 
2306         if (!IsDynamicTensor(afterConcatInfo))
2307         {
2308             validateReshapeFunc(afterConcatInfo, isSupported);
2309         }
2310         else
2311         {
2312             isSupported = AreDynamicTensorsSupported();
2313         }
2314 
2315         if (!isSupported)
2316         {
2317             return false;
2318         }
2319         layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
2320         return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
2321                                                        0,
2322                                                        *layer,
2323                                                        model,
2324                                                        data,
2325                                                        nullptr,
2326                                                        validateReshapeFunc);
2327     }
2328 
2329     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
2330 }
2331 
2332 template<typename HalPolicy,
2333          typename HalOperation   = typename HalPolicy::Operation,
2334          typename HalModel       = typename HalPolicy::Model>
2335 bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2336 {
2337     using HalOperand     = typename HalPolicy::Operand;
2338     using HalOperandType = typename HalPolicy::OperandType;
2339 
2340     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2341     if (!input.IsValid())
2342     {
2343         return Fail("%s: Operation has invalid inputs", __func__);
2344     }
2345 
2346     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2347     if (!output)
2348     {
2349         return Fail("%s: Could not read output 0", __func__);
2350     }
2351 
2352     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
2353     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2354 
2355     // ArmNN does not currently support non-fixed weights or bias
2356     const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
2357     const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
2358 
2359     if (!weightsPin.IsValid() || !biasPin.IsValid())
2360     {
2361         return Fail("%s: Operation has invalid inputs", __func__);
2362     }
2363 
2364     armnn::ConstTensor weights = weightsPin.GetConstTensor();
2365     armnn::ConstTensor bias    = biasPin.GetConstTensor();
2366     SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
2367 
2368     armnn::Convolution2dDescriptor desc;
2369     desc.m_DataLayout = armnn::DataLayout::NHWC;
2370     ActivationFn activation;
2371 
2372     if (operation.inputs.size() == 10)
2373     {
2374         if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2375             !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2376             !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2377             !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2378             !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2379             !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
2380             !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
2381         {
2382             return Fail("%s: Operation has invalid inputs", __func__);
2383         }
2384     }
2385     else if (operation.inputs.size() == 7)
2386     {
2387         android::nn::PaddingScheme paddingScheme;
2388         if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2389             !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2390             !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
2391             !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
2392         {
2393             return Fail("%s: Operation has invalid inputs", __func__);
2394         }
2395 
2396         const uint32_t kernelX = weights.GetShape()[2];
2397         const uint32_t kernelY = weights.GetShape()[1];
2398         const uint32_t inputX  = inputInfo.GetShape()[2];
2399         const uint32_t inputY  = inputInfo.GetShape()[1];
2400 
2401         CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2402         CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
2403     }
2404     else
2405     {
2406         return Fail("%s: Unsupported number of operation inputs", __func__);
2407     }
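    // Implicit padding example for the 7-input branch above (a sketch of the SAME scheme):
    // inputX = 224, kernelX = 3, strideX = 2 gives outputX = ceil(224 / 2) = 112, so
    // CalcPadding distributes the (112 - 1) * 2 + 3 - 224 = 1 required pad element as
    // m_PadLeft = 0, m_PadRight = 1.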
2408 
2409     desc.m_BiasEnabled = true;
2410     armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
2411 
2412     bool isSupported = false;
2413     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2414     {
2415         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2416                                    IsConvolution2dSupported,
2417                                    data.m_Backends,
2418                                    isSupported,
2419                                    inputInfo,
2420                                    outputInfo,
2421                                    desc,
2422                                    weights.GetInfo(),
2423                                    biases);
2424     };
2425 
2426     if (!IsDynamicTensor(outputInfo))
2427     {
2428         validateFunc(outputInfo, isSupported);
2429     }
2430     else
2431     {
2432         isSupported = AreDynamicTensorsSupported();
2433     }
2434 
2435     if (!isSupported)
2436     {
2437         return false;
2438     }
2439 
2440     armnn::IConnectableLayer* startLayer =
2441             data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
2442 
2443     if (!startLayer)
2444     {
2445         return Fail("%s: AddConvolution2dLayer failed", __func__);
2446     }
2447 
2448     input.Connect(startLayer->GetInputSlot(0));
2449 
2450     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2451                                                    data, nullptr, validateFunc, activation);
2452 }
2453 
2454 template<typename HalPolicy,
2455          typename HalOperation   = typename HalPolicy::Operation,
2456          typename HalModel       = typename HalPolicy::Model>
2457 bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2458 {
2459     using HalOperand     = typename HalPolicy::Operand;
2460     using HalOperandType = typename HalPolicy::OperandType;
2461 
2462     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2463     if (!input.IsValid())
2464     {
2465         return Fail("%s: Operation has invalid inputs", __func__);
2466     }
2467 
2468     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2469     unsigned int rank = inputInfo.GetNumDimensions();
2470     if (rank != 4)
2471     {
2472         return Fail("%s: Only inputs with rank 4 are supported", __func__);
2473     }
2474 
2475     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2476     if (!output)
2477     {
2478         return Fail("%s: Could not read output 0", __func__);
2479     }
2480 
2481     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2482 
2483     armnn::DepthToSpaceDescriptor descriptor;
2484 
2485     GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2486     if (descriptor.m_BlockSize <= 1)
2487     {
2488         return Fail("%s: Block size must be at least 2", __func__);
2489     }
2490 
2491     descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2492     if (Is12OrLaterOperand(*output))
2493     {
2494         descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2495     }
2496 
2497     bool isSupported = false;
2498     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2499     {
2500         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2501                                    IsDepthToSpaceSupported,
2502                                    data.m_Backends,
2503                                    isSupported,
2504                                    inputInfo,
2505                                    outputInfo,
2506                                    descriptor);
2507     };
2508 
2509     if(!IsDynamicTensor(outputInfo))
2510     {
2511         validateFunc(outputInfo, isSupported);
2512     }
2513     else
2514     {
2515         isSupported = AreDynamicTensorsSupported();
2516     }
2517 
2518     if (!isSupported)
2519     {
2520         return false;
2521     }
2522 
2523     armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2524     assert(layer != nullptr);
2525     input.Connect(layer->GetInputSlot(0));
2526 
2527     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
2528 }
2529 
2530 template<typename HalPolicy,
2531          typename HalOperation   = typename HalPolicy::Operation,
2532          typename HalModel       = typename HalPolicy::Model>
2533 bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2534 {
2535     using HalOperand     = typename HalPolicy::Operand;
2536     using HalOperandType = typename HalPolicy::OperandType;
2537 
2538     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2539 
2540     if (!input.IsValid())
2541     {
2542         return Fail("%s: Operation has invalid inputs", __func__);
2543     }
2544 
2545     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2546 
2547     if (!output)
2548     {
2549         return Fail("%s: Could not read output 0", __func__);
2550     }
2551 
2552     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
2553     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2554 
2555     // ArmNN does not currently support non-fixed weights or bias
2556     // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
2557     const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2558 
2559     if (weightsOperand == nullptr)
2560     {
2561         return Fail("%s: Operand is invalid", __func__);
2562     }
2563     armnn::DepthwiseConvolution2dDescriptor desc;
2564     desc.m_DataLayout = armnn::DataLayout::NHWC;
2565 
2566     // Reinterpret weight data as [ H, W, I, M ]
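    // (I is the number of input channels and M the depth multiplier; AndroidNN stores the last dimension as I * M.)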
2567     armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
2568                                       weightsOperand->dimensions[2],
2569                                       inputInfo.GetShape()[3],
2570                                       weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });
2571 
2572     // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
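    // Each PermutationVector entry gives the destination index of the corresponding source dimension:
    // H -> 2, W -> 3, I -> 1, M -> 0.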
2573     const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
2574 
2575     const ConstTensorPin weightsPin =
2576         ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
2577                                                          1,
2578                                                          model,
2579                                                          data,
2580                                                          HWIMToMIHW,
2581                                                          &weightsShape);
2582 
2583     // Bias is a 1D tensor
2584     const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
2585 
2586     if (!weightsPin.IsValid() || !biasPin.IsValid())
2587     {
2588         return Fail("%s: Operation has invalid inputs", __func__);
2589     }
2590 
2591     armnn::ConstTensor weights = weightsPin.GetConstTensor();
2592     armnn::ConstTensor bias = biasPin.GetConstTensor();
2593     SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
2594 
2595     ActivationFn activation;
2596 
2597     if (operation.inputs.size() == 11)
2598     {
2599         if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2600             !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2601             !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2602             !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2603             !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2604             !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
2605             !GetInputActivationFunction<HalPolicy>(operation,  10, activation, model, data))
2606         {
2607             return Fail("%s: Operation has invalid inputs", __func__);
2608         }
2609     }
2610     else if (operation.inputs.size() == 8)
2611     {
2612         android::nn::PaddingScheme paddingScheme;
2613         if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2614             !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2615             !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
2616             !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
2617         {
2618             return Fail("%s: Operation has invalid inputs", __func__);
2619         }
2620 
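        // The weights were swizzled to [ M, I, H, W ], so W (kernelX) is at index 3 and H (kernelY) at index 2.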
2621         const uint32_t kernelX = weights.GetShape()[3];
2622         const uint32_t kernelY = weights.GetShape()[2];
2623         const uint32_t inputX  = inputInfo.GetShape()[2];
2624         const uint32_t inputY  = inputInfo.GetShape()[1];
2625 
2626         CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2627         CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
2628     }
2629     else
2630     {
2631         return Fail("%s: Unsupported number of operation inputs", __func__);
2632     }
2633 
2634     desc.m_BiasEnabled = true;
2635     armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
2636 
2637     bool isSupported = false;
2638     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2639     {
2640         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2641                                    IsDepthwiseConvolutionSupported,
2642                                    data.m_Backends,
2643                                    isSupported,
2644                                    inputInfo,
2645                                    outputInfo,
2646                                    desc,
2647                                    weights.GetInfo(),
2648                                    biases);
2649     };
2650 
2651     if(!IsDynamicTensor(outputInfo))
2652     {
2653         validateFunc(outputInfo, isSupported);
2654     }
2655     else
2656     {
2657         isSupported = AreDynamicTensorsSupported();
2658     }
2659 
2661     if (!isSupported)
2662     {
2663         return false;
2664     }
2665 
2666     armnn::IConnectableLayer* startLayer =
2667             data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
2668     if (!startLayer)
2669     {
2670         return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
2671     }
2672 
2673     input.Connect(startLayer->GetInputSlot(0));
2674 
2675     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2676                                                    data, nullptr, validateFunc, activation);
2677 }
2678 
2679 template<typename HalPolicy,
2680          typename HalOperation = typename HalPolicy::Operation,
2681          typename HalModel     = typename HalPolicy::Model>
2682 bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
2683 {
2684     using HalOperand = typename HalPolicy::Operand;
2685 
2686     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2687     if (!input.IsValid())
2688     {
2689         return Fail("%s: Operation has invalid input", __func__);
2690     }
2691 
2692     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
2693     const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2694     if (quantizationDim.has_value() && quantizationDim.value() != 0)
2695     {
2696         return Fail("%s: Operation has quantization dimension different than 0", __func__);
2697     }
2698 
2699     const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2700     if (!outputOperand)
2701     {
2702         return Fail("%s: Operation has invalid outputs", __func__);
2703     }
2704 
2705     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2706 
2707     bool isSupported = false;
2708     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2709     {
2710         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2711                                    IsDequantizeSupported,
2712                                    data.m_Backends,
2713                                    isSupported,
2714                                    inputInfo,
2715                                    outputInfo);
2716     };
2717 
2718     if(IsDynamicTensor(outputInfo))
2719     {
2720         isSupported = AreDynamicTensorsSupported();
2721     }
2722     else
2723     {
2724         validateFunc(outputInfo, isSupported);
2725     }
2726 
2727     if (!isSupported)
2728     {
2729         return false;
2730     }
2731 
2732     armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2733     assert(layer != nullptr);
2734     input.Connect(layer->GetInputSlot(0));
2735 
2736     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
2737 }
2738 
2739 template<typename HalPolicy,
2740          typename HalOperation = typename HalPolicy::Operation,
2741          typename HalModel     = typename HalPolicy::Model>
2742 bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
2743 {
2744     using HalOperand = typename HalPolicy::Operand;
2745 
2746     LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2747     LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2748 
2749     if (!input0.IsValid() || !input1.IsValid())
2750     {
2751         return Fail("%s: Operation has invalid inputs", __func__);
2752     }
2753 
2754     // The FuseActivation parameter is always at input index 2,
2755     // and it is optional
2756     ActivationFn activationFunction;
2757     if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2758     {
2759         return Fail("%s: Operation has invalid inputs", __func__);
2760     }
2761 
2762     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2763     if (!output)
2764     {
2765         return Fail("%s: Could not read output 0", __func__);
2766     }
2767 
2768     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2769 
2770     bool isSupported = false;
2771     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2772     {
2773         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2774                                    IsDivisionSupported,
2775                                    data.m_Backends,
2776                                    isSupported,
2777                                    input0.GetTensorInfo(),
2778                                    input1.GetTensorInfo(),
2779                                    outputInfo);
2780     };
2781 
2782     if(!IsDynamicTensor(outputInfo))
2783     {
2784         validateFunc(outputInfo, isSupported);
2785     }
2786     else
2787     {
2788         isSupported = AreDynamicTensorsSupported();
2789     }
2790 
2791     if (!isSupported)
2792     {
2793         return false;
2794     }
2795 
2796     armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2797 
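    // If the two inputs have different ranks, BroadcastTensor reshapes the lower-rank input before
    // connecting both inputs to the layer; it returns false if that reshape is not supported.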
2798     bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2799     if (!isReshapeSupported)
2800     {
2801         return false;
2802     }
2803 
2804     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2805                                                    data, nullptr, validateFunc, activationFunction);
2807 }
2808 
2809 template<typename HalPolicy,
2810          typename HalOperation = typename HalPolicy::Operation,
2811          typename HalModel     = typename HalPolicy::Model>
2812 bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
2813 {
2814     using HalOperand = typename HalPolicy::Operand;
2815 
2816     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2817     if (!input.IsValid())
2818     {
2819         return Fail("%s: Operation has invalid inputs", __func__);
2820     }
2821 
2822     const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2823     if (!outputOperand)
2824     {
2825         return Fail("%s: Operation has invalid outputs", __func__);
2826     }
2827 
2828     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2829 
2830     bool isSupported = false;
2831     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2832     {
2833         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2834                                    IsFloorSupported,
2835                                    data.m_Backends,
2836                                    isSupported,
2837                                    input.GetTensorInfo(),
2838                                    outputInfo);
2839     };
2840 
2841     if(!IsDynamicTensor(outputInfo))
2842     {
2843         validateFunc(outputInfo, isSupported);
2844     }
2845     else
2846     {
2847         isSupported = AreDynamicTensorsSupported();
2848     }
2849 
2850     if (!isSupported)
2851     {
2852         return false;
2853     }
2854 
2855     armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2856     assert(layer != nullptr);
2857     input.Connect(layer->GetInputSlot(0));
2858 
2859     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
2860 }
2861 
2862 inline bool IsQSymm8(const V1_0::Operand&)
2863 {
2864     return false;
2865 }
2866 
2867 #if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
2868 
2869 inline bool IsQSymm8(const V1_2::Operand& operand)
2870 {
2871     return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
2872 }
2873 
2874 #endif
2875 
2876 #ifdef ARMNN_ANDROID_NN_V1_3
2877 
2878 inline bool IsQSymm8(const V1_3::Operand& operand)
2879 {
2880     return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
2881 }
2882 
2883 #endif
2884 
2885 enum class DequantizeStatus
2886 {
2887     SUCCESS,
2888     NOT_REQUIRED,
2889     INVALID_OPERAND
2890 };
2891 
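// A DequantizeResult packs the buffer of dequantized floats, its size in bytes, the TensorInfo describing it,
// and a DequantizeStatus.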
2892 using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2893 
2894 template<typename HalPolicy,
2895          typename HalOperation = typename HalPolicy::Operation,
2896          typename HalModel     = typename HalPolicy::Model>
2897 DequantizeResult DequantizeIfRequired(size_t operand_index,
2898                                       const HalOperation& operation,
2899                                       const HalModel& model,
2900                                       const ConversionData& data)
2901 {
2902     using HalOperand = typename HalPolicy::Operand;
2903 
2904     const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
2905     if (!weightsOperand)
2906     {
2907         return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
2908     }
2909 
2910     if (IsOperandConstant<HalPolicy>(*weightsOperand))
2911     {
2912         // Weights are already constant
2913         return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
2914     }
2915 
2916     const size_t weightsInputIndex = operation.inputs[operand_index];
2917 
2918     // The weights are a non-const tensor; this indicates they might be the output of a DEQUANTIZE op.
2919     // Iterate over the operations to find the preceding DEQUANTIZE op, if any.
2920     for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
2921     {
2922         // Search for the DEQUANTIZE op whose output is the operand with index equal to weightsInputIndex
2923         const auto& operationIt = getMainModel(model).operations[operationIdx];
2924         if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2925         {
2926             continue;
2927         }
2928 
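        // Check whether one of this DEQUANTIZE op's outputs is the operand that feeds our weights input.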
2929         size_t outOpIndex = weightsInputIndex + 1;
2930         for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
2931         {
2932             outOpIndex = operationIt.outputs[i];
2933         }
2934 
2935         if (outOpIndex != weightsInputIndex)
2936         {
2937             continue;
2938         }
2939 
2940         const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
2941         ARMNN_ASSERT(operand);
2942 
2943         if (!IsQSymm8(*operand))
2944         {
2945             // Only supporting dequantize from QSYMM8 to FLOAT
2946             break;
2947         }
2948 
2949         // Allocate a new buffer for the dequantized data and manually dequantize
2950         const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2951         if (!startValue)
2952         {
2953             // Failed to get the operand address
2954             break;
2955         }
2956 
2957         const int8_t* quantizedBuffer = reinterpret_cast<const int8_t*>(startValue);
2958         size_t dequantizedBufferLength = operand->location.length;
2959         const float quantizationScale  = operand->scale;
2960 
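        // QSYMM8 values are signed 8-bit with a zero point of 0, so dequantization is simply:
        // real = scale * quantized.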
2961         auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2962         float* dstPtr = dequantizedBuffer.get();
2963         ARMNN_ASSERT(dstPtr);
2964         for (size_t i = 0; i < dequantizedBufferLength; ++i)
2965         {
2966             *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2967         }
2968 
2969         // Construct tensor info for dequantized ConstTensor
2970         armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2971                                      operand->dimensions.data(),
2972                                      armnn::DataType::Float32);
2973 
2974         return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2975                  std::move(tensorInfo),
2976                  DequantizeStatus::SUCCESS };
2977     }
2978 
2979     return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
2980 }
2981 
2982 template<typename HalPolicy,
2983          typename HalOperation = typename HalPolicy::Operation,
2984          typename HalModel     = typename HalPolicy::Model>
2985 ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2986                                                const HalModel& model,
2987                                                const ConversionData& data,
2988                                                size_t operandIndex,
2989                                                bool optional = false)
2990 {
2991     DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex, operation, model, data);
2992 
2993     DequantizeStatus status = std::get<3>(dequantized);
2994     switch (status)
2995     {
2996         case DequantizeStatus::INVALID_OPERAND:
2997         {
2998             // return invalid const tensor pin
2999             return ConstTensorPin();
3000         }
3001         case DequantizeStatus::NOT_REQUIRED:
3002         {
3003             return ConvertOperationInputToConstTensorPin<HalPolicy>(
3004                 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
3005         }
3006         case DequantizeStatus::SUCCESS:
3007         default:
3008         {
3009             return ConstTensorPin(
3010                 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
3011         }
3012     }
3013 }
3014 
3016 template<typename HalPolicy,
3017          typename HalOperation = typename HalPolicy::Operation,
3018          typename HalModel     = typename HalPolicy::Model>
3019 bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
3020 {
3021     using HalOperand = typename HalPolicy::Operand;
3022 
3023     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3024     if (!input.IsValid())
3025     {
3026         return Fail("%s: Operation has invalid inputs", __func__);
3027     }
3028 
3029     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3030     if (!output)
3031     {
3032         return Fail("%s: Could not read output 0", __func__);
3033     }
3034 
3035     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
3036     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3037 
3038     ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
3039     ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D
3040 
3041     if (!weightsPin.IsValid())
3042     {
3043         return Fail("%s: Operation has invalid weights", __func__);
3044     }
3045 
3046     if (!biasPin.IsValid())
3047     {
3048         return Fail("%s: Operation has invalid bias", __func__);
3049     }
3050 
3051     armnn::ConstTensor weights = weightsPin.GetConstTensor();
3052     armnn::ConstTensor bias    = biasPin.GetConstTensor();
3053     armnn::TensorInfo reshapedInfo = inputInfo;
3054 
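    // FullyConnected expects a 2D input of shape [ batchSize, inputSize ]; flatten any higher-rank input to match.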
3055     try
3056     {
3057         reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
3058     }
3059     catch (const std::exception& e)
3060     {
3061         return Fail("%s: %s", __func__, e.what());
3062     }
3063 
3064     // Ensure the bias quantization scale is within 1% of inputScale * weightsScale (small float differences can exist)
3065     SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
3066 
3067     ActivationFn activationFunction;
3068     if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
3069     {
3070         return Fail("%s: Operation has invalid inputs", __func__);
3071     }
3072 
3073     armnn::FullyConnectedDescriptor desc;
3074     desc.m_TransposeWeightMatrix = true;
3075     desc.m_BiasEnabled           = true;
3076 
3077     bool isSupported = false;
3078     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3079     {
3080         if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
3081                                         weights.GetInfo().GetShape(),
3082                                         outputInfo.GetShape(),
3083                                         desc.m_TransposeWeightMatrix))
3084         {
3085             isSupported = false;
3086             Fail("%s: Expected outputShape does not match actual outputShape", __func__);
3087             return;
3088         }
3089 
3090         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3091                                IsFullyConnectedSupported,
3092                                data.m_Backends,
3093                                isSupported,
3094                                reshapedInfo,
3095                                outputInfo,
3096                                weights.GetInfo(),
3097                                bias.GetInfo(),
3098                                desc);
3099     };
3100 
3101     if(!IsDynamicTensor(outputInfo))
3102     {
3103         validateFunc(outputInfo, isSupported);
3104     }
3105     else
3106     {
3107         isSupported = AreDynamicTensorsSupported();
3108     }
3109 
3110     if (!isSupported)
3111     {
3112         return false;
3113     }
3114 
3115     armnn::IConnectableLayer* startLayer =
3116             data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
3117 
3118     if (inputInfo.GetNumDimensions() > 2U)
3119     {
3120         armnn::ReshapeDescriptor reshapeDescriptor;
3121         reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
3122 
3123         armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3124         assert(reshapeLayer != nullptr);
3125         input.Connect(reshapeLayer->GetInputSlot(0));
3126         reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
3127         reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
3128     }
3129     else
3130     {
3131         input.Connect(startLayer->GetInputSlot(0));
3132     }
3133 
3134     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3135                                                    data, nullptr, validateFunc, activationFunction);
3136 }
3137 
3138 template<typename HalPolicy,
3139          typename HalOperation = typename HalPolicy::Operation,
3140          typename HalModel     = typename HalPolicy::Model>
3141 bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
3142 {
3143     using HalOperand = typename HalPolicy::Operand;
3144 
3145     if (operation.inputs.size() != 1)
3146     {
3147         return Fail("%s: Optional inputs are not supported", __func__);
3148     }
3149 
3150     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3151     if (!input.IsValid())
3152     {
3153         return Fail("%s: Operation has invalid inputs", __func__);
3154     }
3155 
3156     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3157     if (!output)
3158     {
3159         return Fail("%s: Could not read output 0", __func__);
3160     }
3161 
3162     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
3163     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3164 
3165     if (outputInfo.GetNumDimensions() != 4u)
3166     {
3167         return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3168     }
3169 
3170     armnn::L2NormalizationDescriptor desc;
3171     desc.m_DataLayout = armnn::DataLayout::NHWC;
3172 
3173     bool isSupported = false;
3174     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3175     {
3176         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3177                                    IsL2NormalizationSupported,
3178                                    data.m_Backends,
3179                                    isSupported,
3180                                    inputInfo,
3181                                    outputInfo,
3182                                    desc);
3183     };
3184 
3185     if(!IsDynamicTensor(outputInfo))
3186     {
3187         validateFunc(outputInfo, isSupported);
3188     }
3189     else
3190     {
3191         isSupported = AreDynamicTensorsSupported();
3192     }
3193 
3194     if (!isSupported)
3195     {
3196         return false;
3197     }
3198 
3199     armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
3200     assert(layer != nullptr);
3201     input.Connect(layer->GetInputSlot(0));
3202 
3203     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3204 }
3205 
3206 template<typename HalPolicy,
3207          typename HalOperation = typename HalPolicy::Operation,
3208          typename HalModel     = typename HalPolicy::Model>
3209 bool ConvertLocalResponseNormalization(const HalOperation& operation,
3210                                        const HalModel& model,
3211                                        ConversionData& data)
3212 {
3213     if (operation.inputs.size() != 5)
3214     {
3215         return Fail("%s: Optional inputs are not supported", __func__);
3216     }
3217 
3218     using HalOperand     = typename HalPolicy::Operand;
3219     using HalOperandType = typename HalPolicy::OperandType;
3220 
3221     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3222     if (!input.IsValid())
3223     {
3224         return Fail("%s: Operation has invalid inputs", __func__);
3225     }
3226 
3227     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3228     if (!output)
3229     {
3230         return Fail("%s: Could not read output 0", __func__);
3231     }
3232 
3233     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
3234     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3235 
3236     if (outputInfo.GetNumDimensions() != 4u)
3237     {
3238         return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3239     }
3240 
3241     armnn::NormalizationDescriptor descriptor;
3242     descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
3243     descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3244     descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3245 
3246     if (!input.IsValid() ||
3247         !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
3248         !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
3249         !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
3250         !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
3251     {
3252         return Fail("%s: Operation has invalid inputs", __func__);
3253     }
3254 
3255     // ArmNN expects normSize to be the full size of the normalization
3256     // window rather than the radius as in AndroidNN.
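    // e.g. an AndroidNN radius of 2 becomes an ArmNN window size of 5.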
3257     descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
3258 
3259     bool isSupported = false;
3260     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3261     {
3262         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3263                                    IsNormalizationSupported,
3264                                    data.m_Backends,
3265                                    isSupported,
3266                                    inputInfo,
3267                                    outputInfo,
3268                                    descriptor);
3269     };
3270 
3271     if(!IsDynamicTensor(outputInfo))
3272     {
3273         validateFunc(outputInfo, isSupported);
3274     }
3275     else
3276     {
3277         isSupported = AreDynamicTensorsSupported();
3278     }
3279 
3280     if (!isSupported)
3281     {
3282         return false;
3283     }
3284 
3286     armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
3287     assert(layer != nullptr);
3288     input.Connect(layer->GetInputSlot(0));
3289 
3290     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3291 }
3292 
3293 template<typename HalPolicy,
3294          typename HalOperation = typename HalPolicy::Operation,
3295          typename HalModel     = typename HalPolicy::Model>
3296 bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
3297 {
3298     armnn::ActivationDescriptor desc;
3299     desc.m_Function = armnn::ActivationFunction::Sigmoid;
3300 
3301     return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3302 }
3303 
3304 template<typename HalPolicy,
3305          typename HalOperation = typename HalPolicy::Operation,
3306          typename HalModel     = typename HalPolicy::Model>
3307 bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
3308 {
3309     using HalOperand = typename HalPolicy::Operand;
3310 
3311     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3312     if (!input.IsValid())
3313     {
3314         return Fail("%s: Operation has invalid inputs", __func__);
3315     }
3316 
3317     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3318     if (!output)
3319     {
3320         return Fail("%s: Could not read output 0", __func__);
3321     }
3322 
3323     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3324 
3325     const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3326     if (!axisOperand)
3327     {
3328         return Fail("%s: Could not read input 1", __func__);
3329     }
3330 
3331     std::vector<int32_t> axis;
3332     if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3333     {
3334         return Fail("%s: Input 1 has invalid values", __func__);
3335     }
3336 
3337     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3338 
3339     // Convert the axis to unsigned int and remove duplicates.
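    // e.g. for a rank 4 input, an axis of -1 wraps around to 3.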
3340     unsigned int rank = inputInfo.GetNumDimensions();
3341     std::set<unsigned int> uniqueAxis;
3342     std::transform(axis.begin(), axis.end(),
3343                    std::inserter(uniqueAxis, uniqueAxis.begin()),
3344                    [rank](int i) -> unsigned int { return (i + rank) % rank; });
3345 
3346     // Get the "keep dims" flag.
3347     int32_t keepDims = 0;
3348     if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
3349     {
3350         return Fail("%s: Could not read input 2", __func__);
3351     }
3352 
3353     armnn::MeanDescriptor descriptor;
3354     descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3355     descriptor.m_KeepDims = keepDims > 0;
3356 
3357     bool isSupported = false;
3358     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3359     {
3360         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3361                                    IsMeanSupported,
3362                                    data.m_Backends,
3363                                    isSupported,
3364                                    inputInfo,
3365                                    outputInfo,
3366                                    descriptor);
3367     };
3368 
3369     if(!IsDynamicTensor(outputInfo))
3370     {
3371         validateFunc(outputInfo, isSupported);
3372     }
3373     else
3374     {
3375         isSupported = AreDynamicTensorsSupported();
3376     }
3377 
3378     if (!isSupported)
3379     {
3380         return false;
3381     }
3382 
3383     armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
3384     assert(layer != nullptr);
3385     input.Connect(layer->GetInputSlot(0));
3386 
3387     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3388 }
3389 
3390 template<typename HalPolicy,
3391          typename HalOperation = typename HalPolicy::Operation,
3392          typename HalModel     = typename HalPolicy::Model>
3393 bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
3394 {
3395     using HalOperand = typename HalPolicy::Operand;
3396 
3397     LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3398     LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3399 
3400     if (!input0.IsValid() || !input1.IsValid())
3401     {
3402         return Fail("%s: Operation has invalid inputs", __func__);
3403     }
3404 
3405     // The FuseActivation parameter is always at input index 2,
3406     // and it is optional
3407     ActivationFn activationFunction;
3408     if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3409     {
3410         return Fail("%s: Operation has invalid inputs", __func__);
3411     }
3412 
3413     const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
3414 
3415     if (outputOperand == nullptr)
3416     {
3417         return false;
3418     }
3419 
3420     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3421 
3422     bool isSupported = false;
3423     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3424     {
3425         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3426                                    IsMultiplicationSupported,
3427                                    data.m_Backends,
3428                                    isSupported,
3429                                    input0.GetTensorInfo(),
3430                                    input1.GetTensorInfo(),
3431                                    outputInfo);
3432     };
3433 
3434     if(!IsDynamicTensor(outputInfo))
3435     {
3436         validateFunc(outputInfo, isSupported);
3437     }
3438     else
3439     {
3440         isSupported = AreDynamicTensorsSupported();
3441     }
3442 
3443     if (!isSupported)
3444     {
3445         return false;
3446     }
3447 
3448     armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3449 
3453     bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3454     if (!isReshapeSupported)
3455     {
3456         return false;
3457     }
3458 
3459     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3460                                                    data, nullptr, validateFunc, activationFunction);
3461 }
3462 
3463 template<typename HalPolicy,
3464          typename HalOperation = typename HalPolicy::Operation,
3465          typename HalModel     = typename HalPolicy::Model>
3466 bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
3467 {
3468     using HalOperand = typename HalPolicy::Operand;
3469 
3470     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3471     if (!input.IsValid())
3472     {
3473         return Fail("%s: Operation has invalid inputs", __func__);
3474     }
3475 
3476     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3477     unsigned int rank = inputInfo.GetNumDimensions();
3478 
3479     armnn::PadDescriptor descriptor;
3480     if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3481     {
3482         return Fail("%s: Could not convert paddings", __func__);
3483     }
3484 
3485     // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3486     // the scale and zeroPoint must be the same as input0
3487     // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3488     // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3489     // (QuantizationOffset - QuantizationOffset) * scale = 0.
3490     if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
3491     {
3492         descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3493     }
3494 
3495     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3496     if (!output)
3497     {
3498         return Fail("%s: Could not read output", __func__);
3499     }
3500 
3501     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3502 
3503     bool isSupported = false;
3504     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3505     {
3506         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3507                                    IsPadSupported,
3508                                    data.m_Backends,
3509                                    isSupported,
3510                                    inputInfo,
3511                                    outputInfo,
3512                                    descriptor);
3513     };
3514 
3515     if(!IsDynamicTensor(outputInfo))
3516     {
3517         validateFunc(outputInfo, isSupported);
3518     }
3519     else
3520     {
3521         isSupported = AreDynamicTensorsSupported();
3522     }
3523 
3524     if (!isSupported)
3525     {
3526         return false;
3527     }
3528 
3529     armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3530     assert(layer != nullptr);
3531     input.Connect(layer->GetInputSlot(0));
3532 
3533     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3534 }
3535 
3536 template<typename HalPolicy,
3537          typename HalOperation = typename HalPolicy::Operation,
3538          typename HalModel     = typename HalPolicy::Model>
3539 bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
3540 {
3541     using HalOperand = typename HalPolicy::Operand;
3542 
3543     const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3544     const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3545     const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
3546 
3547     if (inputOperand == nullptr
3548         || requestedShapeOperand == nullptr
3549         || outputOperand == nullptr)
3550     {
3551         return Fail("%s: Operation has invalid inputs", __func__);
3552     }
3553 
3554     if (requestedShapeOperand->dimensions.size() != 1)
3555     {
3556         return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3557                     __func__, requestedShapeOperand->dimensions.size());
3558     }
3559 
3560     std::vector<int32_t> targetDimensions;
3561     if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3562     {
3563         return Fail("%s: Could not read values of input 1", __func__);
3564     }
3565 
3566     const Shape inputOperandShape = GetOperandShape(*inputOperand);
3567 
3568     Shape requestedShape;
3569     // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3570     // function that resolves these values into a fully specified tensor shape.
3571     if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3572     {
3573         return Fail("%s: Failed to resolve the requested shape", __func__);
3574     }
3575 
3576     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3577     if (!input.IsValid())
3578     {
3579         return Fail("%s: Could not read input 0", __func__);
3580     }
3581 
3582     armnn::ReshapeDescriptor reshapeDescriptor;
3583     reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3584                                                          requestedShape.dimensions.data());
3585 
3586     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3587 
3588     bool isSupported = false;
3589     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3590     {
3591         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3592                                    IsReshapeSupported,
3593                                    data.m_Backends,
3594                                    isSupported,
3595                                    input.GetTensorInfo(),
3596                                    outputInfo,
3597                                    reshapeDescriptor);
3598     };
3599 
3600     if(!IsDynamicTensor(outputInfo))
3601     {
3602         validateFunc(outputInfo, isSupported);
3603     }
3604     else
3605     {
3606         isSupported = AreDynamicTensorsSupported();
3607     }
3608 
3609     if (!isSupported)
3610     {
3611         return false;
3612     }
3613 
3614     armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3615     assert(layer != nullptr);
3616     input.Connect(layer->GetInputSlot(0));
3617 
3618     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
3619 }
3620 
3621 template<typename HalPolicy,
3622          typename HalOperation = typename HalPolicy::Operation,
3623          typename HalModel     = typename HalPolicy::Model>
3624 bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
3625 {
3626     using HalOperand = typename HalPolicy::Operand;
3627 
3628     LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3629     LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3630 
3631     if (!input0.IsValid() || !input1.IsValid())
3632     {
3633         return Fail("%s: Operation has invalid inputs", __func__);
3634     }
3635 
3636     // The FuseActivation parameter is always at input index 2,
3637     // and it is optional
3638     ActivationFn activationFunction;
3639     if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3640     {
3641         return Fail("%s: Operation has invalid inputs", __func__);
3642     }
3643 
3644     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3645     if (!output)
3646     {
3647         return Fail("%s: Could not read output 0", __func__);
3648     }
3649 
3650     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3651 
3652     bool isSupported = false;
3653     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3654     {
3655         FORWARD_LAYER_SUPPORT_FUNC(__func__,
3656                                    IsSubtractionSupported,
3657                                    data.m_Backends,
3658                                    isSupported,
3659                                    input0.GetTensorInfo(),
3660                                    input1.GetTensorInfo(),
3661                                    outputInfo);
3662     };
3663 
3664     if(IsDynamicTensor(outputInfo))
3665     {
3666         isSupported = AreDynamicTensorsSupported();
3667     }
3668     else
3669     {
3670         validateFunc(outputInfo, isSupported);
3671     }
3672 
3673     if (!isSupported)
3674     {
3675         return false;
3676     }
3677 
3678     armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3679 
3683     bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3684     if (!isReshapeSupported)
3685     {
3686         return false;
3687     }
3688     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3689                                                    data, nullptr, validateFunc, activationFunction);
3690 }
3691 
3692 template<typename HalPolicy,
3693          typename HalOperation = typename HalPolicy::Operation,
3694          typename HalModel     = typename HalPolicy::Model>
3695 bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
3696 {
3697     using HalOperand = typename HalPolicy::Operand;
3698 
3699     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3700     if (!input.IsValid())
3701     {
3702         return Fail("%s: Operation has invalid inputs", __func__);
3703     }
3704 
3705     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
3706     unsigned int rank = inputInfo.GetNumDimensions();
3707     if (rank > 4)
3708     {
3709         return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3710     }
3711 
3712     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3713     if (!output)
3714     {
3715         return Fail("%s: Could not read output 0", __func__);
3716     }
3717 
3718     if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
3719     {
3720         return Fail("%s: Dynamic output tensors are not supported", __func__);
3721     }
3722 
3723     // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3724     // if the operand index is out of bounds.
3725     const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
3726 
3727     const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3728 
3729     std::vector<int32_t> axis;
3730     if (!axisOperand)
3731     {
3732         axis.assign(dimensionSequence,
3733                     dimensionSequence + rank);
3734     }
3735     else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3736     {
3737         return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
3738     }
3739 
3740     std::vector<uint32_t> outputDims;
3741     for (unsigned int i = 0; i < rank; i++)
3742     {
3743         bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3744         auto currentDimension = inputInfo.GetShape()[i];
3745         if (skipSqueeze || currentDimension != 1)
3746         {
3747             outputDims.push_back(currentDimension);
3748         }
3749     }
3750 
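    // SQUEEZE is implemented as a Reshape to the input shape with the selected size-1 dimensions removed.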
3751     armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3752 
3753     armnn::TensorInfo outputInfo = inputInfo;
3754     outputInfo.SetShape(outShape);
3755 
3756     armnn::ReshapeDescriptor reshapeDesc;
3757     reshapeDesc.m_TargetShape = outputInfo.GetShape();
3758 
3759     bool isSupported = false;
3760     FORWARD_LAYER_SUPPORT_FUNC(__func__,
3761                                IsReshapeSupported,
3762                                data.m_Backends,
3763                                isSupported,
3764                                inputInfo,
3765                                outputInfo,
3766                                reshapeDesc);
3767 
3768     if (!isSupported)
3769     {
3770         return false;
3771     }
3772 
3773     armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3774     assert(layer != nullptr);
3775     input.Connect(layer->GetInputSlot(0));
3776 
3777     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3778 }
3779 
3780 template<typename HalPolicy,
3781          typename HalOperation = typename HalPolicy::Operation,
3782          typename HalModel     = typename HalPolicy::Model>
3783 bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
3784 {
3785     using HalOperand = typename HalPolicy::Operand;
3786 
3787     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3788     if (!input.IsValid())
3789     {
3790         return Fail("%s: Operation has invalid inputs", __func__);
3791     }
3792 
3793     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3794     unsigned int rank = inputInfo.GetNumDimensions();
3795     if (rank > 4)
3796     {
3797         return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3798     }
3799 
3800     const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3801     if (!output)
3802     {
3803         return Fail("%s: Could not read output 0", __func__);
3804     }
3805 
3806     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3807 
3808     const HalOperand* beginOperand   = GetInputOperand<HalPolicy>(operation, 1, model);
3809     const HalOperand* endOperand     = GetInputOperand<HalPolicy>(operation, 2, model);
3810     const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
3811 
3812     std::vector<int32_t> beginValues;
3813     std::vector<int32_t> endValues;
3814     std::vector<int32_t> stridesValues;
3815 
3816     // The lengths of beginOperand, endOperand and stridesOperand must all equal the rank of the input
3817     auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
3818     {
3819         if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3820         {
3821             return false;
3822         }
3823 
3824         if (operandValues.size() != rank)
3825         {
3826             return false;
3827         }
3828 
3829         return true;
3830     };
3831 
3832     if (!ValidateInputOperands(*beginOperand, beginValues)
3833         || !ValidateInputOperands(*endOperand, endValues)
3834         || !ValidateInputOperands(*stridesOperand, stridesValues))
3835     {
3836         return Fail("%s: Operation has invalid input operand", __func__);
3837     }
3838 
3839     // Stride cannot have value '0'
3840     if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3841     {
3842         return Fail("%s: Stride must be a non-zero value.", __func__);
3843     }
3844 
3845     armnn::StridedSliceDescriptor descriptor;
3846     descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3847     descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3848     descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3849     descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3850 
3851     // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
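    // Each mask is a bitfield: if bit i of begin_mask/end_mask is set, the begin/end value for axis i is
    // ignored and the fullest possible range is used instead; if bit i of shrink_axis_mask is set, axis i
    // is removed from the output.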
3852     if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3853         !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3854         !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3855     {
3856         return Fail("%s: Operation has invalid inputs", __func__);
3857     }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsStridedSliceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

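    // For a dynamic output tensor, backend validation is deferred: validateFunc is handed to
    // SetupAndTrackLayerOutputSlot below and re-run once the output shape has been inferred.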
    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    // Check if the slice can fit in an inferred output
    armnn::TensorShape inputShape = inputInfo.GetShape();
    for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
    {
        int stride = descriptor.m_Stride[i];
        int start  = descriptor.GetStartForAxis(inputShape, i);
        int stop   = descriptor.GetStopForAxis(inputShape, i, start);

        if (descriptor.m_ShrinkAxisMask & (1 << i))
        {
            // If the difference between the start point and the end point of the slice on an axis being shrunk
            // is greater than 1 then throw an error as the output will not be large enough to hold the slice
            if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
                || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
            {
                return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
            }

            if (stride < 0)
            {
                return Fail("%s: StridedSlice: Stride cannot be negative while ShrinkAxisMask is set.", __func__);
            }
        }
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: The permutation vector is an optional input to TRANSPOSE, so we do not want to
    // generate a failure if the operand index is out of bounds.
    const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

    std::vector<int32_t> perm(rank);
    if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
    {
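        // A missing or NO_VALUE perm operand defaults to reversing the dimension order,
        // e.g. for rank 4 this produces perm = [3, 2, 1, 0].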
        for (unsigned int i = rank; i > 0; i--)
        {
            perm[rank - i] = armnn::numeric_cast<int>(i - 1);
        }
    }
    else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
    }

    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);

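    // m_DimMappings follows the NNAPI perm convention here: output dimension i is taken
    // from input dimension perm[i].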
    armnn::TransposeDescriptor transposeDesc;
    transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   transposeDesc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalOperand     = typename HalPolicy::Operand,
         typename HalModel       = typename HalPolicy::Model>
bool ConvertBatchToSpaceNd(const HalOperation& operation,
                           const HalModel& model,
                           ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!blockOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    // Convert the block operand to int32
    std::vector<int32_t> block;
    if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }

    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
                    " greater than or equal to 1", __func__);
    }

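    // Illustrative example: an input of shape [4, 1, 1, 1] (NHWC) with block = [2, 2] produces an
    // output of shape [1, 2, 2, 1]: the batch dimension is redistributed into the spatial dimensions.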
    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;

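    // HAL 1.2 and later models may supply an optional data layout operand (input 2);
    // earlier models use the NHWC default set above.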
    if (Is12OrLaterOperand(*output))
    {
        batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
    }

    // Set crops to {{0, 0}, {0, 0}} as crops are not exposed by the Android NN API
    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsBatchToSpaceNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   batchToSpaceNdDesc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    unsigned int spatialDim = rank - 2;

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* paddingsOperand   = GetInputOperand<HalPolicy>(operation, 2, model);
    if (!blockShapeOperand || !paddingsOperand)
    {
        return Fail("%s: Could not read inputs 1 and 2", __func__);
    }

    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
    {
        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
    }

    std::vector<int32_t> blockShape;
    if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
    }
    if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
    }

    std::vector<std::pair<unsigned int, unsigned int>> paddingList;
    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
    }
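
    // paddings is a [spatialDim, 2] tensor flattened row-major: entries 2*i and 2*i + 1 hold the
    // padding before and after spatial dimension i respectively.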
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput  = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        paddingList.emplace_back(static_cast<unsigned int>(paddingBeforeInput),
                                 static_cast<unsigned int>(paddingAfterInput));
    }

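    // Illustrative example: an input of shape [1, 4, 4, 1] (NHWC) with blockShape = [2, 2] and zero
    // padding yields an output of shape [4, 2, 2, 1]: each 2x2 spatial block moves into the batch dimension.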
    armnn::SpaceToBatchNdDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
    descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());

    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
    }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsSpaceToBatchNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

} // namespace armnn_driver
#ifdef __clang__
#pragma clang diagnostic pop
#endif