//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/utility/Assert.hpp>
#include <backendsCommon/WorkloadData.hpp>

#include <arm_compute/core/Types.h>

namespace armnn
{

inline arm_compute::NormalizationLayerInfo
CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout)
{
    unsigned int depthDimension = dataLayout == armnn::DataLayout::NCHW ? 1 : 3;
    const unsigned int depth = tensorInfo.GetShape()[depthDimension];

    // At the time of writing, {CL|Neon}L2Normalization performs the reduction only along dimension 0. This version of
    // L2 Normalization always performs the reduction along the depth axis, though. Thus, we repurpose
    // {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by carefully choosing the normalization
    // parameters.
    //
    // Please refer to both the reference implementation of the normalization layer and the implementation of
    // {CL|Neon}NormalizationLayer when checking the derivations for the parameter values below.

    // Make sure normalization covers the entire depth range. ACL requires the normalization size to be odd.
    // CL: this does not result in extra kernel threads that do no work; see the usage of the RADIUS parameter in
    // ACL's normalization_layer_cross_map() CL function.
    const uint32_t normSize = depth * 2u + 1u;

    // See ACL's NormalizationLayerInfo::scale_coeff() definition.
    // For the reference implementation, to make alpha_ become 1, we'd have to use alpha = normSize instead.
    const float alpha = 1.0f;

    // Don't offset the reduction.
    const float kappa = 0.0f;

    // pow(reduction, -0.5) = 1 / sqrt(reduction)
    const float beta = 0.5f;

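    // A sketch of the resulting computation, assuming ACL's reference formula
    // out = in / (kappa + scale * sum)^beta, where scale == alpha because the trailing 'false' below
    // disables ACL's internal scaling of alpha by the window size, and the sum covers the whole depth
    // range thanks to normSize:
    //     out = in / (0 + 1 * sum(in_d^2))^0.5 = in / sqrt(sum(in_d^2)),
    // i.e. exactly an L2 normalization along the depth axis.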
    return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa, false);
}

inline arm_compute::ActivationLayerInfo::ActivationFunction
ConvertActivationFunctionToAclActivationFunction(ActivationFunction armnnFunction)
{
    using AclActivationFunction = arm_compute::ActivationLayerInfo::ActivationFunction;

    switch (armnnFunction)
    {
        case ActivationFunction::Linear:        return AclActivationFunction::LINEAR;
        // Arm compute's 'logistic' function is non-parameterized, so it is exactly a sigmoid function.
        case ActivationFunction::Sigmoid:       return AclActivationFunction::LOGISTIC;
        case ActivationFunction::ReLu:          return AclActivationFunction::RELU;
        case ActivationFunction::BoundedReLu:   return AclActivationFunction::LU_BOUNDED_RELU;
        case ActivationFunction::SoftReLu:      return AclActivationFunction::SOFT_RELU;
        case ActivationFunction::LeakyReLu:     return AclActivationFunction::LEAKY_RELU;
        case ActivationFunction::Abs:           return AclActivationFunction::ABS;
        case ActivationFunction::Sqrt:          return AclActivationFunction::SQRT;
        case ActivationFunction::Square:        return AclActivationFunction::SQUARE;
        case ActivationFunction::TanH:          return AclActivationFunction::TANH;
        case ActivationFunction::Elu:           return AclActivationFunction::ELU;
        case ActivationFunction::HardSwish:     return AclActivationFunction::HARD_SWISH;
        default:                                throw InvalidArgumentException("Unsupported activation function");
    }
}

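/// Builds an ACL ActivationLayerInfo from an ArmNN ActivationDescriptor. m_A and m_B carry the
/// function-specific parameters (e.g. for BoundedReLu, m_A is the upper bound and m_B the lower bound).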
inline arm_compute::ActivationLayerInfo
ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor& actDesc)
{
    return arm_compute::ActivationLayerInfo(ConvertActivationFunctionToAclActivationFunction(actDesc.m_Function),
        actDesc.m_A, actDesc.m_B);
}

inline arm_compute::ActivationLayerInfo
ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor* activationDescPtr)
{
    if (activationDescPtr != nullptr)
    {
        return ConvertActivationDescriptorToAclActivationLayerInfo(static_cast<ActivationDescriptor>(
                                                                           *activationDescPtr));
    }
    return arm_compute::ActivationLayerInfo();
}

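/// Extracts an optional fused ActivationDescriptor attached as additional information on the queue
/// descriptor; returns a default (disabled) ActivationLayerInfo when none is present.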
inline arm_compute::ActivationLayerInfo
ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor& queueDescriptor)
{
    const ActivationDescriptor* activationDescPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();

    if (activationDescPtr != nullptr)
    {
        return ConvertActivationDescriptorToAclActivationLayerInfo(static_cast<ActivationDescriptor>(
                *activationDescPtr));
    }
    return arm_compute::ActivationLayerInfo();
}

inline arm_compute::ComparisonOperation ConvertComparisonOperationToAcl(const ComparisonDescriptor& descriptor)
{
    switch (descriptor.m_Operation)
    {
        case ComparisonOperation::Greater:         return arm_compute::ComparisonOperation::Greater;
        case ComparisonOperation::GreaterOrEqual:  return arm_compute::ComparisonOperation::GreaterEqual;
        case ComparisonOperation::Less:            return arm_compute::ComparisonOperation::Less;
        case ComparisonOperation::LessOrEqual:     return arm_compute::ComparisonOperation::LessEqual;
        case ComparisonOperation::Equal:           return arm_compute::ComparisonOperation::Equal;
        case ComparisonOperation::NotEqual:        return arm_compute::ComparisonOperation::NotEqual;
        default:                                   throw InvalidArgumentException("Unsupported comparison function");
    }
}

inline arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
{
    using arm_compute::PoolingType;

    switch (poolingAlgorithm)
    {
        case PoolingAlgorithm::Max:             return PoolingType::MAX;
        case PoolingAlgorithm::Average:         return PoolingType::AVG;
        case PoolingAlgorithm::L2:              return PoolingType::L2;
        default:                                throw InvalidArgumentException("Unsupported pooling algorithm");
    }
}

inline arm_compute::DimensionRoundingType ConvertOutputShapeRoundingToAclDimensionRoundingType(OutputShapeRounding
                                                                                                              rounding)
{
    using arm_compute::DimensionRoundingType;

    switch (rounding)
    {
        case OutputShapeRounding::Ceiling:  return DimensionRoundingType::CEIL;
        case OutputShapeRounding::Floor:    return DimensionRoundingType::FLOOR;
        default:                            throw InvalidArgumentException("Unsupported Output Shape Rounding type");
    }
}

inline arm_compute::NormType
ConvertNormalizationAlgorithmChannelToAclNormType(NormalizationAlgorithmChannel channelType)
{
    using arm_compute::NormType;
    switch (channelType)
    {
        case NormalizationAlgorithmChannel::Across: return NormType::CROSS_MAP;
        case NormalizationAlgorithmChannel::Within: return NormType::IN_MAP_2D;
        default:    throw InvalidArgumentException("Unsupported normalization algorithm channel type");
    }
}

inline arm_compute::FullyConnectedLayerInfo
ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc,
                                                            const ActivationDescriptor* activationDesc)
{
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
    fc_info.activation_info = ConvertActivationDescriptorToAclActivationLayerInfo(activationDesc);
    return fc_info;
}

inline arm_compute::FullyConnectedLayerInfo
ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc,
        arm_compute::ActivationLayerInfo activationLayerInfo)
{
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
    fc_info.activation_info = activationLayerInfo;
    return fc_info;
}

inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
{
    switch (resizeMethod)
    {
        case ResizeMethod::Bilinear:
            return arm_compute::InterpolationPolicy::BILINEAR;
        case ResizeMethod::NearestNeighbor:
            return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
        default:
            throw InvalidArgumentException("Unsupported resize method");
    }
}

template<typename T>
inline T ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc, const armnn::TensorInfo& tensor)
{
    // Detect the Android default value of -1 and return the ACL default value of 0.
    if (softmaxDesc.m_Axis == -1)
    {
        return 0;
    }

    unsigned int dim = tensor.GetNumDimensions();

    ARMNN_ASSERT(dim != 0);

    // Currently ArmNN supports axis 1.
    auto aclAxis = (static_cast<T>(dim) - 1);
    aclAxis = aclAxis > 0 ? aclAxis - 1 : aclAxis;
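
    // Worked example (illustrative): for a 4D tensor, the assumed ArmNN axis 1 maps to ACL axis
    // (4 - 1) - 1 = 2, since ACL counts dimensions from right to left; for a 2D tensor the result is 0.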

    return aclAxis;
}

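/// Infers the axes along which a splitter divides its input, by comparing each view's size with the
/// input shape. Worked example (illustrative): splitting a [4, 6, 8] input into two [4, 3, 8] views
/// yields the split-axis set {1}.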
inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor& desc, const TensorShape& input)
{
    unsigned int numSplit = desc.GetNumViews();
    unsigned int numDimensions = desc.GetNumDimensions();
    std::set<unsigned int> splitAxis;

    for (unsigned int i = 0; i < numSplit; ++i)
    {
        for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
        {
            if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
            {
                splitAxis.insert(dimIdx);
            }
        }
    }
    return splitAxis;
}

/// Function to convert an ArmNN axis (counted left to right) to an ACL axis (counted right to left), both ranging over [-rank, rank).
inline int ComputeAclAxis(const int& armnnAxis, const armnn::TensorInfo& tensor)
{
    int rank = static_cast<int>(tensor.GetNumDimensions());

    ARMNN_ASSERT(rank != 0);
    ARMNN_ASSERT((-1 * rank) <= armnnAxis);
    ARMNN_ASSERT(armnnAxis < rank);

    int sign = (armnnAxis < 0) ? -1 : 1;
    int aclAxis = sign * rank - 1 - armnnAxis;
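
    // Worked example (illustrative): with rank 4, ArmNN axis 1 gives 4 - 1 - 1 = 2, and ArmNN axis -1
    // gives -4 - 1 + 1 = -4; each result stays within ACL's [-rank, rank) range.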

    return aclAxis;
}

/// Function to convert axis to its positive equivalent value.
/// [-rank, rank) --> [0, rank)
inline unsigned int ComputePositiveAxis(const int& axis, const armnn::TensorInfo& tensor)
{
    int rank = static_cast<int>(tensor.GetNumDimensions());

    ARMNN_ASSERT(rank != 0);
    ARMNN_ASSERT((-1 * rank) <= axis);
    ARMNN_ASSERT(axis < rank);

    int positiveAxis = (axis < 0) ? rank + axis : axis;
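    // E.g. (illustrative) with rank 4: axis -1 becomes 3, while axis 2 is returned unchanged.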
    return static_cast<unsigned int>(positiveAxis);
}

} // namespace armnn