//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/Tensor.hpp>
#include <armnn/DescriptorsFwd.hpp>

#include <armnn/utility/NumericCast.hpp>

#include <arm_compute/core/ITensor.h>
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/core/Types.h>
#include <arm_compute/core/Size2D.h>

#include <Half.hpp>

#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

namespace armnn
{
class ITensorHandle;

namespace armcomputetensorutils
{

/// Utility function to map an armnn::DataType to the corresponding arm_compute::DataType.
arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multiScales);

/// Utility function used to set up an arm_compute::Coordinates object from a vector of ArmNN axes for reduction functions.
arm_compute::Coordinates BuildArmComputeReductionCoordinates(size_t inputDimensions,
                                                             unsigned int originalInputRank,
                                                             const std::vector<unsigned int>& armnnAxes);

/// Utility function used to set up an arm_compute::TensorShape object from an armnn::TensorShape.
arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape);

/// Utility function used to set up an arm_compute::TensorInfo object whose dimensions are based on the given
/// armnn::TensorInfo.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo);

/// Utility function used to set up an arm_compute::TensorInfo object whose dimensions are based on the given
/// armnn::TensorInfo and armnn::DataLayout.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout);

/// Utility function used to convert an armnn::DataLayout to the corresponding arm_compute::DataLayout.
arm_compute::DataLayout ConvertDataLayout(armnn::DataLayout dataLayout);

/// Utility function used to set up an arm_compute::PoolingLayerInfo object from an armnn::Pooling2dDescriptor
/// and an optional fpMixedPrecision flag.
arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor,
                                                              bool fpMixedPrecision = false);

/// Utility function to set up an arm_compute::NormalizationLayerInfo object from an armnn::NormalizationDescriptor.
arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const NormalizationDescriptor& desc);

/// Utility function used to set up an arm_compute::PermutationVector object from an armnn::PermutationVector.
arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& vector);

/// Utility function used to set up an arm_compute::PermutationVector object from an armnn::PermutationVector
/// for use in a Transpose operation.
arm_compute::PermutationVector BuildArmComputeTransposeVector(const armnn::PermutationVector& vector);

/// Utility function used to set up an arm_compute::Size2D object from width and height values.
arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height);

/// Gets the appropriate PixelValue for the input DataType.
arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelValue);

/// Utility function used to set up an arm_compute::PadStrideInfo object from an armnn layer descriptor.
template <typename Descriptor>
arm_compute::PadStrideInfo BuildArmComputePadStrideInfo(const Descriptor& descriptor)
{
    return arm_compute::PadStrideInfo(descriptor.m_StrideX,
                                      descriptor.m_StrideY,
                                      descriptor.m_PadLeft,
                                      descriptor.m_PadRight,
                                      descriptor.m_PadTop,
                                      descriptor.m_PadBottom,
                                      arm_compute::DimensionRoundingType::FLOOR);
}
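
// Illustrative sketch (not part of the original header): any descriptor exposing the
// m_Stride* / m_Pad* members can be used, e.g. an armnn::Convolution2dDescriptor.
//
//   armnn::Convolution2dDescriptor conv;
//   conv.m_StrideX = 2; conv.m_StrideY = 2;
//   conv.m_PadLeft = 1; conv.m_PadRight  = 1;
//   conv.m_PadTop  = 1; conv.m_PadBottom = 1;
//   // Yields a PadStrideInfo with FLOOR rounding, as hard-coded in the template above.
//   arm_compute::PadStrideInfo psi = BuildArmComputePadStrideInfo(conv);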

/// Sets up the given ArmCompute tensor's dimensions based on the given ArmNN tensor.
template <typename Tensor>
void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo)
{
    tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo));
}

/// Sets up the given ArmCompute tensor's dimensions based on the given ArmNN tensor and data layout.
template <typename Tensor>
void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo, DataLayout dataLayout)
{
    tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo, dataLayout));
}

/// Allocates memory for the given ArmCompute tensor through its allocator.
template <typename Tensor>
void InitialiseArmComputeTensorEmpty(Tensor& tensor)
{
    tensor.allocator()->allocate();
}
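
// Illustrative sketch (not part of the original header): building and allocating an
// arm_compute::Tensor from an armnn::TensorInfo. The shape, data type and layout below
// are assumptions for the example only.
//
//   arm_compute::Tensor aclTensor;
//   armnn::TensorInfo nnInfo({ 1, 3, 16, 16 }, armnn::DataType::Float32);
//   BuildArmComputeTensor(aclTensor, nnInfo, armnn::DataLayout::NCHW);
//   InitialiseArmComputeTensorEmpty(aclTensor); // allocates the backing buffer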

/// Utility function to free unused tensors after a workload is configured and prepared
template <typename Tensor>
void FreeTensorIfUnused(std::unique_ptr<Tensor>& tensor)
{
    if (tensor && !tensor->is_used())
    {
        tensor.reset(nullptr);
    }
}

// Helper function to obtain byte offset into tensor data
inline size_t GetTensorOffset(const arm_compute::ITensorInfo& info,
                              uint32_t depthIndex,
                              uint32_t batchIndex,
                              uint32_t channelIndex,
                              uint32_t y,
                              uint32_t x)
{
    arm_compute::Coordinates coords;
    coords.set(4, static_cast<int>(depthIndex));
    coords.set(3, static_cast<int>(batchIndex));
    coords.set(2, static_cast<int>(channelIndex));
    coords.set(1, static_cast<int>(y));
    coords.set(0, static_cast<int>(x));
    return armnn::numeric_cast<size_t>(info.offset_element_in_bytes(coords));
}

// Helper function to obtain element offset into data buffer representing tensor data (assuming no strides).
inline size_t GetLinearBufferOffset(const arm_compute::ITensorInfo& info,
                                    uint32_t depthIndex,
                                    uint32_t batchIndex,
                                    uint32_t channelIndex,
                                    uint32_t y,
                                    uint32_t x)
{
    const arm_compute::TensorShape& shape = info.tensor_shape();
    uint32_t width = static_cast<uint32_t>(shape[0]);
    uint32_t height = static_cast<uint32_t>(shape[1]);
    uint32_t numChannels = static_cast<uint32_t>(shape[2]);
    uint32_t numBatches = static_cast<uint32_t>(shape[3]);
    return (((depthIndex * numBatches + batchIndex) * numChannels + channelIndex) * height + y) * width + x;
}
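
// Worked example (illustrative, with assumed dimensions): for a tensor with width = 4,
// height = 3, numChannels = 2 and numBatches = 2, the element at
// (depthIndex = 0, batchIndex = 1, channelIndex = 1, y = 2, x = 3) sits at linear offset
// (((0 * 2 + 1) * 2 + 1) * 3 + 2) * 4 + 3 = 47 elements from the start of the buffer.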

/// Copies the contents of an ArmCompute tensor into a densely packed linear buffer, one row at a time.
template <typename T>
void CopyArmComputeITensorData(const arm_compute::ITensor& srcTensor, T* dstData)
{
    // If MaxNumOfTensorDimensions is increased, this loop will need fixing.
    static_assert(MaxNumOfTensorDimensions == 5, "Please update CopyArmComputeITensorData");
    {
        const arm_compute::ITensorInfo& info = *srcTensor.info();
        const arm_compute::TensorShape& shape = info.tensor_shape();
        const uint8_t* const bufferPtr = srcTensor.buffer();
        uint32_t width = static_cast<uint32_t>(shape[0]);
        uint32_t height = static_cast<uint32_t>(shape[1]);
        uint32_t numChannels = static_cast<uint32_t>(shape[2]);
        uint32_t numBatches = static_cast<uint32_t>(shape[3]);
        uint32_t depth = static_cast<uint32_t>(shape[4]);

        for (unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
        {
            for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
            {
                for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
                {
                    for (unsigned int y = 0; y < height; ++y)
                    {
                        // Copies one row from arm_compute tensor buffer to linear memory buffer.
                        // A row is the largest contiguous region we can copy, as the tensor data may be using strides.
                        memcpy(
                         dstData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                         bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                         width * sizeof(T));
                    }
                }
            }
        }
    }
}
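
// Illustrative usage (a sketch, not part of the original header): copying a Float32 ACL
// tensor into a std::vector. Assumes `srcTensor` is allocated and holds float data.
//
//   std::vector<float> output(srcTensor.info()->tensor_shape().total_size());
//   CopyArmComputeITensorData(srcTensor, output.data());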

/// Copies the contents of a densely packed linear buffer into an ArmCompute tensor, one row at a time.
template <typename T>
void CopyArmComputeITensorData(const T* srcData, arm_compute::ITensor& dstTensor)
{
    // If MaxNumOfTensorDimensions is increased, this loop will need fixing.
    static_assert(MaxNumOfTensorDimensions == 5, "Please update CopyArmComputeITensorData");
    {
        const arm_compute::ITensorInfo& info = *dstTensor.info();
        const arm_compute::TensorShape& shape = info.tensor_shape();
        uint8_t* const bufferPtr = dstTensor.buffer();
        uint32_t width = static_cast<uint32_t>(shape[0]);
        uint32_t height = static_cast<uint32_t>(shape[1]);
        uint32_t numChannels = static_cast<uint32_t>(shape[2]);
        uint32_t numBatches = static_cast<uint32_t>(shape[3]);
        uint32_t depth = static_cast<uint32_t>(shape[4]);

        for (unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
        {
            for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
            {
                for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
                {
                    for (unsigned int y = 0; y < height; ++y)
                    {
                        // Copies one row from linear memory buffer to arm_compute tensor buffer.
                        // A row is the largest contiguous region we can copy, as the tensor data may be using strides.
                        memcpy(
                         bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                         srcData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                         width * sizeof(T));
                    }
                }
            }
        }
    }
}

/// Construct a TensorShape object from an ArmCompute object based on arm_compute::Dimensions.
/// \tparam ArmComputeType Any type that implements the Dimensions interface
/// \tparam T Shape value type
/// \param shapelike An ArmCompute object that implements the Dimensions interface
/// \param initial A default value to initialise the shape with
/// \return A TensorShape object filled from the ACL shapelike object.
template<typename ArmComputeType, typename T>
TensorShape GetTensorShape(const ArmComputeType& shapelike, T initial)
{
    std::vector<unsigned int> s(MaxNumOfTensorDimensions, initial);
    for (unsigned int i = 0; i < shapelike.num_dimensions(); ++i)
    {
        s[(shapelike.num_dimensions() - 1) - i] = armnn::numeric_cast<unsigned int>(shapelike[i]);
    }
    return TensorShape(armnn::numeric_cast<unsigned int>(shapelike.num_dimensions()), s.data());
}
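
// Illustrative sketch (assumed values): arm_compute indexes dimension 0 as the innermost
// (fastest-varying) dimension, while armnn::TensorShape lists the outermost dimension first,
// so the loop above reverses the order. For example, an arm_compute::TensorShape(4, 3, 2)
// becomes an armnn::TensorShape of { 2, 3, 4 }.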

/// Get the strides from an ACL strides object
inline TensorShape GetStrides(const arm_compute::Strides& strides)
{
    return GetTensorShape(strides, 0U);
}

/// Get the shape from an ACL shape object
inline TensorShape GetShape(const arm_compute::TensorShape& shape)
{
    return GetTensorShape(shape, 1U);
}

} // namespace armcomputetensorutils
} // namespace armnn