//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <array>
#include <functional>
#include <memory>
#include <stdint.h>
#include "BackendId.hpp"
#include "Exceptions.hpp"
#include "Deprecated.hpp"

namespace armnn
{

constexpr unsigned int MaxNumOfTensorDimensions = 5U;

/// The lowest performance data capture interval we support is 10 milliseconds.
constexpr unsigned int LOWEST_CAPTURE_PERIOD = 10000u;

/// @enum Status enumeration
/// @var Status::Success
/// @var Status::Failure
enum class Status
{
    Success = 0,
    Failure = 1
};

enum class DataType
{
    Float16 = 0,
    Float32 = 1,
    QAsymmU8 = 2,
    Signed32 = 3,
    Boolean = 4,
    QSymmS16 = 5,
    QuantizedSymm8PerAxis ARMNN_DEPRECATED_ENUM_MSG("Per Axis property inferred by number of scales in TensorInfo") = 6,
    QSymmS8 = 7,
    QAsymmS8 = 8,
    BFloat16 = 9,
    Signed64 = 10,

    QuantisedAsymm8 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QAsymmU8 instead.") = QAsymmU8,
    QuantisedSymm16 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QSymmS16 instead.") = QSymmS16
};

enum class DataLayout
{
    NCHW = 1,
    NHWC = 2
};

enum class ActivationFunction
{
    Sigmoid     = 0,
    TanH        = 1,
    Linear      = 2,
    ReLu        = 3,
    BoundedReLu = 4, ///< min(a, max(b, input)) ReLu1 & ReLu6.
    SoftReLu    = 5,
    LeakyReLu   = 6,
    Abs         = 7,
    Sqrt        = 8,
    Square      = 9,
    Elu         = 10,
    HardSwish   = 11
};

enum class ArgMinMaxFunction
{
    Min = 0,
    Max = 1
};

enum class ComparisonOperation
{
    Equal          = 0,
    Greater        = 1,
    GreaterOrEqual = 2,
    Less           = 3,
    LessOrEqual    = 4,
    NotEqual       = 5
};

enum class LogicalBinaryOperation
{
    LogicalAnd = 0,
    LogicalOr  = 1
};

enum class UnaryOperation
{
    Abs        = 0,
    Exp        = 1,
    Sqrt       = 2,
    Rsqrt      = 3,
    Neg        = 4,
    LogicalNot = 5
};

enum class PoolingAlgorithm
{
    Max     = 0,
    Average = 1,
    L2      = 2
};

enum class ResizeMethod
{
    Bilinear        = 0,
    NearestNeighbor = 1
};

enum class Dimensionality
{
    NotSpecified = 0,
    Specified    = 1,
    Scalar       = 2
};

///
/// The padding method modifies the output of pooling layers.
/// In both supported methods, the padding values are ignored (they are
/// not even zeroes, which would make a difference for max pooling
/// a tensor with negative values). The difference between
/// IgnoreValue and Exclude is that the former counts the padding
/// fields in the divisor of Average and L2 pooling, while
/// Exclude does not. A worked example follows the enum below.
///
enum class PaddingMethod
{
    /// The padding fields count, but are ignored
    IgnoreValue = 0,
    /// The padding fields don't count and are ignored
    Exclude     = 1
};
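
// Worked example (illustrative only, not part of the original documentation): average pooling a 2x2
// window at a tensor edge, covering the real values {1, 2, 3} plus one padding field:
//   IgnoreValue: (1 + 2 + 3) / 4 = 1.5   (the padding field counts towards the divisor)
//   Exclude:     (1 + 2 + 3) / 3 = 2.0   (the padding field is left out of the divisor)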

enum class NormalizationAlgorithmChannel
{
    Across = 0,
    Within = 1
};

enum class NormalizationAlgorithmMethod
{
    /// Krichevsky 2012: Local Brightness Normalization
    LocalBrightness = 0,
    /// Jarret 2009: Local Contrast Normalization
    LocalContrast = 1
};

enum class OutputShapeRounding
{
    Floor       = 0,
    Ceiling     = 1
};

///
/// The ShapeInferenceMethod modifies how the output shapes are treated.
/// When ValidateOnly is selected, the output shapes are inferred from the input parameters of the layer
/// and any mismatch is reported.
/// When InferAndValidate is selected, two actions are performed: (1) infer output shape from inputs and (2) validate the
/// shapes as in ValidateOnly. This option has been added to work with tensors whose rank or dimension sizes are not
/// specified explicitly, although this information can be calculated from the inputs. A usage sketch
/// follows the enum below.
///
enum class ShapeInferenceMethod
{
    /// Validate all output shapes
    ValidateOnly     = 0,
    /// Infer missing output shapes and validate all output shapes
    InferAndValidate = 1
};
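
// A minimal usage sketch (assumptions: armnn::OptimizerOptions exposes an m_shapeInferenceMethod
// member, as in recent Arm NN releases; illustrative only, not part of this header):
//
//     armnn::OptimizerOptions options;
//     // Let the optimizer compute output shapes the parser left unspecified, then validate them.
//     options.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::InferAndValidate;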

/// Each backend should implement an IBackend.
class IBackend
{
protected:
    IBackend() {}
    virtual ~IBackend() {}

public:
    virtual const BackendId& GetId() const = 0;
};

using IBackendSharedPtr = std::shared_ptr<IBackend>;
using IBackendUniquePtr = std::unique_ptr<IBackend, void(*)(IBackend* backend)>;

/// Device specific knowledge to be passed to the optimizer.
class IDeviceSpec
{
protected:
    IDeviceSpec() {}
    virtual ~IDeviceSpec() {}
public:
    virtual const BackendIdSet& GetSupportedBackends() const = 0;
};

/// Type of identifiers for bindable layers (inputs, outputs).
using LayerBindingId = int;

class PermutationVector
{
public:
    using ValueType = unsigned int;
    using SizeType = unsigned int;
    using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>;
    using ConstIterator = typename ArrayType::const_iterator;

    /// @param dimMappings - Indicates how to translate tensor elements from a given source into the target destination,
    /// when source and target potentially have different memory layouts.
    ///
    /// E.g. For a 4-d tensor laid out in memory with the format (Batch Element, Height, Width, Channels),
    /// which is to be passed as an input to ArmNN, each source dimension is mapped to the corresponding
    /// ArmNN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped
    /// to the location of the ArmNN Height dimension (1 -> 2). Similar arguments are made for the Width and
    /// Channels (2 -> 3 and 3 -> 1). This will lead to @ref m_DimMappings pointing to the following array:
    /// [ 0, 2, 3, 1 ].
    ///
    /// Note that the mapping should be reversed if considering the case of ArmNN 4-d outputs (Batch Element,
    /// Channels, Height, Width) being written to a destination with the format mentioned above. We now have
    /// 0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, leads to the following @ref m_DimMappings contents:
    /// [ 0, 3, 1, 2 ]. A usage sketch follows the class definition below.
    ///
    PermutationVector(const ValueType *dimMappings, SizeType numDimMappings);

    PermutationVector(std::initializer_list<ValueType> dimMappings);

    ValueType operator[](SizeType i) const { return m_DimMappings.at(i); }

    SizeType GetSize() const { return m_NumDimMappings; }

    ConstIterator begin() const { return m_DimMappings.begin(); }
    ConstIterator end() const { return m_DimMappings.end(); }

    bool IsEqual(const PermutationVector& other) const
    {
        if (m_NumDimMappings != other.m_NumDimMappings) return false;
        for (unsigned int i = 0; i < m_NumDimMappings; ++i)
        {
            if (m_DimMappings[i] != other.m_DimMappings[i]) return false;
        }
        return true;
    }

    /// Checks whether the two vectors are inverse permutations of each other, i.e. composing
    /// the two mappings yields the identity permutation.
    bool IsInverse(const PermutationVector& other) const
    {
        bool isInverse = (GetSize() == other.GetSize());
        for (SizeType i = 0; isInverse && (i < GetSize()); ++i)
        {
            isInverse = (m_DimMappings[other.m_DimMappings[i]] == i);
        }
        return isInverse;
    }

private:
    ArrayType m_DimMappings;
    /// Number of valid entries in @ref m_DimMappings
    SizeType m_NumDimMappings;
};
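
// A minimal usage sketch (illustrative only): the NHWC -> NCHW mapping described in the class comment
// above, together with its inverse:
//
//     armnn::PermutationVector nhwcToNchw({ 0, 2, 3, 1 });
//     armnn::PermutationVector nchwToNhwc({ 0, 3, 1, 2 });
//     assert(nhwcToNchw.IsInverse(nchwToNhwc)); // composing the two yields the identity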

namespace profiling { class ProfilingGuid; }

/// Define LayerGuid type.
using LayerGuid = profiling::ProfilingGuid;

class ITensorHandle;

/// Define the type of callback for the Debug layer to call
/// @param guid - guid of layer connected to the input of the Debug layer
/// @param slotIndex - index of the output slot connected to the input of the Debug layer
/// @param tensorHandle - TensorHandle for the input tensor to the Debug layer
using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>;
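
// A minimal sketch of a callback matching this signature (illustrative only; assumes <iostream> is available):
//
//     armnn::DebugCallbackFunction printDebugInfo =
//         [](armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* /*tensorHandle*/)
//         {
//             std::cout << "Debug layer fed by layer " << uint64_t(guid)
//                       << ", output slot " << slotIndex << "\n";
//         };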


namespace profiling
{

static constexpr uint64_t MIN_STATIC_GUID = 1llu << 63;

class ProfilingGuid
{
public:
    ProfilingGuid() : m_Guid(0) {}

    ProfilingGuid(uint64_t guid) : m_Guid(guid) {}

    operator uint64_t() const { return m_Guid; }

    bool operator==(const ProfilingGuid& other) const
    {
        return m_Guid == other.m_Guid;
    }

    bool operator!=(const ProfilingGuid& other) const
    {
        return m_Guid != other.m_Guid;
    }

    bool operator<(const ProfilingGuid& other) const
    {
        return m_Guid < other.m_Guid;
    }

    bool operator<=(const ProfilingGuid& other) const
    {
        return m_Guid <= other.m_Guid;
    }

    bool operator>(const ProfilingGuid& other) const
    {
        return m_Guid > other.m_Guid;
    }

    bool operator>=(const ProfilingGuid& other) const
    {
        return m_Guid >= other.m_Guid;
    }

protected:
    uint64_t m_Guid;
};

/// Strongly typed guids to distinguish between those generated at runtime, and those that are statically defined.
struct ProfilingDynamicGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};

struct ProfilingStaticGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};

} // namespace profiling

} // namespace armnn


namespace std
{
/// make ProfilingGuid hashable
template<>
struct hash<armnn::profiling::ProfilingGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};

/// make ProfilingDynamicGuid hashable
template<>
struct hash<armnn::profiling::ProfilingDynamicGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingDynamicGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};

/// make ProfilingStaticGuid hashable
template<>
struct hash<armnn::profiling::ProfilingStaticGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingStaticGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};
} // namespace std
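
// Why the std::hash specialisations above exist: they allow the guid types to be used directly as keys
// in unordered containers. A minimal sketch (illustrative only, not part of this header; assumes
// <unordered_map> and <string> are available):
//
//     std::unordered_map<armnn::profiling::ProfilingGuid, std::string> layerNames;
//     layerNames[armnn::profiling::ProfilingGuid(42)] = "conv1";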