• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
#include "Deprecated.hpp"
#include "DescriptorsFwd.hpp"

#include "Tensor.hpp"
#include "Types.hpp"

#include <cstdint>
#include <initializer_list>
#include <iterator>
#include <utility>
#include <vector>
15 
16 namespace armnn
17 {
18 
19 /// An ActivationDescriptor for the ActivationLayer.
20 struct ActivationDescriptor
21 {
ActivationDescriptorarmnn::ActivationDescriptor22     ActivationDescriptor()
23         : m_Function(ActivationFunction::Sigmoid)
24         , m_A(0)
25         , m_B(0)
26     {}
27 
ActivationDescriptorarmnn::ActivationDescriptor28     ActivationDescriptor(armnn::ActivationFunction activation,
29                          float a = 0,
30                          float b = 0)
31             : m_Function(activation)
32             , m_A(a)
33             , m_B(b)
34     {}
35 
operator ==armnn::ActivationDescriptor36     bool operator ==(const ActivationDescriptor &rhs) const
37     {
38         return m_Function == rhs.m_Function && m_A == rhs.m_B && m_B == rhs.m_B;
39     }
40 
41     /// @brief The activation function to use
42     /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
43     ActivationFunction m_Function;
44     /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
45     float              m_A;
46     /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
47     float              m_B;
48 };
49 
/// An ArgMinMaxDescriptor for ArgMinMaxLayer
struct ArgMinMaxDescriptor
{
    ArgMinMaxDescriptor()
        : m_Function(ArgMinMaxFunction::Min)
        , m_Axis(-1)
        , m_Output_Type(armnn::DataType::Signed32)
    {}

    bool operator ==(const ArgMinMaxDescriptor &rhs) const
    {
        return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis && m_Output_Type == rhs.m_Output_Type;
    }

    /// Specify if the function is to find Min or Max.
    ArgMinMaxFunction m_Function;
    /// Axis to reduce across the input tensor. Defaults to -1 (the last dimension).
    int m_Axis;
    /// Output tensor data type (int32 or int64). The constructor defaults this to
    /// DataType::Signed32 (the previous comment incorrectly claimed int64 was the default).
    armnn::DataType m_Output_Type;
};
71 
72 /// A ComparisonDescriptor for the ComparisonLayer
73 struct ComparisonDescriptor
74 {
ComparisonDescriptorarmnn::ComparisonDescriptor75     ComparisonDescriptor()
76         : ComparisonDescriptor(ComparisonOperation::Equal)
77     {}
78 
ComparisonDescriptorarmnn::ComparisonDescriptor79     ComparisonDescriptor(ComparisonOperation operation)
80         : m_Operation(operation)
81     {}
82 
operator ==armnn::ComparisonDescriptor83     bool operator ==(const ComparisonDescriptor &rhs) const
84     {
85         return m_Operation == rhs.m_Operation;
86     }
87 
88     /// Specifies the comparison operation to execute
89     ComparisonOperation m_Operation;
90 };
91 
92 /// A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer
93 struct ElementwiseUnaryDescriptor
94 {
ElementwiseUnaryDescriptorarmnn::ElementwiseUnaryDescriptor95     ElementwiseUnaryDescriptor()
96         : ElementwiseUnaryDescriptor(UnaryOperation::Abs)
97     {}
98 
ElementwiseUnaryDescriptorarmnn::ElementwiseUnaryDescriptor99     ElementwiseUnaryDescriptor(UnaryOperation operation)
100         : m_Operation(operation)
101     {}
102 
operator ==armnn::ElementwiseUnaryDescriptor103     bool operator ==(const ElementwiseUnaryDescriptor &rhs) const
104     {
105         return m_Operation == rhs.m_Operation;
106     }
107 
108     /// Specifies the elementwiseUnary operation to execute
109     UnaryOperation m_Operation;
110 };
111 
112 /// A PermuteDescriptor for the PermuteLayer.
113 struct PermuteDescriptor
114 {
PermuteDescriptorarmnn::PermuteDescriptor115     PermuteDescriptor()
116         : m_DimMappings{}
117     {}
118 
PermuteDescriptorarmnn::PermuteDescriptor119     PermuteDescriptor(const PermutationVector& dimMappings)
120         : m_DimMappings(dimMappings)
121     {}
122 
operator ==armnn::PermuteDescriptor123     bool operator ==(const PermuteDescriptor &rhs) const
124     {
125         return m_DimMappings.IsEqual(rhs.m_DimMappings);
126     }
127 
128     /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
129     /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
130     PermutationVector m_DimMappings;
131 };
132 
/// A SoftmaxDescriptor for the SoftmaxLayer.
struct SoftmaxDescriptor
{
    SoftmaxDescriptor() {}

    /// Descriptors compare equal when both the exponentiation value and the axis match.
    bool operator ==(const SoftmaxDescriptor& rhs) const
    {
        if (m_Beta != rhs.m_Beta)
        {
            return false;
        }
        return m_Axis == rhs.m_Axis;
    }

    /// Exponentiation value.
    float m_Beta = 1.0f;
    /// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
    int m_Axis = -1;
};
151 
/// A LogSoftmaxDescriptor for the LogSoftmaxLayer (an alias: it carries the same
/// m_Beta / m_Axis configuration as SoftmaxDescriptor).
using LogSoftmaxDescriptor = SoftmaxDescriptor;
154 
/// @brief An OriginsDescriptor for the ConcatLayer.
/// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
/// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
/// All member functions are defined out-of-line (declaration-only here).
struct OriginsDescriptor
{
    OriginsDescriptor();
    OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
    OriginsDescriptor(const OriginsDescriptor& other);
    OriginsDescriptor(OriginsDescriptor&& other);

    ~OriginsDescriptor();

    /// Assignment takes rhs by value; together with the friend swap this is
    /// presumably the copy-and-swap idiom — confirm in the implementation file.
    OriginsDescriptor& operator=(OriginsDescriptor rhs);

    bool operator ==(const OriginsDescriptor& rhs) const;

    /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
    /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
    /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
    Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
    /// Get the number of views.
    uint32_t GetNumViews() const;
    /// Get the number of dimensions.
    uint32_t GetNumDimensions() const;
    /// Return the view origin at the int value idx.
    const uint32_t* GetViewOrigin(uint32_t idx) const;
    /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
    /// The number of views must match number of elements in the new ordering array.
    void ReorderOrigins(unsigned int*  newOrdering, unsigned int numNewOrdering);
    /// Swap the ViewsDescriptor values first and second.
    friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
    /// Set the concatenation axis value.
    void SetConcatAxis(unsigned int concatAxis);
    /// Get the concatenation axis value.
    unsigned int GetConcatAxis() const;

private:
    /// The dimension along which the input views are concatenated.
    unsigned int m_ConcatAxis;
    /// Number of views (one per concatenation input).
    uint32_t     m_NumViews;
    /// Rank of each view-origin coordinate array.
    uint32_t     m_NumDimensions;
    /// Per-view origin coordinate arrays. NOTE(review): raw pointers with a
    /// user-declared destructor suggest this class owns them — confirm in the .cpp.
    uint32_t**   m_ViewOrigins;
};
197 
/// @brief A ViewsDescriptor for the SplitterLayer.
/// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
/// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
/// All member functions are defined out-of-line (declaration-only here).
struct ViewsDescriptor
{
    ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
    ViewsDescriptor(const ViewsDescriptor& other);
    ViewsDescriptor();
    ViewsDescriptor(ViewsDescriptor&& other);

    ~ViewsDescriptor();

    /// Assignment takes rhs by value; together with the friend swap this is
    /// presumably the copy-and-swap idiom — confirm in the implementation file.
    ViewsDescriptor& operator=(ViewsDescriptor rhs);

    bool operator ==(const ViewsDescriptor& rhs) const;

    /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
    /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
    /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
    Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
    /// @brief Set the size of the views. The arguments are: view, dimension, value.
    /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
    /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
    Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);

    /// Get the number of views.
    uint32_t GetNumViews() const;
    /// Get the number of dimensions.
    uint32_t GetNumDimensions() const;
    /// Get the view origin at the int value idx.
    const uint32_t* GetViewOrigin(uint32_t idx) const;
    /// Get the view sizes at the int value idx.
    const uint32_t* GetViewSizes(uint32_t idx) const;
    /// Get the View Origins
    const OriginsDescriptor& GetOrigins() const;

    /// Swap the ViewsDescriptor value first and second.
    friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
private:
    /// View origins are delegated to an embedded OriginsDescriptor.
    OriginsDescriptor m_Origins;
    /// Per-view size arrays. NOTE(review): raw pointers with a user-declared
    /// destructor suggest this class owns them — confirm in the .cpp.
    uint32_t**        m_ViewSizes;
};
240 
/// Deprecated name for CreateDescriptorForConcatenation; forwards unchanged.
template <typename TensorShapeIt>
ARMNN_DEPRECATED_MSG("Use CreateDescriptorForConcatenation instead")
OriginsDescriptor CreateMergerDescriptorForConcatenation(TensorShapeIt first,
                                                         TensorShapeIt last,
                                                         unsigned int concatenationDimension)
{
    return CreateDescriptorForConcatenation(first, last, concatenationDimension);
}
249 
250 /// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
251 /// concatenation of a number of input tensors.
252 template <typename TensorShapeIt>
CreateDescriptorForConcatenation(TensorShapeIt first,TensorShapeIt last,unsigned int concatenationDimension)253 OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first,
254                                                    TensorShapeIt last,
255                                                    unsigned int concatenationDimension)
256 {
257     auto numInputs = std::distance(first, last);
258 
259     if (numInputs < 2)
260     {
261         throw InvalidArgumentException("Concatenation requires at least 2 inputs");
262     }
263 
264     const auto& firstInputShape = *first;
265 
266     const unsigned int numDimensions = firstInputShape.GetNumDimensions();
267     for (auto it = first + 1; it != last; ++it)
268     {
269         if (it->GetNumDimensions() != numDimensions)
270         {
271             throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions");
272         }
273     }
274 
275     if (concatenationDimension >= numDimensions)
276     {
277         throw InvalidArgumentException("concatenationDimension must be between 0 and the number of dimensions.");
278     }
279 
280     for (auto it = first; it != last; ++it)
281     {
282         for (unsigned int d = 0; d < numDimensions; ++d)
283         {
284             const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]);
285             if (!dimSizeOk)
286             {
287                 throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions "
288                     " except the concatenation dimension");
289             }
290         }
291     }
292 
293     OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions);
294     viewsDescriptor.SetConcatAxis(concatenationDimension);
295 
296     uint32_t viewIndex = 0u;
297     uint32_t coordAlongConcatDim = 0u;
298     for (auto it = first; it != last; ++it)
299     {
300         const auto& inputShape = *it;
301 
302         for (unsigned int i = 0; i < concatenationDimension; ++i)
303         {
304             viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
305         }
306 
307         viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim);
308         unsigned int dimSize = inputShape[concatenationDimension];
309         coordAlongConcatDim += dimSize;
310 
311 
312         for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i)
313         {
314             viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
315         }
316 
317         ++viewIndex;
318     }
319 
320     return viewsDescriptor;
321 }
322 
323 /// A Pooling2dDescriptor for the Pooling2dLayer.
324 struct Pooling2dDescriptor
325 {
Pooling2dDescriptorarmnn::Pooling2dDescriptor326     Pooling2dDescriptor()
327         : m_PoolType(PoolingAlgorithm::Max)
328         , m_PadLeft(0)
329         , m_PadRight(0)
330         , m_PadTop(0)
331         , m_PadBottom(0)
332         , m_PoolWidth(0)
333         , m_PoolHeight(0)
334         , m_StrideX(0)
335         , m_StrideY(0)
336         , m_OutputShapeRounding(OutputShapeRounding::Floor)
337         , m_PaddingMethod(PaddingMethod::Exclude)
338         , m_DataLayout(DataLayout::NCHW)
339     {}
340 
operator ==armnn::Pooling2dDescriptor341     bool operator ==(const Pooling2dDescriptor& rhs) const
342     {
343         return m_PoolType            == rhs.m_PoolType &&
344                m_PadLeft             == rhs.m_PadLeft &&
345                m_PadRight            == rhs.m_PadRight &&
346                m_PadTop              == rhs.m_PadTop &&
347                m_PadBottom           == rhs.m_PadBottom &&
348                m_PoolWidth           == rhs.m_PoolWidth &&
349                m_PoolHeight          == rhs.m_PoolHeight &&
350                m_StrideX             == rhs.m_StrideX &&
351                m_StrideY             == rhs.m_StrideY &&
352                m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
353                m_PaddingMethod       == rhs.m_PaddingMethod &&
354                m_DataLayout          == rhs.m_DataLayout;
355     }
356 
357     /// The pooling algorithm to use (Max. Average, L2).
358     PoolingAlgorithm    m_PoolType;
359     /// Padding left value in the width dimension.
360     uint32_t            m_PadLeft;
361     /// Padding right value in the width dimension.
362     uint32_t            m_PadRight;
363     /// Padding top value in the height dimension.
364     uint32_t            m_PadTop;
365     /// Padding bottom value in the height dimension.
366     uint32_t            m_PadBottom;
367     /// Pooling width value.
368     uint32_t            m_PoolWidth;
369     /// Pooling height value.
370     uint32_t            m_PoolHeight;
371     /// Stride value when proceeding through input for the width dimension.
372     uint32_t            m_StrideX;
373     /// Stride value when proceeding through input for the height dimension.
374     uint32_t            m_StrideY;
375     /// The rounding method for the output shape. (Floor, Ceiling).
376     OutputShapeRounding m_OutputShapeRounding;
377     /// The padding method to be used. (Exclude, IgnoreValue).
378     PaddingMethod       m_PaddingMethod;
379     /// The data layout to be used (NCHW, NHWC).
380     DataLayout   m_DataLayout;
381 };
382 
/// A FullyConnectedDescriptor for the FullyConnectedLayer.
struct FullyConnectedDescriptor
{
    FullyConnectedDescriptor() {}

    /// Descriptors compare equal when both configuration flags match.
    bool operator ==(const FullyConnectedDescriptor& rhs) const
    {
        return (m_BiasEnabled == rhs.m_BiasEnabled) && (m_TransposeWeightMatrix == rhs.m_TransposeWeightMatrix);
    }

    /// Enable/disable bias.
    bool m_BiasEnabled = false;
    /// Enable/disable transpose weight matrix.
    bool m_TransposeWeightMatrix = false;
};
401 
402 /// A Convolution2dDescriptor for the Convolution2dLayer.
403 struct Convolution2dDescriptor
404 {
Convolution2dDescriptorarmnn::Convolution2dDescriptor405     Convolution2dDescriptor()
406         : m_PadLeft(0)
407         , m_PadRight(0)
408         , m_PadTop(0)
409         , m_PadBottom(0)
410         , m_StrideX(0)
411         , m_StrideY(0)
412         , m_DilationX(1)
413         , m_DilationY(1)
414         , m_BiasEnabled(false)
415         , m_DataLayout(DataLayout::NCHW)
416     {}
417 
operator ==armnn::Convolution2dDescriptor418     bool operator ==(const Convolution2dDescriptor& rhs) const
419     {
420         return m_PadLeft     == rhs.m_PadLeft &&
421                m_PadRight    == rhs.m_PadRight &&
422                m_PadTop      == rhs.m_PadTop &&
423                m_PadBottom   == rhs.m_PadBottom &&
424                m_StrideX     == rhs.m_StrideX &&
425                m_StrideY     == rhs.m_StrideY &&
426                m_DilationX   == rhs.m_DilationX &&
427                m_DilationY   == rhs.m_DilationY &&
428                m_BiasEnabled == rhs.m_BiasEnabled &&
429                m_DataLayout  == rhs.m_DataLayout;
430     }
431 
432     /// Padding left value in the width dimension.
433     uint32_t             m_PadLeft;
434     /// Padding right value in the width dimension.
435     uint32_t             m_PadRight;
436     /// Padding top value in the height dimension.
437     uint32_t             m_PadTop;
438     /// Padding bottom value in the height dimension.
439     uint32_t             m_PadBottom;
440     /// Stride value when proceeding through input for the width dimension.
441     uint32_t             m_StrideX;
442     /// Stride value when proceeding through input for the height dimension.
443     uint32_t             m_StrideY;
444     /// Dilation along x axis
445     uint32_t             m_DilationX;
446     /// Dilation along y axis
447     uint32_t             m_DilationY;
448     /// Enable/disable bias.
449     bool                 m_BiasEnabled;
450     /// The data layout to be used (NCHW, NHWC).
451     DataLayout           m_DataLayout;
452 };
453 
454 /// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
455 struct DepthwiseConvolution2dDescriptor
456 {
DepthwiseConvolution2dDescriptorarmnn::DepthwiseConvolution2dDescriptor457     DepthwiseConvolution2dDescriptor()
458         : m_PadLeft(0)
459         , m_PadRight(0)
460         , m_PadTop(0)
461         , m_PadBottom(0)
462         , m_StrideX(0)
463         , m_StrideY(0)
464         , m_DilationX(1)
465         , m_DilationY(1)
466         , m_BiasEnabled(false)
467         , m_DataLayout(DataLayout::NCHW)
468     {}
469 
operator ==armnn::DepthwiseConvolution2dDescriptor470     bool operator ==(const DepthwiseConvolution2dDescriptor& rhs) const
471     {
472         return m_PadLeft     == rhs.m_PadLeft &&
473                m_PadRight    == rhs.m_PadRight &&
474                m_PadTop      == rhs.m_PadTop &&
475                m_PadBottom   == rhs.m_PadBottom &&
476                m_StrideX     == rhs.m_StrideX &&
477                m_StrideY     == rhs.m_StrideY &&
478                m_DilationX   == rhs.m_DilationX &&
479                m_DilationY   == rhs.m_DilationY &&
480                m_BiasEnabled == rhs.m_BiasEnabled &&
481                m_DataLayout  == rhs.m_DataLayout;
482     }
483 
484     /// Padding left value in the width dimension.
485     uint32_t   m_PadLeft;
486     /// Padding right value in the width dimension.
487     uint32_t   m_PadRight;
488     /// Padding top value in the height dimension.
489     uint32_t   m_PadTop;
490     /// Padding bottom value in the height dimension.
491     uint32_t   m_PadBottom;
492     /// Stride value when proceeding through input for the width dimension.
493     uint32_t   m_StrideX;
494     /// Stride value when proceeding through input for the height dimension.
495     uint32_t   m_StrideY;
496     /// Dilation factor value for width dimension.
497     uint32_t   m_DilationX;
498     /// Dilation factor value for height dimension.
499     uint32_t   m_DilationY;
500     /// Enable/disable bias.
501     bool       m_BiasEnabled;
502     /// The data layout to be used (NCHW, NHWC).
503     DataLayout m_DataLayout;
504 };
505 
/// Configures the DetectionPostProcess layer (SSD-style NMS post-processing).
struct DetectionPostProcessDescriptor
{
    DetectionPostProcessDescriptor() {}

    /// Member-wise equality across every post-processing parameter.
    bool operator ==(const DetectionPostProcessDescriptor& rhs) const
    {
        return m_MaxDetections == rhs.m_MaxDetections
            && m_MaxClassesPerDetection == rhs.m_MaxClassesPerDetection
            && m_DetectionsPerClass == rhs.m_DetectionsPerClass
            && m_NmsScoreThreshold == rhs.m_NmsScoreThreshold
            && m_NmsIouThreshold == rhs.m_NmsIouThreshold
            && m_NumClasses == rhs.m_NumClasses
            && m_UseRegularNms == rhs.m_UseRegularNms
            && m_ScaleX == rhs.m_ScaleX
            && m_ScaleY == rhs.m_ScaleY
            && m_ScaleW == rhs.m_ScaleW
            && m_ScaleH == rhs.m_ScaleH;
    }

    /// Maximum numbers of detections.
    uint32_t m_MaxDetections = 0;
    /// Maximum numbers of classes per detection, used in Fast NMS.
    uint32_t m_MaxClassesPerDetection = 1;
    /// Detections per classes, used in Regular NMS.
    uint32_t m_DetectionsPerClass = 1;
    /// NMS score threshold.
    float m_NmsScoreThreshold = 0;
    /// Intersection over union threshold.
    float m_NmsIouThreshold = 0;
    /// Number of classes.
    uint32_t m_NumClasses = 0;
    /// Use Regular NMS.
    bool m_UseRegularNms = false;
    /// Center size encoding scale x.
    float m_ScaleX = 0;
    /// Center size encoding scale y.
    float m_ScaleY = 0;
    /// Center size encoding scale weight.
    float m_ScaleW = 0;
    /// Center size encoding scale height.
    float m_ScaleH = 0;
};
560 
561 /// A NormalizationDescriptor for the NormalizationLayer.
562 struct NormalizationDescriptor
563 {
NormalizationDescriptorarmnn::NormalizationDescriptor564     NormalizationDescriptor()
565         : m_NormChannelType(NormalizationAlgorithmChannel::Across)
566         , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
567         , m_NormSize(0)
568         , m_Alpha(0.f)
569         , m_Beta(0.f)
570         , m_K(0.f)
571         , m_DataLayout(DataLayout::NCHW)
572     {}
573 
operator ==armnn::NormalizationDescriptor574     bool operator ==(const NormalizationDescriptor& rhs) const
575     {
576         return m_NormChannelType == rhs.m_NormChannelType &&
577                m_NormMethodType  == rhs.m_NormMethodType &&
578                m_NormSize        == rhs.m_NormSize &&
579                m_Alpha           == rhs.m_Alpha &&
580                m_Beta            == rhs.m_Beta &&
581                m_K               == rhs.m_K &&
582                m_DataLayout      == rhs.m_DataLayout;
583     }
584 
585     /// Normalization channel algorithm to use (Across, Within).
586     NormalizationAlgorithmChannel m_NormChannelType;
587     /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
588     NormalizationAlgorithmMethod  m_NormMethodType;
589     /// Depth radius value.
590     uint32_t                      m_NormSize;
591     /// Alpha value for the normalization equation.
592     float                         m_Alpha;
593     /// Beta value for the normalization equation.
594     float                         m_Beta;
595     /// Kappa value used for the across channel normalization equation.
596     float                         m_K;
597     /// The data layout to be used (NCHW, NHWC).
598     DataLayout                    m_DataLayout;
599 };
600 
601 /// A L2NormalizationDescriptor for the L2NormalizationLayer.
602 struct L2NormalizationDescriptor
603 {
L2NormalizationDescriptorarmnn::L2NormalizationDescriptor604     L2NormalizationDescriptor()
605         : m_Eps(1e-12f)
606         , m_DataLayout(DataLayout::NCHW)
607     {}
608 
operator ==armnn::L2NormalizationDescriptor609     bool operator ==(const L2NormalizationDescriptor& rhs) const
610     {
611         return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
612     }
613 
614     /// Used to avoid dividing by zero.
615     float m_Eps;
616     /// The data layout to be used (NCHW, NHWC).
617     DataLayout m_DataLayout;
618 };
619 
620 /// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
621 struct BatchNormalizationDescriptor
622 {
BatchNormalizationDescriptorarmnn::BatchNormalizationDescriptor623     BatchNormalizationDescriptor()
624         : m_Eps(0.0001f)
625         , m_DataLayout(DataLayout::NCHW)
626     {}
627 
operator ==armnn::BatchNormalizationDescriptor628     bool operator ==(const BatchNormalizationDescriptor& rhs) const
629     {
630         return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
631     }
632 
633     /// Value to add to the variance. Used to avoid dividing by zero.
634     float m_Eps;
635     /// The data layout to be used (NCHW, NHWC).
636     DataLayout m_DataLayout;
637 };
638 
/// An InstanceNormalizationDescriptor for InstanceNormalizationLayer
struct InstanceNormalizationDescriptor
{
    InstanceNormalizationDescriptor()
        : m_Gamma(1.0f)
        , m_Beta(0.0f)
        , m_Eps(1e-12f)
        , m_DataLayout(DataLayout::NCHW)
    {}

    bool operator ==(const InstanceNormalizationDescriptor& rhs) const
    {
        return m_Gamma      == rhs.m_Gamma &&
               m_Beta       == rhs.m_Beta &&
               m_Eps        == rhs.m_Eps &&
               m_DataLayout == rhs.m_DataLayout;
    }

    /// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
    float m_Gamma;
    /// Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0
    /// (the constructor sets 0.0f; the previous comment incorrectly said 1.0).
    float m_Beta;
    /// Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
    float m_Eps;
    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
};
666 
667 /// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
668 struct BatchToSpaceNdDescriptor
669 {
BatchToSpaceNdDescriptorarmnn::BatchToSpaceNdDescriptor670     BatchToSpaceNdDescriptor()
671         : m_BlockShape({1, 1})
672         , m_Crops({{0, 0}, {0, 0}})
673         , m_DataLayout(DataLayout::NCHW)
674     {}
675 
BatchToSpaceNdDescriptorarmnn::BatchToSpaceNdDescriptor676     BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
677                              std::vector<std::pair<unsigned int, unsigned int>> crops)
678         : m_BlockShape(blockShape)
679         , m_Crops(crops)
680         , m_DataLayout(DataLayout::NCHW)
681     {}
682 
operator ==armnn::BatchToSpaceNdDescriptor683     bool operator ==(const BatchToSpaceNdDescriptor& rhs) const
684     {
685         return m_BlockShape == rhs.m_BlockShape &&
686                m_Crops      == rhs.m_Crops &&
687                m_DataLayout == rhs.m_DataLayout;
688     }
689 
690     /// Block shape values.
691     std::vector<unsigned int> m_BlockShape;
692     /// The values to crop from the input dimension.
693     std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
694     /// The data layout to be used (NCHW, NHWC).
695     DataLayout m_DataLayout;
696 };
697 
/// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
struct FakeQuantizationDescriptor
{
    FakeQuantizationDescriptor() {}

    /// Descriptors compare equal when both range bounds match.
    bool operator ==(const FakeQuantizationDescriptor& rhs) const
    {
        return (m_Min == rhs.m_Min) && (m_Max == rhs.m_Max);
    }

    /// Minimum value.
    float m_Min = -6.0f;
    /// Maximum value.
    float m_Max = 6.0f;
};
716 
/// A FillDescriptor for the FillLayer
struct FillDescriptor
{
    /// Defaults the fill value to 0.
    FillDescriptor() : m_Value(0) {}

    FillDescriptor(const float& value) : m_Value(value) {}

    /// Descriptors compare equal when the fill values match.
    bool operator ==(const FillDescriptor& rhs) const { return m_Value == rhs.m_Value; }

    /// The scalar value used to fill the output tensor.
    float m_Value;
};
735 
/// A GatherDescriptor for the GatherLayer.
struct GatherDescriptor
{
    /// Defaults to gathering along axis 0.
    GatherDescriptor() : m_Axis(0) {}

    GatherDescriptor(int32_t axis) : m_Axis(axis) {}

    /// Descriptors compare equal when the axes match.
    bool operator ==(const GatherDescriptor& rhs) const { return m_Axis == rhs.m_Axis; }

    /// The axis in params to gather indices from
    int32_t m_Axis;
};
755 
756 /// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
757 struct ResizeBilinearDescriptor
758 {
ResizeBilinearDescriptorarmnn::ResizeBilinearDescriptor759     ResizeBilinearDescriptor()
760         : m_TargetWidth(0)
761         , m_TargetHeight(0)
762         , m_DataLayout(DataLayout::NCHW)
763         , m_AlignCorners(false)
764         , m_HalfPixelCenters(false)
765     {}
766 
767     /// Target width value.
768     uint32_t          m_TargetWidth;
769     /// Target height value.
770     uint32_t          m_TargetHeight;
771     /// The data layout to be used (NCHW, NHWC).
772     DataLayout m_DataLayout;
773     /// Aligned corners
774     bool m_AlignCorners;
775     /// Half Pixel Centers
776     bool m_HalfPixelCenters;
777 };
778 
779 /// A ResizeDescriptor for the ResizeLayer.
780 struct ResizeDescriptor
781 {
ResizeDescriptorarmnn::ResizeDescriptor782     ResizeDescriptor()
783         : m_TargetWidth(0)
784         , m_TargetHeight(0)
785         , m_Method(ResizeMethod::NearestNeighbor)
786         , m_DataLayout(DataLayout::NCHW)
787         , m_AlignCorners(false)
788         , m_HalfPixelCenters(false)
789     {}
790 
operator ==armnn::ResizeDescriptor791     bool operator ==(const ResizeDescriptor& rhs) const
792     {
793         return m_TargetWidth          == rhs.m_TargetWidth &&
794                m_TargetHeight         == rhs.m_TargetHeight &&
795                m_Method               == rhs.m_Method &&
796                m_DataLayout           == rhs.m_DataLayout &&
797                m_AlignCorners         == rhs.m_AlignCorners &&
798                m_HalfPixelCenters     == rhs.m_HalfPixelCenters;
799     }
800 
801     /// Target width value.
802     uint32_t m_TargetWidth;
803     /// Target height value.
804     uint32_t m_TargetHeight;
805     /// The Interpolation method to use
806     /// (Bilinear, NearestNeighbor).
807     ResizeMethod m_Method;
808     /// The data layout to be used (NCHW, NHWC).
809     DataLayout m_DataLayout;
810     /// Aligned corners
811     bool m_AlignCorners;
812     /// Half Pixel Centers
813     bool m_HalfPixelCenters;
814 };
815 
816 
817 /// A ReshapeDescriptor for the ReshapeLayer.
818 struct ReshapeDescriptor
819 {
ReshapeDescriptorarmnn::ReshapeDescriptor820     ReshapeDescriptor()
821         : m_TargetShape()
822     {}
823 
ReshapeDescriptorarmnn::ReshapeDescriptor824     ReshapeDescriptor(const TensorShape& shape)
825         : m_TargetShape(shape)
826     {}
827 
operator ==armnn::ReshapeDescriptor828     bool operator ==(const ReshapeDescriptor& rhs) const
829     {
830         return m_TargetShape == rhs.m_TargetShape;
831     }
832 
833     /// Target shape value.
834     TensorShape m_TargetShape;
835 };
836 
837 /// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
838 struct SpaceToBatchNdDescriptor
839 {
SpaceToBatchNdDescriptorarmnn::SpaceToBatchNdDescriptor840     SpaceToBatchNdDescriptor()
841         : m_BlockShape({1, 1})
842         , m_PadList({{0, 0}, {0, 0}})
843         , m_DataLayout(DataLayout::NCHW)
844     {}
845 
SpaceToBatchNdDescriptorarmnn::SpaceToBatchNdDescriptor846     SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
847                              const std::vector<std::pair<unsigned int, unsigned int>>& padList)
848         : m_BlockShape(blockShape)
849         , m_PadList(padList)
850         , m_DataLayout(DataLayout::NCHW)
851     {}
852 
operator ==armnn::SpaceToBatchNdDescriptor853     bool operator ==(const SpaceToBatchNdDescriptor& rhs) const
854     {
855         return m_BlockShape == rhs.m_BlockShape &&
856                m_PadList    == rhs.m_PadList &&
857                m_DataLayout == rhs.m_DataLayout;
858     }
859 
860     /// Block shape value.
861     std::vector<unsigned int> m_BlockShape;
862     /// @brief Specifies the padding values for the input dimension:
863     /// heightPad{top, bottom} widthPad{left, right}.
864     std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
865     /// The data layout to be used (NCHW, NHWC).
866     DataLayout m_DataLayout;
867 };
868 
869 /// A SpaceToDepthDescriptor for the SpaceToDepthLayer
870 struct SpaceToDepthDescriptor
871 {
SpaceToDepthDescriptorarmnn::SpaceToDepthDescriptor872     SpaceToDepthDescriptor()
873         : SpaceToDepthDescriptor(1u, DataLayout::NHWC)
874     {}
875 
SpaceToDepthDescriptorarmnn::SpaceToDepthDescriptor876     SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
877         : m_BlockSize(blockSize)
878         , m_DataLayout(dataLayout)
879     {}
880 
operator ==armnn::SpaceToDepthDescriptor881     bool operator ==(const SpaceToDepthDescriptor& rhs) const
882     {
883         return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
884     }
885 
886     /// Scalar specifying the input block size. It must be >= 1
887     unsigned int m_BlockSize;
888 
889     /// The data layout to be used (NCHW, NHWC).
890     DataLayout m_DataLayout;
891 };
892 
/// A DepthToSpaceDescriptor for the DepthToSpaceLayer.
/// DepthToSpace takes the same parameters as SpaceToDepth (a block size and a
/// data layout), so the two layers share a single descriptor type.
using DepthToSpaceDescriptor = SpaceToDepthDescriptor;
895 
/// An LstmDescriptor for the LstmLayer.
struct LstmDescriptor
{
    LstmDescriptor()
        : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
        , m_ClippingThresCell(0.0)
        , m_ClippingThresProj(0.0)
        , m_CifgEnabled(true)
        , m_PeepholeEnabled(false)
        , m_ProjectionEnabled(false)
        , m_LayerNormEnabled(false)
    {}

    /// Equal only when every configuration field matches.
    bool operator ==(const LstmDescriptor& rhs) const
    {
        // Fix: m_ProjectionEnabled was previously missing from this comparison,
        // so descriptors differing only in projection state compared equal.
        return m_ActivationFunc    == rhs.m_ActivationFunc &&
               m_ClippingThresCell == rhs.m_ClippingThresCell &&
               m_ClippingThresProj == rhs.m_ClippingThresProj &&
               m_CifgEnabled       == rhs.m_CifgEnabled &&
               m_PeepholeEnabled   == rhs.m_PeepholeEnabled &&
               m_ProjectionEnabled == rhs.m_ProjectionEnabled &&
               m_LayerNormEnabled  == rhs.m_LayerNormEnabled;
    }

    /// @brief The activation function to use.
    /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
    uint32_t m_ActivationFunc;
    /// Clipping threshold value for the cell state.
    float m_ClippingThresCell;
    /// Clipping threshold value for the projection.
    float m_ClippingThresProj;
    /// Enable/disable cifg (coupled input & forget gate).
    bool m_CifgEnabled;
    /// Enable/disable peephole.
    bool m_PeepholeEnabled;
    /// Enable/disable the projection layer.
    bool m_ProjectionEnabled;
    /// Enable/disable layer normalization
    bool m_LayerNormEnabled;
};
935 
/// A MeanDescriptor for the MeanLayer.
struct MeanDescriptor
{
    /// Default: reduce over no axes, do not keep dimensions.
    MeanDescriptor()
        : MeanDescriptor({}, false)
    {}

    /// @param axis     Dimensions over which the mean is computed.
    /// @param keepDims Whether reduced length-1 dimensions are retained.
    MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
        : m_Axis(axis)
        , m_KeepDims(keepDims)
    {}

    /// Equal when both the axis list and the keep-dims flag match.
    bool operator ==(const MeanDescriptor& rhs) const
    {
        return m_KeepDims == rhs.m_KeepDims && m_Axis == rhs.m_Axis;
    }

    /// Values for the dimensions to reduce.
    std::vector<unsigned int> m_Axis;
    /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
    bool m_KeepDims;
};
959 
/// A PadDescriptor for the PadLayer.
struct PadDescriptor
{
    /// Default: no padding, pad value 0.
    PadDescriptor() = default;

    PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList, const float& padValue = 0)
        : m_PadList(padList)
        , m_PadValue(padValue)
    {}

    /// Equal when pad list and pad value both match.
    bool operator ==(const PadDescriptor& rhs) const
    {
        return m_PadValue == rhs.m_PadValue && m_PadList == rhs.m_PadList;
    }

    /// @brief Specifies the padding for input dimension.
    /// First is the number of values to add before the tensor in the dimension.
    /// Second is the number of values to add after the tensor in the dimension.
    /// The number of pairs should match the number of dimensions in the input tensor.
    std::vector<std::pair<unsigned int, unsigned int>> m_PadList;

    /// Optional value to use for padding, defaults to 0
    float m_PadValue = 0;
};
985 
/// A SliceDescriptor for the SliceLayer.
struct SliceDescriptor
{
    /// Default: empty begin/size vectors.
    SliceDescriptor() = default;

    /// @param begin Start index of the slice per dimension.
    /// @param size  Extent of the slice per dimension.
    SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
        : m_Begin(begin)
        , m_Size(size)
    {}

    /// Equal when both the begin indices and the sizes match.
    bool operator ==(const SliceDescriptor& rhs) const
    {
        return m_Size == rhs.m_Size && m_Begin == rhs.m_Begin;
    }

    /// Beginning indices of the slice in each dimension.
    std::vector<unsigned int> m_Begin;

    /// Size of the slice in each dimension.
    std::vector<unsigned int> m_Size;
};
1008 
1009 /// A StackDescriptor for the StackLayer.
1010 struct StackDescriptor
1011 {
StackDescriptorarmnn::StackDescriptor1012     StackDescriptor()
1013         : m_Axis(0)
1014         , m_NumInputs(0)
1015         , m_InputShape()
1016     {}
1017 
StackDescriptorarmnn::StackDescriptor1018     StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
1019         : m_Axis(axis)
1020         , m_NumInputs(numInputs)
1021         , m_InputShape(inputShape)
1022     {}
1023 
operator ==armnn::StackDescriptor1024     bool operator ==(const StackDescriptor& rhs) const
1025     {
1026         return m_Axis       == rhs.m_Axis &&
1027                m_NumInputs  == rhs.m_NumInputs &&
1028                m_InputShape == rhs.m_InputShape;
1029     }
1030 
1031     /// 0-based axis along which to stack the input tensors.
1032     uint32_t m_Axis;
1033     /// Number of input tensors.
1034     uint32_t m_NumInputs;
1035     /// Required shape of all input tensors.
1036     TensorShape m_InputShape;
1037 };
1038 
/// A StandInDescriptor for the StandIn layer
struct StandInDescriptor
{
    /// Default: zero inputs and zero outputs.
    StandInDescriptor() = default;

    StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
        : m_NumInputs(numInputs)
        , m_NumOutputs(numOutputs)
    {}

    /// Equal when input and output counts both match.
    bool operator ==(const StandInDescriptor& rhs) const
    {
        return m_NumOutputs == rhs.m_NumOutputs &&
               m_NumInputs  == rhs.m_NumInputs;
    }

    /// Number of input tensors
    uint32_t m_NumInputs = 0;
    /// Number of output tensors
    uint32_t m_NumOutputs = 0;
};
1060 
1061 /// A StridedSliceDescriptor for the StridedSliceLayer.
1062 struct StridedSliceDescriptor
1063 {
StridedSliceDescriptorarmnn::StridedSliceDescriptor1064     StridedSliceDescriptor(const std::vector<int>& begin,
1065                            const std::vector<int>& end,
1066                            const std::vector<int>& stride)
1067         : m_Begin(begin)
1068         , m_End(end)
1069         , m_Stride(stride)
1070         , m_BeginMask(0)
1071         , m_EndMask(0)
1072         , m_ShrinkAxisMask(0)
1073         , m_EllipsisMask(0)
1074         , m_NewAxisMask(0)
1075         , m_DataLayout(DataLayout::NCHW)
1076     {}
1077 
StridedSliceDescriptorarmnn::StridedSliceDescriptor1078     StridedSliceDescriptor()
1079         : StridedSliceDescriptor({}, {}, {})
1080     {}
1081 
operator ==armnn::StridedSliceDescriptor1082     bool operator ==(const StridedSliceDescriptor& rhs) const
1083     {
1084         return m_Begin          == rhs.m_Begin &&
1085                m_End            == rhs.m_End &&
1086                m_Stride         == rhs.m_Stride &&
1087                m_BeginMask      == rhs.m_BeginMask &&
1088                m_EndMask        == rhs.m_EndMask &&
1089                m_ShrinkAxisMask == rhs.m_ShrinkAxisMask &&
1090                m_EllipsisMask   == rhs.m_EllipsisMask &&
1091                m_NewAxisMask    == rhs.m_NewAxisMask &&
1092                m_DataLayout     == rhs.m_DataLayout;
1093     }
1094 
1095     int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
1096     int GetStopForAxis(const TensorShape& inputShape,
1097                        unsigned int axis,
1098                        int startForAxis) const;
1099 
1100     /// Begin values for the input that will be sliced.
1101     std::vector<int> m_Begin;
1102     /// End values for the input that will be sliced.
1103     std::vector<int> m_End;
1104     /// Stride values for the input that will be sliced.
1105     std::vector<int> m_Stride;
1106 
1107     /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
1108     /// range is used for the dimension.
1109     int32_t m_BeginMask;
1110     /// @brief End mask value. If set, then the end is disregarded and the fullest range
1111     /// is used for the dimension.
1112     int32_t m_EndMask;
1113     /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
1114     int32_t m_ShrinkAxisMask;
1115     /// Ellipsis mask value.
1116     int32_t m_EllipsisMask;
1117     /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
1118     /// a new 1 dimension is inserted to this location of the output tensor.
1119     int32_t m_NewAxisMask;
1120 
1121     /// The data layout to be used (NCHW, NHWC).
1122     DataLayout m_DataLayout;
1123 };
1124 
/// A PreCompiledDescriptor for the PreCompiledLayer.
struct PreCompiledDescriptor
{
    /// @param numInputSlots  Number of input slots on the layer (default 1).
    /// @param numOutputSlots Number of output slots on the layer (default 1).
    PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
        : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
    {}

    ~PreCompiledDescriptor() = default;

    /// Equality operator added for consistency with the other descriptors in
    /// this file; equal when both slot counts match.
    bool operator ==(const PreCompiledDescriptor& rhs) const
    {
        return m_NumInputSlots  == rhs.m_NumInputSlots &&
               m_NumOutputSlots == rhs.m_NumOutputSlots;
    }

    /// Number of input slots.
    unsigned int m_NumInputSlots;
    /// Number of output slots.
    unsigned int m_NumOutputSlots;
};
1137 
/// A QLstmDescriptor for the QLstmLayer.
struct QLstmDescriptor
{
    QLstmDescriptor() = default;

    /// Equal only when every configuration field matches.
    bool operator ==(const QLstmDescriptor& rhs) const
    {
        return m_CellClip                == rhs.m_CellClip                &&
               m_ProjectionClip          == rhs.m_ProjectionClip          &&
               m_CifgEnabled             == rhs.m_CifgEnabled             &&
               m_PeepholeEnabled         == rhs.m_PeepholeEnabled         &&
               m_ProjectionEnabled       == rhs.m_ProjectionEnabled       &&
               m_LayerNormEnabled        == rhs.m_LayerNormEnabled        &&
               m_InputIntermediateScale  == rhs.m_InputIntermediateScale  &&
               m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
               m_CellIntermediateScale   == rhs.m_CellIntermediateScale   &&
               m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
               m_HiddenStateZeroPoint    == rhs.m_HiddenStateZeroPoint    &&
               m_HiddenStateScale        == rhs.m_HiddenStateScale;
    }

    /// Clipping threshold value for the cell state
    float m_CellClip                = 0.0f;
    /// Clipping threshold value for the projection
    float m_ProjectionClip          = 0.0f;
    /// Enable/disable CIFG (coupled input & forget gate).
    bool m_CifgEnabled              = true;
    /// Enable/disable peephole
    bool m_PeepholeEnabled          = false;
    /// Enable/disable the projection layer
    bool m_ProjectionEnabled        = false;
    /// Enable/disable layer normalization
    bool m_LayerNormEnabled         = false;
    /// Input intermediate quantization scale
    float m_InputIntermediateScale  = 0.0f;
    /// Forget intermediate quantization scale
    float m_ForgetIntermediateScale = 0.0f;
    /// Cell intermediate quantization scale
    float m_CellIntermediateScale   = 0.0f;
    /// Output intermediate quantization scale
    float m_OutputIntermediateScale = 0.0f;
    /// Hidden State zero point
    int32_t m_HiddenStateZeroPoint  = 0;
    /// Hidden State quantization scale
    float m_HiddenStateScale        = 0.0f;
};
1197 
1198 /// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
1199 struct TransposeConvolution2dDescriptor
1200 {
TransposeConvolution2dDescriptorarmnn::TransposeConvolution2dDescriptor1201     TransposeConvolution2dDescriptor() :
1202         m_PadLeft(0),
1203         m_PadRight(0),
1204         m_PadTop(0),
1205         m_PadBottom(0),
1206         m_StrideX(0),
1207         m_StrideY(0),
1208         m_BiasEnabled(false),
1209         m_DataLayout(DataLayout::NCHW),
1210         m_OutputShapeEnabled(false)
1211     {}
1212 
operator ==armnn::TransposeConvolution2dDescriptor1213     bool operator ==(const TransposeConvolution2dDescriptor& rhs) const
1214     {
1215         return m_PadLeft            == rhs.m_PadLeft &&
1216                m_PadRight           == rhs.m_PadRight &&
1217                m_PadTop             == rhs.m_PadTop &&
1218                m_PadBottom          == rhs.m_PadBottom &&
1219                m_StrideX            == rhs.m_StrideX &&
1220                m_StrideY            == rhs.m_StrideY &&
1221                m_BiasEnabled        == rhs.m_BiasEnabled &&
1222                m_DataLayout         == rhs.m_DataLayout &&
1223                m_OutputShapeEnabled == rhs.m_OutputShapeEnabled &&
1224                m_OutputShape        == rhs.m_OutputShape;
1225     }
1226 
1227     /// Padding left value in the width dimension.
1228     uint32_t                  m_PadLeft;
1229     /// Padding right value in the width dimension.
1230     uint32_t                  m_PadRight;
1231     /// Padding top value in the height dimension.
1232     uint32_t                  m_PadTop;
1233     /// Padding bottom value in the height dimension.
1234     uint32_t                  m_PadBottom;
1235     /// Stride value when proceeding through input for the width dimension.
1236     uint32_t                  m_StrideX;
1237     /// Stride value when proceeding through input for the height dimension.
1238     uint32_t                  m_StrideY;
1239     /// Enable/disable bias.
1240     bool                      m_BiasEnabled;
1241     /// The data layout to be used (NCHW, NHWC).
1242     DataLayout                m_DataLayout;
1243     /// Output shape if it has been specified.
1244     bool                      m_OutputShapeEnabled;
1245     std::vector<unsigned int> m_OutputShape;
1246 };
1247 
1248 /// A TransposeDescriptor for the TransposeLayer.
1249 struct TransposeDescriptor
1250 {
TransposeDescriptorarmnn::TransposeDescriptor1251     TransposeDescriptor()
1252             : m_DimMappings{}
1253     {}
1254 
TransposeDescriptorarmnn::TransposeDescriptor1255     TransposeDescriptor(const PermutationVector& dimMappings)
1256             : m_DimMappings(dimMappings)
1257     {}
1258 
operator ==armnn::TransposeDescriptor1259     bool operator ==(const TransposeDescriptor &rhs) const
1260     {
1261         return m_DimMappings.IsEqual(rhs.m_DimMappings);
1262     }
1263 
1264     /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
1265     /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
1266     PermutationVector m_DimMappings;
1267 };
1268 
1269 /// A LogicalBinaryDescriptor for the LogicalBinaryLayer
1270 struct LogicalBinaryDescriptor
1271 {
LogicalBinaryDescriptorarmnn::LogicalBinaryDescriptor1272     LogicalBinaryDescriptor()
1273         : LogicalBinaryDescriptor(LogicalBinaryOperation::LogicalAnd)
1274     {}
1275 
LogicalBinaryDescriptorarmnn::LogicalBinaryDescriptor1276     LogicalBinaryDescriptor(LogicalBinaryOperation operation)
1277         : m_Operation(operation)
1278     {}
1279 
operator ==armnn::LogicalBinaryDescriptor1280     bool operator ==(const LogicalBinaryDescriptor &rhs) const
1281     {
1282         return m_Operation == rhs.m_Operation;
1283     }
1284 
1285     /// Specifies the logical operation to execute
1286     LogicalBinaryOperation m_Operation;
1287 };
1288 
1289 } // namespace armnn
1290