• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/utility/Assert.hpp>
10 #include <armnn/utility/IgnoreUnused.hpp>
11 #include <armnn/utility/NumericCast.hpp>
12 #include <armnnUtils/FloatingPointConverter.hpp>
13 #include <armnnUtils/TensorUtils.hpp>
14 
15 #include <ResolveType.hpp>
16 
17 namespace armnn
18 {
19 
/// Abstract pointer-style iterator over a tensor buffer.
/// All movement operators reposition the iterator and return *this so that
/// calls can be chained; concrete element access is provided by the typed
/// Decoder/Encoder subclasses below.
class BaseIterator
{
public:
    BaseIterator() {}

    virtual ~BaseIterator() {}

    /// Advance to the next element.
    virtual BaseIterator& operator++() = 0;

    /// Advance by 'increment' elements.
    virtual BaseIterator& operator+=(const unsigned int increment) = 0;

    /// Move back by 'increment' elements.
    virtual BaseIterator& operator-=(const unsigned int increment) = 0;

    /// Reposition at absolute element 'index'. Note: unlike a standard
    /// container operator[], this moves the iterator instead of returning
    /// the element; use Get()/Set() on the subclass to access the value.
    virtual BaseIterator& operator[](const unsigned int index) = 0;
};
35 
/// Read-side iterator interface: decodes stored (possibly quantized) values
/// to the interface type IType (usually float).
template<typename IType>
class Decoder : public BaseIterator
{
public:
    Decoder() {}

    virtual ~Decoder() {}

    /// Rebind the iterator to a new underlying buffer.
    virtual void Reset(void*) = 0;

    /// Decode and return the element at the current position.
    virtual IType Get() const = 0;

    /// Decode the entire tensor described by 'tensorShape' into a flat
    /// float vector. 'isDepthwise' selects depthwise-convolution layout
    /// handling in implementations that care about it.
    virtual std::vector<float>
    DecodeTensor(const TensorShape &tensorShape,
                 bool isDepthwise = false) = 0;
};
52 
/// Write-side iterator interface: encodes values of the interface type
/// IType (usually float) into the stored (possibly quantized) format.
template<typename IType>
class Encoder : public BaseIterator
{
public:
    Encoder() {}

    virtual ~Encoder() {}

    /// Rebind the iterator to a new underlying buffer.
    virtual void Reset(void*) = 0;

    /// Encode 'right' and store it at the current position.
    virtual void Set(IType right) = 0;

    /// Decode and return the element at the current position.
    virtual IType Get() const = 0;
};
67 
68 template<typename T, typename Base>
69 class TypedIterator : public Base
70 {
71 public:
TypedIterator(T * data=nullptr)72     TypedIterator(T* data = nullptr)
73         : m_Iterator(data), m_Start(data)
74     {}
75 
Reset(void * data)76     void Reset(void* data) override
77     {
78         m_Iterator = reinterpret_cast<T*>(data);
79         m_Start = m_Iterator;
80     }
81 
operator ++()82     TypedIterator& operator++() override
83     {
84         ARMNN_ASSERT(m_Iterator);
85         ++m_Iterator;
86         return *this;
87     }
88 
operator +=(const unsigned int increment)89     TypedIterator& operator+=(const unsigned int increment) override
90     {
91         ARMNN_ASSERT(m_Iterator);
92         m_Iterator += increment;
93         return *this;
94     }
95 
operator -=(const unsigned int increment)96     TypedIterator& operator-=(const unsigned int increment) override
97     {
98         ARMNN_ASSERT(m_Iterator);
99         m_Iterator -= increment;
100         return *this;
101     }
102 
operator [](const unsigned int index)103     TypedIterator& operator[](const unsigned int index) override
104     {
105         ARMNN_ASSERT(m_Iterator);
106         m_Iterator = m_Start + index;
107         return *this;
108     }
109 
110 protected:
111     T* m_Iterator;
112     T* m_Start;
113 };
114 
115 class QASymm8Decoder : public TypedIterator<const uint8_t, Decoder<float>>
116 {
117 public:
QASymm8Decoder(const uint8_t * data,const float scale,const int32_t offset)118     QASymm8Decoder(const uint8_t* data, const float scale, const int32_t offset)
119         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
120 
QASymm8Decoder(const float scale,const int32_t offset)121     QASymm8Decoder(const float scale, const int32_t offset)
122         : QASymm8Decoder(nullptr, scale, offset) {}
123 
Get() const124     float Get() const override
125     {
126         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
127     }
DecodeTensor(const TensorShape & tensorShape,const bool isDepthwise)128     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
129                                      const bool isDepthwise) override
130     {
131         IgnoreUnused(isDepthwise);
132 
133         const unsigned int size = tensorShape.GetNumElements();
134         std::vector<float> decodedTensor;
135         decodedTensor.reserve(size);
136 
137         for (uint32_t i = 0; i < size; ++i)
138         {
139             this->operator[](i);
140             decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
141         }
142 
143         return decodedTensor;
144     }
145 
146 private:
147 
148     const float m_Scale;
149     const int32_t m_Offset;
150 };
151 
152 class QASymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>>
153 {
154 public:
QASymmS8Decoder(const int8_t * data,const float scale,const int32_t offset)155     QASymmS8Decoder(const int8_t* data, const float scale, const int32_t offset)
156         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
157 
QASymmS8Decoder(const float scale,const int32_t offset)158     QASymmS8Decoder(const float scale, const int32_t offset)
159         : QASymmS8Decoder(nullptr, scale, offset) {}
160 
Get() const161     float Get() const override
162     {
163         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
164     }
DecodeTensor(const TensorShape & tensorShape,const bool isDepthwise)165     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
166                                      const bool isDepthwise) override
167     {
168         IgnoreUnused(isDepthwise);
169 
170         const unsigned int size = tensorShape.GetNumElements();
171         std::vector<float> decodedTensor;
172         decodedTensor.reserve(size);
173 
174         for (uint32_t i = 0; i < size; ++i)
175         {
176             this->operator[](i);
177             decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
178         }
179 
180         return decodedTensor;
181     }
182 
183 private:
184     const float m_Scale;
185     const int32_t m_Offset;
186 
187 };
188 
189 class QSymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>>
190 {
191 public:
QSymmS8Decoder(const int8_t * data,const float scale,const int32_t offset)192     QSymmS8Decoder(const int8_t* data, const float scale, const int32_t offset)
193             : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
194 
QSymmS8Decoder(const float scale,const int32_t offset)195     QSymmS8Decoder(const float scale, const int32_t offset)
196             : QSymmS8Decoder(nullptr, scale, offset) {}
197 
Get() const198     float Get() const override
199     {
200         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
201     }
DecodeTensor(const TensorShape & tensorShape,const bool isDepthwise)202     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
203                                      const bool isDepthwise) override
204     {
205         IgnoreUnused(isDepthwise);
206 
207         const unsigned int size = tensorShape.GetNumElements();
208         std::vector<float> decodedTensor;
209         decodedTensor.reserve(size);
210 
211         for (uint32_t i = 0; i < size; ++i)
212         {
213             this->operator[](i);
214             decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
215         }
216 
217         return decodedTensor;
218     }
219 
220 private:
221     const float m_Scale;
222     const int32_t m_Offset;
223 
224 };
225 
226 class QSymm16Decoder : public TypedIterator<const int16_t, Decoder<float>>
227 {
228 public:
QSymm16Decoder(const int16_t * data,const float scale,const int32_t offset)229     QSymm16Decoder(const int16_t* data, const float scale, const int32_t offset)
230         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
231 
QSymm16Decoder(const float scale,const int32_t offset)232     QSymm16Decoder(const float scale, const int32_t offset)
233         : QSymm16Decoder(nullptr, scale, offset) {}
234 
Get() const235     float Get() const override
236     {
237         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
238     }
DecodeTensor(const TensorShape & tensorShape,const bool isDepthwise)239     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
240                                      const bool isDepthwise) override
241     {
242         IgnoreUnused(isDepthwise);
243 
244         const unsigned int size = tensorShape.GetNumElements();
245         std::vector<float> decodedTensor;
246         decodedTensor.reserve(size);
247 
248         for (uint32_t i = 0; i < size; ++i)
249         {
250             this->operator[](i);
251             decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
252         }
253 
254         return decodedTensor;
255     }
256 
257 private:
258     const float m_Scale;
259     const int32_t m_Offset;
260 
261 };
262 
263 class Float16Decoder : public TypedIterator<const Half, Decoder<float>>
264 {
265 public:
Float16Decoder(const Half * data)266     Float16Decoder(const Half* data)
267         : TypedIterator(data) {}
268 
Float16Decoder()269     Float16Decoder()
270         : Float16Decoder(nullptr) {}
271 
Get() const272     float Get() const override
273     {
274         float val = 0.f;
275         armnnUtils::FloatingPointConverter::ConvertFloat16To32(m_Iterator, 1, &val);
276         return val;
277     }
DecodeTensor(const TensorShape & tensorShape,const bool isDepthwise)278     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
279                                      const bool isDepthwise) override
280     {
281         IgnoreUnused(isDepthwise);
282 
283         const unsigned int size = tensorShape.GetNumElements();
284         std::vector<float> decodedTensor;
285         decodedTensor.reserve(size);
286 
287         for (uint32_t i = 0; i < size; ++i)
288         {
289             float val = 0.f;
290             this->operator[](i);
291             armnnUtils::FloatingPointConverter::ConvertFloat16To32(m_Iterator, 1, &val);
292             decodedTensor.emplace_back(val);
293         }
294 
295         return decodedTensor;
296     }
297 
298 
299 };
300 
301 class Float32Decoder : public TypedIterator<const float, Decoder<float>>
302 {
303 public:
Float32Decoder(const float * data)304     Float32Decoder(const float* data)
305         : TypedIterator(data) {}
306 
Float32Decoder()307     Float32Decoder()
308         : Float32Decoder(nullptr) {}
309 
Get() const310     float Get() const override
311     {
312         return *m_Iterator;
313     }
DecodeTensor(const TensorShape & tensorShape,const bool isDepthwise)314     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
315                                      const bool isDepthwise) override
316     {
317         IgnoreUnused(isDepthwise);
318         const unsigned int size = tensorShape.GetNumElements();
319         std::vector<float> decodedTensor;
320 
321         decodedTensor.reserve(size);
322         decodedTensor.assign(m_Start, m_Start + size);
323 
324         return decodedTensor;
325     }
326 };
327 
328 class ScaledInt32Decoder : public TypedIterator<const int32_t, Decoder<float>>
329 {
330 public:
ScaledInt32Decoder(const int32_t * data,const float scale)331     ScaledInt32Decoder(const int32_t* data, const float scale)
332         : TypedIterator(data), m_Scale(scale) {}
333 
ScaledInt32Decoder(const float scale)334     ScaledInt32Decoder(const float scale)
335         : ScaledInt32Decoder(nullptr, scale) {}
336 
Get() const337     float Get() const override
338     {
339         return static_cast<float>(*m_Iterator) * m_Scale;
340     }
DecodeTensor(const TensorShape & tensorShape,const bool isDepthwise)341     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
342                                      const bool isDepthwise) override
343     {
344         IgnoreUnused(isDepthwise);
345 
346         const unsigned int size = tensorShape.GetNumElements();
347         std::vector<float> decodedTensor;
348         decodedTensor.reserve(size);
349 
350         for (uint32_t i = 0; i < size; ++i)
351         {
352             this->operator[](i);
353             decodedTensor.emplace_back(static_cast<float>(*m_Iterator) * m_Scale);
354         }
355 
356         return decodedTensor;
357     }
358 
359 private:
360     const float m_Scale;
361 
362 };
363 
364 class Int32Decoder : public TypedIterator<const int32_t, Decoder<float>>
365 {
366 public:
Int32Decoder(const int32_t * data)367     Int32Decoder(const int32_t* data)
368         : TypedIterator(data) {}
369 
Int32Decoder()370     Int32Decoder()
371         : Int32Decoder(nullptr) {}
372 
Get() const373     float Get() const override
374     {
375         return static_cast<float>(*m_Iterator);
376     }
DecodeTensor(const TensorShape & tensorShape,const bool isDepthwise)377     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
378                                      const bool isDepthwise) override
379     {
380         IgnoreUnused(isDepthwise);
381 
382         const unsigned int size = tensorShape.GetNumElements();
383         std::vector<float> decodedTensor;
384         decodedTensor.reserve(size);
385 
386         for (uint32_t i = 0; i < size; ++i)
387         {
388             this->operator[](i);
389             decodedTensor.emplace_back(static_cast<float>(*m_Iterator));
390         }
391 
392         return decodedTensor;
393     }
394 };
395 
396 class Int32ToInt32tDecoder : public TypedIterator<const int32_t, Decoder<int32_t>>
397 {
398 public:
Int32ToInt32tDecoder(const int32_t * data)399     Int32ToInt32tDecoder(const int32_t* data)
400             : TypedIterator(data){}
401 
Int32ToInt32tDecoder()402     Int32ToInt32tDecoder()
403             : Int32ToInt32tDecoder(nullptr) {}
404 
Get() const405     int32_t Get() const override
406     {
407         return *m_Iterator;
408     }
DecodeTensor(const TensorShape & tensorShape,const bool isDepthwise)409     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
410                                      const bool isDepthwise) override
411     {
412         IgnoreUnused(isDepthwise);
413 
414         const unsigned int size = tensorShape.GetNumElements();
415         std::vector<float> decodedTensor;
416         decodedTensor.reserve(size);
417 
418         for (uint32_t i = 0; i < size; ++i)
419         {
420             this->operator[](i);
421             decodedTensor.emplace_back(static_cast<float>(*m_Iterator));
422         }
423 
424         return decodedTensor;
425     }
426 };
427 
428 class BooleanDecoder : public TypedIterator<const uint8_t, Decoder<float>>
429 {
430 public:
BooleanDecoder(const uint8_t * data)431     BooleanDecoder(const uint8_t* data)
432             : TypedIterator(data) {}
433 
BooleanDecoder()434     BooleanDecoder()
435             : BooleanDecoder(nullptr) {}
436 
Get() const437     float Get() const override
438     {
439         return *m_Iterator;
440     }
DecodeTensor(const TensorShape & tensorShape,const bool isDepthwise)441     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
442                                      const bool isDepthwise) override
443     {
444         IgnoreUnused(isDepthwise);
445 
446         const unsigned int size = tensorShape.GetNumElements();
447         std::vector<float> decodedTensor;
448         decodedTensor.reserve(size);
449 
450         for (uint32_t i = 0; i < size; ++i)
451         {
452             this->operator[](i);
453             decodedTensor.emplace_back(*m_Iterator);
454         }
455 
456         return decodedTensor;
457     }
458 };
459 
460 class BooleanDecoderBool : public TypedIterator<const uint8_t, Decoder<bool>>
461 {
462 public:
BooleanDecoderBool(const uint8_t * data)463     BooleanDecoderBool(const uint8_t* data)
464         : TypedIterator(data) {}
465 
BooleanDecoderBool()466     BooleanDecoderBool()
467         : BooleanDecoderBool(nullptr) {}
468 
Get() const469     bool Get() const override
470     {
471         return *m_Iterator;
472     }
473 
DecodeTensor(const TensorShape & tensorShape,const bool isDepthwise)474     std::vector<float> DecodeTensor(const TensorShape& tensorShape,
475                                     const bool isDepthwise) override
476     {
477         IgnoreUnused(isDepthwise);
478 
479         const unsigned int size = tensorShape.GetNumElements();
480         std::vector<float> decodedTensor;
481         decodedTensor.reserve(size);
482 
483         for (uint32_t i = 0; i < size; ++i)
484         {
485             this->operator[](i);
486             decodedTensor.emplace_back(*m_Iterator);
487         }
488 
489         return decodedTensor;
490     }
491 };
492 
493 class QASymm8Encoder : public TypedIterator<uint8_t, Encoder<float>>
494 {
495 public:
QASymm8Encoder(uint8_t * data,const float scale,const int32_t offset)496     QASymm8Encoder(uint8_t* data, const float scale, const int32_t offset)
497         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
498 
QASymm8Encoder(const float scale,const int32_t offset)499     QASymm8Encoder(const float scale, const int32_t offset)
500         : QASymm8Encoder(nullptr, scale, offset) {}
501 
Set(float right)502     void Set(float right) override
503     {
504         *m_Iterator = armnn::Quantize<uint8_t>(right, m_Scale, m_Offset);
505     }
506 
Get() const507     float Get() const override
508     {
509         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
510     }
511 
512 private:
513     const float m_Scale;
514     const int32_t m_Offset;
515 };
516 
517 class QASymmS8Encoder : public TypedIterator<int8_t, Encoder<float>>
518 {
519 public:
QASymmS8Encoder(int8_t * data,const float scale,const int32_t offset)520     QASymmS8Encoder(int8_t* data, const float scale, const int32_t offset)
521         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
522 
QASymmS8Encoder(const float scale,const int32_t offset)523     QASymmS8Encoder(const float scale, const int32_t offset)
524         : QASymmS8Encoder(nullptr, scale, offset) {}
525 
Set(float right)526     void Set(float right) override
527     {
528         *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale, m_Offset);
529     }
530 
Get() const531     float Get() const override
532     {
533         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
534     }
535 
536 private:
537     const float m_Scale;
538     const int32_t m_Offset;
539 };
540 
541 class QSymmS8Encoder : public TypedIterator<int8_t, Encoder<float>>
542 {
543 public:
QSymmS8Encoder(int8_t * data,const float scale,const int32_t offset)544     QSymmS8Encoder(int8_t* data, const float scale, const int32_t offset)
545             : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
546 
QSymmS8Encoder(const float scale,const int32_t offset)547     QSymmS8Encoder(const float scale, const int32_t offset)
548             : QSymmS8Encoder(nullptr, scale, offset) {}
549 
Set(float right)550     void Set(float right) override
551     {
552         *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale, m_Offset);
553     }
554 
Get() const555     float Get() const override
556     {
557         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
558     }
559 
560 private:
561     const float m_Scale;
562     const int32_t m_Offset;
563 };
564 
565 class QSymm16Encoder : public TypedIterator<int16_t, Encoder<float>>
566 {
567 public:
QSymm16Encoder(int16_t * data,const float scale,const int32_t offset)568     QSymm16Encoder(int16_t* data, const float scale, const int32_t offset)
569         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
570 
QSymm16Encoder(const float scale,const int32_t offset)571     QSymm16Encoder(const float scale, const int32_t offset)
572         : QSymm16Encoder(nullptr, scale, offset) {}
573 
Set(float right)574     void Set(float right) override
575     {
576         *m_Iterator = armnn::Quantize<int16_t>(right, m_Scale, m_Offset);
577     }
578 
Get() const579     float Get() const override
580     {
581         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
582     }
583 
584 private:
585     const float m_Scale;
586     const int32_t m_Offset;
587 };
588 
589 class Float16Encoder : public TypedIterator<Half, Encoder<float>>
590 {
591 public:
Float16Encoder(Half * data)592     Float16Encoder(Half* data)
593         : TypedIterator(data) {}
594 
Float16Encoder()595     Float16Encoder()
596         : Float16Encoder(nullptr) {}
597 
Set(float right)598     void Set(float right) override
599     {
600         armnnUtils::FloatingPointConverter::ConvertFloat32To16(&right, 1, m_Iterator);
601     }
602 
Get() const603     float Get() const override
604     {
605         float val = 0.f;
606         armnnUtils::FloatingPointConverter::ConvertFloat16To32(m_Iterator, 1, &val);
607         return val;
608     }
609 };
610 
611 class Float32Encoder : public TypedIterator<float, Encoder<float>>
612 {
613 public:
Float32Encoder(float * data)614     Float32Encoder(float* data)
615         : TypedIterator(data) {}
616 
Float32Encoder()617     Float32Encoder()
618         : Float32Encoder(nullptr) {}
619 
Set(float right)620     void Set(float right) override
621     {
622         *m_Iterator = right;
623     }
624 
Get() const625     float Get() const override
626     {
627         return *m_Iterator;
628     }
629 };
630 
631 class Int32Encoder : public TypedIterator<int32_t, Encoder<float>>
632 {
633 public:
Int32Encoder(int32_t * data)634     Int32Encoder(int32_t* data)
635         : TypedIterator(data) {}
636 
Int32Encoder()637     Int32Encoder()
638         : Int32Encoder(nullptr) {}
639 
Set(float right)640     void Set(float right) override
641     {
642         *m_Iterator = static_cast<int32_t>(right);
643     }
644 
Get() const645     float Get() const override
646     {
647         return static_cast<float>(*m_Iterator);
648     }
649 };
650 
651 class Int32ToInt32tEncoder : public TypedIterator<int32_t, Encoder<int32_t>>
652 {
653 public:
Int32ToInt32tEncoder(int32_t * data)654     Int32ToInt32tEncoder(int32_t* data)
655         : TypedIterator(data){}
656 
Int32ToInt32tEncoder()657     Int32ToInt32tEncoder()
658         : Int32ToInt32tEncoder(nullptr) {}
659 
Set(int32_t right)660     void Set(int32_t right) override
661     {
662         *m_Iterator = right;
663     }
664 
Get() const665     int32_t Get() const override
666     {
667         return *m_Iterator;
668     }
669 };
670 
671 class BooleanEncoder : public TypedIterator<uint8_t, Encoder<bool>>
672 {
673 public:
BooleanEncoder(uint8_t * data)674     BooleanEncoder(uint8_t* data)
675         : TypedIterator(data) {}
676 
BooleanEncoder()677     BooleanEncoder()
678         : BooleanEncoder(nullptr) {}
679 
Set(bool right)680     void Set(bool right) override
681     {
682         *m_Iterator = right;
683     }
684 
Get() const685     bool Get() const override
686     {
687         return *m_Iterator;
688     }
689 };
690 
/// PerAxisIterator for per-axis quantization. Iterates over a tensor as layed out in memory and keeps track
/// of the axis index.
template<typename T, typename Base>
class PerAxisIterator : public Base
{
public:
    // NOTE(review): both constructors default every parameter, so a bare
    // PerAxisIterator() call is ambiguous — callers in this file always pass
    // at least two arguments, which disambiguates. Confirm before adding new
    // call sites.
    PerAxisIterator(T* data = nullptr,
                    unsigned int axisFactor = 0,
                    unsigned int axisDimensionality=0)
        : m_Iterator(data),
          m_Start(data),
          m_AxisIndex(0), // iterates over the dimension of axis
          m_AxisDimensionality(axisDimensionality), // tensorShape[quantization_dim]
          m_AxisFactor(axisFactor),
          m_Index(0)
    {}

    /// Convenience constructor: derives the axis dimensionality and the
    /// number of elements after the axis directly from the tensor shape.
    PerAxisIterator(T* data = nullptr,
                    const armnn::TensorShape& tensorShape = TensorShape(),
                    const unsigned int axis = 0)
        : m_Iterator(data),
          m_Start(data),
          m_AxisIndex(0),
          m_Index(0)
    {
        m_AxisDimensionality = tensorShape[axis];
        m_AxisFactor = armnnUtils::GetNumElementsAfter(tensorShape, axis);
    }

    /// Rebind to a new buffer and reset all positional state.
    void Reset(void* data) override
    {
        m_Iterator = reinterpret_cast<T*>(data);
        m_Start = m_Iterator;
        m_AxisIndex = 0;
        m_Index = 0;
    }

    // All movement operators delegate to operator[] so the axis index is
    // recomputed after every reposition.
    PerAxisIterator& operator++() override
    {
        ++m_Index;
        this -> operator[](m_Index);
        return *this;
    }

    PerAxisIterator& operator+=(const unsigned int increment) override
    {
        m_Index += increment;
        this -> operator[](m_Index);
        return *this;
    }

    PerAxisIterator& operator-=(const unsigned int decrement) override
    {
        m_Index -= decrement;
        this -> operator[](m_Index);
        return *this;
    }


    /// Absolute reposition: moves the cursor to start + index and derives
    /// the axis index of that flat position.
    // NOTE(review): if m_AxisFactor or m_AxisDimensionality is 0 (the first
    // constructor's defaults) and index >= m_AxisFactor, the division /
    // modulo below divides by zero — confirm callers always supply non-zero
    // values.
    inline PerAxisIterator& SetIndexOnMem(const unsigned int index)
    {
        ARMNN_ASSERT(m_Iterator);
        m_Iterator = m_Start + index;
        if (index < m_AxisFactor)
        {
            m_AxisIndex = 0;
        }
        else
        {
            m_AxisIndex = (index / m_AxisFactor) % m_AxisDimensionality;
        }
        m_Index = index;
        return *this;
    }

    PerAxisIterator& operator[](const unsigned int index) override
    {
        SetIndexOnMem(index);
        return *this;
    }

    protected:
        T* m_Iterator;                     // current position
        T* m_Start;                        // beginning of the buffer
        unsigned int m_AxisIndex;          // index along the quantization axis
        unsigned int m_AxisDimensionality; // tensorShape[quantization_dim]
        unsigned int m_AxisFactor;         // number of elements after the axis
        unsigned int m_Index;              // current flat element index
};
780 
781 class QSymm8PerAxisDecoder : public PerAxisIterator<const int8_t, Decoder<float>>
782 {
783 public:
QSymm8PerAxisDecoder(const int8_t * data,const armnn::TensorInfo & tensorInfo)784     QSymm8PerAxisDecoder(const int8_t* data, const armnn::TensorInfo& tensorInfo)
785             : PerAxisIterator(data, tensorInfo.GetShape(), tensorInfo.GetQuantizationDim().value()),
786               m_Scales(tensorInfo.GetQuantizationScales())
787     {}
788 
Get() const789     float Get() const override
790     {
791         return armnn::Dequantize(*m_Iterator, GetScale(), 0);
792     }
793 
794     // Get scale of the current value
GetScale() const795     float GetScale() const
796     {
797         return m_Scales[m_AxisIndex];
798     }
799 
DecodeTensor(const TensorShape & tensorShape,bool isDepthwise)800     std::vector<float> DecodeTensor(const TensorShape &tensorShape,
801                                     bool isDepthwise) override
802     {
803         IgnoreUnused(isDepthwise);
804 
805         const unsigned int size = tensorShape.GetNumElements();
806         std::vector<float> decodedTensor;
807         decodedTensor.reserve(size);
808 
809         for (uint32_t i = 0; i < size; ++i)
810         {
811             SetIndexOnMem(i);
812             decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, GetScale(), 0));
813         }
814         return decodedTensor;
815     }
816 
817 private:
818     std::vector<float> m_Scales;
819 };
820 
821 class QSymm8PerAxisEncoder : public PerAxisIterator<int8_t, Encoder<float>>
822 {
823 public:
QSymm8PerAxisEncoder(int8_t * data,const std::vector<float> & scale,unsigned int axisFactor)824     QSymm8PerAxisEncoder(int8_t* data, const std::vector<float>& scale, unsigned int axisFactor)
825         : PerAxisIterator(data, axisFactor), m_Scale(scale) {}
826 
Set(float right)827     void Set(float right)
828     {
829         *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale[m_AxisIndex], 0);
830     }
831 
Get() const832     float Get() const
833     {
834         return armnn::Dequantize(*m_Iterator, m_Scale[m_AxisIndex], 0);
835     }
836 
837     // Get scale of the current value
GetScale() const838     float GetScale() const
839     {
840         return m_Scale[m_AxisIndex];
841     }
842 
843 private:
844     std::vector<float> m_Scale;
845 };
846 
847 class ScaledInt32PerAxisDecoder : public PerAxisIterator<const int32_t, Decoder<float>>
848 {
849 public:
ScaledInt32PerAxisDecoder(const int32_t * data,const armnn::TensorInfo tensorInfo)850     ScaledInt32PerAxisDecoder(const int32_t* data, const armnn::TensorInfo tensorInfo)
851     : PerAxisIterator(data, tensorInfo.GetShape(), tensorInfo.GetQuantizationDim().value()),
852       m_Scales(tensorInfo.GetQuantizationScales())
853     {}
854 
Get() const855     float Get() const override
856     {
857         return armnn::Dequantize(*m_Iterator, m_Scales[m_AxisIndex], 0);
858     }
859 
860     // Get scale of the current value
GetScale() const861     float GetScale() const
862     {
863         return m_Scales[m_AxisIndex];
864     }
865 
DecodeTensor(const TensorShape & tensorShape,bool isDepthwise)866     std::vector<float> DecodeTensor(const TensorShape &tensorShape,
867                                     bool isDepthwise) override
868     {
869         const uint32_t size = tensorShape.GetNumElements();
870 
871         const uint32_t stepSize = isDepthwise ?
872                                   tensorShape[2] * tensorShape[3] : tensorShape.GetNumElements() / tensorShape[0];
873 
874         const uint32_t stepNum = size / stepSize;
875 
876         std::vector<float> decodedTensor;
877         decodedTensor.reserve(size);
878 
879         // channelMultiplier is only used in depthwise convolutions and in other cases will have no effect
880         // stepSize is the length of a contiguous area sharing a quantization scale within a tensor
881         // stepNum is the number of those steps/blocks in the tensor
882         for (uint32_t step = 0; step < stepNum; ++step)
883         {
884             //scale = (channelMultiplier * step + mult) % scaleSize;
885             for (uint32_t i = 0; i < stepSize; ++i)
886             {
887                 unsigned int index = step * stepSize + i;
888                 this->operator[](index);
889                 decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scales[step], 0));
890             }
891         }
892         return decodedTensor;
893     }
894 
895 private:
896     std::vector<float> m_Scales;
897 };
898 
899 } // namespace armnn
900