• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/utility/Assert.hpp>
10 #include <armnn/utility/IgnoreUnused.hpp>
11 #include <armnnUtils/FloatingPointConverter.hpp>
12 
13 #include <ResolveType.hpp>
14 
15 namespace armnn
16 {
17 
/// Abstract cursor over a tensor buffer. Concrete Encoder/Decoder subclasses
/// move this cursor with the operators below and read/write the element it
/// points at. All movement operators return *this to allow chaining.
class BaseIterator
{
public:
    // Empty user-provided bodies replaced with = default (modernize-use-equals-default).
    BaseIterator() = default;

    virtual ~BaseIterator() = default;

    /// Position the cursor at a flat element index. axisIndex selects the
    /// quantization-axis slot for per-axis iterators; plain iterators ignore it.
    virtual BaseIterator& SetIndex(unsigned int index, unsigned int axisIndex = 0) = 0;

    /// Advance to the next element.
    virtual BaseIterator& operator++() = 0;

    /// Advance by 'increment' elements.
    virtual BaseIterator& operator+=(const unsigned int increment) = 0;

    /// Move back by 'increment' elements.
    virtual BaseIterator& operator-=(const unsigned int increment) = 0;

    /// Jump to an absolute element index (does not dereference).
    virtual BaseIterator& operator[](const unsigned int index) = 0;
};
35 
36 template<typename IType>
37 class Decoder : public BaseIterator
38 {
39 public:
Decoder()40     Decoder() {}
41 
~Decoder()42     virtual ~Decoder() {}
43 
44     virtual void Reset(void*) = 0;
45 
46     virtual IType Get() const = 0;
47 
48     virtual std::vector<float>
49     DecodeTensor(const TensorShape &tensorShape,
50                  const unsigned int channelMultiplier = 1,
51                  bool isDepthwise = false) = 0;
52 };
53 
54 template<typename IType>
55 class Encoder : public BaseIterator
56 {
57 public:
Encoder()58     Encoder() {}
59 
~Encoder()60     virtual ~Encoder() {}
61 
62     virtual void Reset(void*) = 0;
63 
64     virtual void Set(IType right) = 0;
65 
66     virtual IType Get() const = 0;
67 };
68 
/// Common pointer-walking implementation shared by all flat (non-per-axis)
/// encoders/decoders. T is the storage element type; Base is the
/// Encoder<IType>/Decoder<IType> interface being implemented.
/// m_Start remembers the buffer origin so operator[]/SetIndex can do
/// absolute positioning; m_Iterator is the current cursor.
template<typename T, typename Base>
class TypedIterator : public Base
{
public:
    // data may be null; callers must Reset() before dereferencing in that case.
    TypedIterator(T* data = nullptr)
        : m_Iterator(data), m_Start(data)
    {}

    // Re-point at a new buffer; cursor returns to the origin.
    void Reset(void* data) override
    {
        m_Iterator = reinterpret_cast<T*>(data);
        m_Start = m_Iterator;
    }

    TypedIterator& operator++() override
    {
        ARMNN_ASSERT(m_Iterator);
        ++m_Iterator;
        return *this;
    }

    TypedIterator& operator+=(const unsigned int increment) override
    {
        ARMNN_ASSERT(m_Iterator);
        m_Iterator += increment;
        return *this;
    }

    TypedIterator& operator-=(const unsigned int increment) override
    {
        ARMNN_ASSERT(m_Iterator);
        m_Iterator -= increment;
        return *this;
    }

    // Absolute positioning: index is relative to m_Start, not the cursor.
    TypedIterator& operator[](const unsigned int index) override
    {
        ARMNN_ASSERT(m_Iterator);
        m_Iterator = m_Start + index;
        return *this;
    }

    // axisIndex is only meaningful for per-axis iterators; ignored here.
    TypedIterator& SetIndex(unsigned int index, unsigned int axisIndex = 0) override
    {
        IgnoreUnused(axisIndex);
        ARMNN_ASSERT(m_Iterator);
        m_Iterator = m_Start + index;
        return *this;
    }

protected:
    T* m_Iterator; // current cursor
    T* m_Start;    // buffer origin (for absolute indexing)
};
123 
124 class QASymm8Decoder : public TypedIterator<const uint8_t, Decoder<float>>
125 {
126 public:
QASymm8Decoder(const uint8_t * data,const float scale,const int32_t offset)127     QASymm8Decoder(const uint8_t* data, const float scale, const int32_t offset)
128         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
129 
QASymm8Decoder(const float scale,const int32_t offset)130     QASymm8Decoder(const float scale, const int32_t offset)
131         : QASymm8Decoder(nullptr, scale, offset) {}
132 
Get() const133     float Get() const override
134     {
135         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
136     }
DecodeTensor(const TensorShape & tensorShape,const unsigned int channelMultiplier,const bool isDepthwise)137     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
138                                      const unsigned int channelMultiplier,
139                                      const bool isDepthwise) override
140     {
141         IgnoreUnused(channelMultiplier, isDepthwise);
142 
143         const unsigned int size = tensorShape.GetNumElements();
144         std::vector<float> decodedTensor;
145         decodedTensor.reserve(size);
146 
147         for (uint32_t i = 0; i < size; ++i)
148         {
149             this->operator[](i);
150             decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
151         }
152 
153         return decodedTensor;
154     }
155 
156 private:
157 
158     const float m_Scale;
159     const int32_t m_Offset;
160 };
161 
162 class QASymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>>
163 {
164 public:
QASymmS8Decoder(const int8_t * data,const float scale,const int32_t offset)165     QASymmS8Decoder(const int8_t* data, const float scale, const int32_t offset)
166         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
167 
QASymmS8Decoder(const float scale,const int32_t offset)168     QASymmS8Decoder(const float scale, const int32_t offset)
169         : QASymmS8Decoder(nullptr, scale, offset) {}
170 
Get() const171     float Get() const override
172     {
173         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
174     }
DecodeTensor(const TensorShape & tensorShape,const unsigned int channelMultiplier,const bool isDepthwise)175     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
176                                      const unsigned int channelMultiplier,
177                                      const bool isDepthwise) override
178     {
179         IgnoreUnused(channelMultiplier, isDepthwise);
180 
181         const unsigned int size = tensorShape.GetNumElements();
182         std::vector<float> decodedTensor;
183         decodedTensor.reserve(size);
184 
185         for (uint32_t i = 0; i < size; ++i)
186         {
187             this->operator[](i);
188             decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
189         }
190 
191         return decodedTensor;
192     }
193 
194 private:
195     const float m_Scale;
196     const int32_t m_Offset;
197 
198 };
199 
200 class QSymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>>
201 {
202 public:
QSymmS8Decoder(const int8_t * data,const float scale,const int32_t offset)203     QSymmS8Decoder(const int8_t* data, const float scale, const int32_t offset)
204             : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
205 
QSymmS8Decoder(const float scale,const int32_t offset)206     QSymmS8Decoder(const float scale, const int32_t offset)
207             : QSymmS8Decoder(nullptr, scale, offset) {}
208 
Get() const209     float Get() const override
210     {
211         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
212     }
DecodeTensor(const TensorShape & tensorShape,const unsigned int channelMultiplier,const bool isDepthwise)213     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
214                                      const unsigned int channelMultiplier,
215                                      const bool isDepthwise) override
216     {
217         IgnoreUnused(channelMultiplier, isDepthwise);
218 
219         const unsigned int size = tensorShape.GetNumElements();
220         std::vector<float> decodedTensor;
221         decodedTensor.reserve(size);
222 
223         for (uint32_t i = 0; i < size; ++i)
224         {
225             this->operator[](i);
226             decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
227         }
228 
229         return decodedTensor;
230     }
231 
232 private:
233     const float m_Scale;
234     const int32_t m_Offset;
235 
236 };
237 
238 class QSymm16Decoder : public TypedIterator<const int16_t, Decoder<float>>
239 {
240 public:
QSymm16Decoder(const int16_t * data,const float scale,const int32_t offset)241     QSymm16Decoder(const int16_t* data, const float scale, const int32_t offset)
242         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
243 
QSymm16Decoder(const float scale,const int32_t offset)244     QSymm16Decoder(const float scale, const int32_t offset)
245         : QSymm16Decoder(nullptr, scale, offset) {}
246 
Get() const247     float Get() const override
248     {
249         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
250     }
DecodeTensor(const TensorShape & tensorShape,const unsigned int channelMultiplier,const bool isDepthwise)251     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
252                                      const unsigned int channelMultiplier,
253                                      const bool isDepthwise) override
254     {
255         IgnoreUnused(channelMultiplier, isDepthwise);
256 
257         const unsigned int size = tensorShape.GetNumElements();
258         std::vector<float> decodedTensor;
259         decodedTensor.reserve(size);
260 
261         for (uint32_t i = 0; i < size; ++i)
262         {
263             this->operator[](i);
264             decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
265         }
266 
267         return decodedTensor;
268     }
269 
270 private:
271     const float m_Scale;
272     const int32_t m_Offset;
273 
274 };
275 
276 class BFloat16Decoder : public TypedIterator<const BFloat16, Decoder<float>>
277 {
278 public:
BFloat16Decoder(const BFloat16 * data)279     BFloat16Decoder(const BFloat16* data)
280         : TypedIterator(data) {}
281 
BFloat16Decoder()282     BFloat16Decoder()
283         : BFloat16Decoder(nullptr) {}
284 
Get() const285     float Get() const override
286     {
287         float val = 0.f;
288         armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val);
289         return val;
290     }
DecodeTensor(const TensorShape & tensorShape,const unsigned int channelMultiplier,const bool isDepthwise)291     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
292                                      const unsigned int channelMultiplier,
293                                      const bool isDepthwise) override
294     {
295         IgnoreUnused(channelMultiplier, isDepthwise);
296 
297         const unsigned int size = tensorShape.GetNumElements();
298         std::vector<float> decodedTensor;
299         decodedTensor.reserve(size);
300 
301         for (uint32_t i = 0; i < size; ++i)
302         {
303             this->operator[](i);
304 
305             float val = 0.f;
306             armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val);
307             decodedTensor.emplace_back(val);
308         }
309 
310         return decodedTensor;
311     }
312 
313 };
314 
315 class Float16Decoder : public TypedIterator<const Half, Decoder<float>>
316 {
317 public:
Float16Decoder(const Half * data)318     Float16Decoder(const Half* data)
319         : TypedIterator(data) {}
320 
Float16Decoder()321     Float16Decoder()
322         : Float16Decoder(nullptr) {}
323 
Get() const324     float Get() const override
325     {
326         float val = 0.f;
327         armnnUtils::FloatingPointConverter::ConvertFloat16To32(m_Iterator, 1, &val);
328         return val;
329     }
DecodeTensor(const TensorShape & tensorShape,const unsigned int channelMultiplier,const bool isDepthwise)330     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
331                                      const unsigned int channelMultiplier,
332                                      const bool isDepthwise) override
333     {
334         IgnoreUnused(channelMultiplier, isDepthwise);
335 
336         const unsigned int size = tensorShape.GetNumElements();
337         std::vector<float> decodedTensor;
338         decodedTensor.reserve(size);
339 
340         for (uint32_t i = 0; i < size; ++i)
341         {
342             float val = 0.f;
343             this->operator[](i);
344             armnnUtils::FloatingPointConverter::ConvertFloat16To32(m_Iterator, 1, &val);
345             decodedTensor.emplace_back(val);
346         }
347 
348         return decodedTensor;
349     }
350 
351 
352 };
353 
354 class Float32Decoder : public TypedIterator<const float, Decoder<float>>
355 {
356 public:
Float32Decoder(const float * data)357     Float32Decoder(const float* data)
358         : TypedIterator(data) {}
359 
Float32Decoder()360     Float32Decoder()
361         : Float32Decoder(nullptr) {}
362 
Get() const363     float Get() const override
364     {
365         return *m_Iterator;
366     }
DecodeTensor(const TensorShape & tensorShape,const unsigned int channelMultiplier,const bool isDepthwise)367     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
368                                      const unsigned int channelMultiplier,
369                                      const bool isDepthwise) override
370     {
371         IgnoreUnused(channelMultiplier, isDepthwise);
372         const unsigned int size = tensorShape.GetNumElements();
373         std::vector<float> decodedTensor;
374 
375         decodedTensor.reserve(size);
376         decodedTensor.assign(m_Start, m_Start + size);
377 
378         return decodedTensor;
379     }
380 };
381 
382 class ScaledInt32Decoder : public TypedIterator<const int32_t, Decoder<float>>
383 {
384 public:
ScaledInt32Decoder(const int32_t * data,const float scale)385     ScaledInt32Decoder(const int32_t* data, const float scale)
386         : TypedIterator(data), m_Scale(scale) {}
387 
ScaledInt32Decoder(const float scale)388     ScaledInt32Decoder(const float scale)
389         : ScaledInt32Decoder(nullptr, scale) {}
390 
Get() const391     float Get() const override
392     {
393         return static_cast<float>(*m_Iterator) * m_Scale;
394     }
DecodeTensor(const TensorShape & tensorShape,const unsigned int channelMultiplier,const bool isDepthwise)395     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
396                                      const unsigned int channelMultiplier,
397                                      const bool isDepthwise) override
398     {
399         IgnoreUnused(channelMultiplier, isDepthwise);
400 
401         const unsigned int size = tensorShape.GetNumElements();
402         std::vector<float> decodedTensor;
403         decodedTensor.reserve(size);
404 
405         for (uint32_t i = 0; i < size; ++i)
406         {
407             this->operator[](i);
408             decodedTensor.emplace_back(static_cast<float>(*m_Iterator) * m_Scale);
409         }
410 
411         return decodedTensor;
412     }
413 
414 private:
415     const float m_Scale;
416 
417 };
418 
419 class Int32Decoder : public TypedIterator<const int32_t, Decoder<float>>
420 {
421 public:
Int32Decoder(const int32_t * data)422     Int32Decoder(const int32_t* data)
423         : TypedIterator(data) {}
424 
Int32Decoder()425     Int32Decoder()
426         : Int32Decoder(nullptr) {}
427 
Get() const428     float Get() const override
429     {
430         return static_cast<float>(*m_Iterator);
431     }
DecodeTensor(const TensorShape & tensorShape,const unsigned int channelMultiplier,const bool isDepthwise)432     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
433                                      const unsigned int channelMultiplier,
434                                      const bool isDepthwise) override
435     {
436         IgnoreUnused(channelMultiplier, isDepthwise);
437 
438         const unsigned int size = tensorShape.GetNumElements();
439         std::vector<float> decodedTensor;
440         decodedTensor.reserve(size);
441 
442         for (uint32_t i = 0; i < size; ++i)
443         {
444             this->operator[](i);
445             decodedTensor.emplace_back(static_cast<float>(*m_Iterator));
446         }
447 
448         return decodedTensor;
449     }
450 };
451 
452 class Int32ToInt32tDecoder : public TypedIterator<const int32_t, Decoder<int32_t>>
453 {
454 public:
Int32ToInt32tDecoder(const int32_t * data)455     Int32ToInt32tDecoder(const int32_t* data)
456             : TypedIterator(data){}
457 
Int32ToInt32tDecoder()458     Int32ToInt32tDecoder()
459             : Int32ToInt32tDecoder(nullptr) {}
460 
Get() const461     int32_t Get() const override
462     {
463         return *m_Iterator;
464     }
DecodeTensor(const TensorShape & tensorShape,const unsigned int channelMultiplier,const bool isDepthwise)465     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
466                                      const unsigned int channelMultiplier,
467                                      const bool isDepthwise) override
468     {
469         IgnoreUnused(channelMultiplier, isDepthwise);
470 
471         const unsigned int size = tensorShape.GetNumElements();
472         std::vector<float> decodedTensor;
473         decodedTensor.reserve(size);
474 
475         for (uint32_t i = 0; i < size; ++i)
476         {
477             this->operator[](i);
478             decodedTensor.emplace_back(*m_Iterator);
479         }
480 
481         return decodedTensor;
482     }
483 };
484 
485 class BooleanDecoder : public TypedIterator<const uint8_t, Decoder<float>>
486 {
487 public:
BooleanDecoder(const uint8_t * data)488     BooleanDecoder(const uint8_t* data)
489             : TypedIterator(data) {}
490 
BooleanDecoder()491     BooleanDecoder()
492             : BooleanDecoder(nullptr) {}
493 
Get() const494     float Get() const override
495     {
496         return *m_Iterator;
497     }
DecodeTensor(const TensorShape & tensorShape,const unsigned int channelMultiplier,const bool isDepthwise)498     std::vector<float> DecodeTensor (const TensorShape& tensorShape,
499                                      const unsigned int channelMultiplier,
500                                      const bool isDepthwise) override
501     {
502         IgnoreUnused(channelMultiplier, isDepthwise);
503 
504         const unsigned int size = tensorShape.GetNumElements();
505         std::vector<float> decodedTensor;
506         decodedTensor.reserve(size);
507 
508         for (uint32_t i = 0; i < size; ++i)
509         {
510             this->operator[](i);
511             decodedTensor.emplace_back(*m_Iterator);
512         }
513 
514         return decodedTensor;
515     }
516 };
517 
518 class BooleanDecoderBool : public TypedIterator<const uint8_t, Decoder<bool>>
519 {
520 public:
BooleanDecoderBool(const uint8_t * data)521     BooleanDecoderBool(const uint8_t* data)
522         : TypedIterator(data) {}
523 
BooleanDecoderBool()524     BooleanDecoderBool()
525         : BooleanDecoderBool(nullptr) {}
526 
Get() const527     bool Get() const override
528     {
529         return *m_Iterator;
530     }
531 
DecodeTensor(const TensorShape & tensorShape,const unsigned int channelMultiplier,const bool isDepthwise)532     std::vector<float> DecodeTensor(const TensorShape& tensorShape,
533                                     const unsigned int channelMultiplier,
534                                     const bool isDepthwise) override
535     {
536         IgnoreUnused(channelMultiplier, isDepthwise);
537 
538         const unsigned int size = tensorShape.GetNumElements();
539         std::vector<float> decodedTensor;
540         decodedTensor.reserve(size);
541 
542         for (uint32_t i = 0; i < size; ++i)
543         {
544             this->operator[](i);
545             decodedTensor.emplace_back(*m_Iterator);
546         }
547 
548         return decodedTensor;
549     }
550 };
551 
552 class QASymm8Encoder : public TypedIterator<uint8_t, Encoder<float>>
553 {
554 public:
QASymm8Encoder(uint8_t * data,const float scale,const int32_t offset)555     QASymm8Encoder(uint8_t* data, const float scale, const int32_t offset)
556         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
557 
QASymm8Encoder(const float scale,const int32_t offset)558     QASymm8Encoder(const float scale, const int32_t offset)
559         : QASymm8Encoder(nullptr, scale, offset) {}
560 
Set(float right)561     void Set(float right) override
562     {
563         *m_Iterator = armnn::Quantize<uint8_t>(right, m_Scale, m_Offset);
564     }
565 
Get() const566     float Get() const override
567     {
568         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
569     }
570 
571 private:
572     const float m_Scale;
573     const int32_t m_Offset;
574 };
575 
576 class QASymmS8Encoder : public TypedIterator<int8_t, Encoder<float>>
577 {
578 public:
QASymmS8Encoder(int8_t * data,const float scale,const int32_t offset)579     QASymmS8Encoder(int8_t* data, const float scale, const int32_t offset)
580         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
581 
QASymmS8Encoder(const float scale,const int32_t offset)582     QASymmS8Encoder(const float scale, const int32_t offset)
583         : QASymmS8Encoder(nullptr, scale, offset) {}
584 
Set(float right)585     void Set(float right) override
586     {
587         *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale, m_Offset);
588     }
589 
Get() const590     float Get() const override
591     {
592         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
593     }
594 
595 private:
596     const float m_Scale;
597     const int32_t m_Offset;
598 };
599 
600 class QSymmS8Encoder : public TypedIterator<int8_t, Encoder<float>>
601 {
602 public:
QSymmS8Encoder(int8_t * data,const float scale,const int32_t offset)603     QSymmS8Encoder(int8_t* data, const float scale, const int32_t offset)
604             : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
605 
QSymmS8Encoder(const float scale,const int32_t offset)606     QSymmS8Encoder(const float scale, const int32_t offset)
607             : QSymmS8Encoder(nullptr, scale, offset) {}
608 
Set(float right)609     void Set(float right) override
610     {
611         *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale, m_Offset);
612     }
613 
Get() const614     float Get() const override
615     {
616         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
617     }
618 
619 private:
620     const float m_Scale;
621     const int32_t m_Offset;
622 };
623 
624 class QSymm16Encoder : public TypedIterator<int16_t, Encoder<float>>
625 {
626 public:
QSymm16Encoder(int16_t * data,const float scale,const int32_t offset)627     QSymm16Encoder(int16_t* data, const float scale, const int32_t offset)
628         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
629 
QSymm16Encoder(const float scale,const int32_t offset)630     QSymm16Encoder(const float scale, const int32_t offset)
631         : QSymm16Encoder(nullptr, scale, offset) {}
632 
Set(float right)633     void Set(float right) override
634     {
635         *m_Iterator = armnn::Quantize<int16_t>(right, m_Scale, m_Offset);
636     }
637 
Get() const638     float Get() const override
639     {
640         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
641     }
642 
643 private:
644     const float m_Scale;
645     const int32_t m_Offset;
646 };
647 
648 class BFloat16Encoder : public TypedIterator<armnn::BFloat16, Encoder<float>>
649 {
650 public:
BFloat16Encoder(armnn::BFloat16 * data)651     BFloat16Encoder(armnn::BFloat16* data)
652         : TypedIterator(data) {}
653 
BFloat16Encoder()654     BFloat16Encoder()
655         : BFloat16Encoder(nullptr) {}
656 
Set(float right)657     void Set(float right) override
658     {
659         armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(&right, 1, m_Iterator);
660     }
661 
Get() const662     float Get() const override
663     {
664         float val = 0.f;
665         armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val);
666         return val;
667     }
668 };
669 
670 class Float16Encoder : public TypedIterator<Half, Encoder<float>>
671 {
672 public:
Float16Encoder(Half * data)673     Float16Encoder(Half* data)
674         : TypedIterator(data) {}
675 
Float16Encoder()676     Float16Encoder()
677         : Float16Encoder(nullptr) {}
678 
Set(float right)679     void Set(float right) override
680     {
681         armnnUtils::FloatingPointConverter::ConvertFloat32To16(&right, 1, m_Iterator);
682     }
683 
Get() const684     float Get() const override
685     {
686         float val = 0.f;
687         armnnUtils::FloatingPointConverter::ConvertFloat16To32(m_Iterator, 1, &val);
688         return val;
689     }
690 };
691 
692 class Float32Encoder : public TypedIterator<float, Encoder<float>>
693 {
694 public:
Float32Encoder(float * data)695     Float32Encoder(float* data)
696         : TypedIterator(data) {}
697 
Float32Encoder()698     Float32Encoder()
699         : Float32Encoder(nullptr) {}
700 
Set(float right)701     void Set(float right) override
702     {
703         *m_Iterator = right;
704     }
705 
Get() const706     float Get() const override
707     {
708         return *m_Iterator;
709     }
710 };
711 
712 class Int32Encoder : public TypedIterator<int32_t, Encoder<float>>
713 {
714 public:
Int32Encoder(int32_t * data)715     Int32Encoder(int32_t* data)
716         : TypedIterator(data) {}
717 
Int32Encoder()718     Int32Encoder()
719         : Int32Encoder(nullptr) {}
720 
Set(float right)721     void Set(float right) override
722     {
723         *m_Iterator = static_cast<int32_t>(right);
724     }
725 
Get() const726     float Get() const override
727     {
728         return static_cast<float>(*m_Iterator);
729     }
730 };
731 
732 class Int32ToInt32tEncoder : public TypedIterator<int32_t, Encoder<int32_t>>
733 {
734 public:
Int32ToInt32tEncoder(int32_t * data)735     Int32ToInt32tEncoder(int32_t* data)
736         : TypedIterator(data){}
737 
Int32ToInt32tEncoder()738     Int32ToInt32tEncoder()
739         : Int32ToInt32tEncoder(nullptr) {}
740 
Set(int32_t right)741     void Set(int32_t right) override
742     {
743         *m_Iterator = right;
744     }
745 
Get() const746     int32_t Get() const override
747     {
748         return *m_Iterator;
749     }
750 };
751 
752 class BooleanEncoder : public TypedIterator<uint8_t, Encoder<bool>>
753 {
754 public:
BooleanEncoder(uint8_t * data)755     BooleanEncoder(uint8_t* data)
756         : TypedIterator(data) {}
757 
BooleanEncoder()758     BooleanEncoder()
759         : BooleanEncoder(nullptr) {}
760 
Set(bool right)761     void Set(bool right) override
762     {
763         *m_Iterator = right;
764     }
765 
Get() const766     bool Get() const override
767     {
768         return *m_Iterator;
769     }
770 };
771 
// PerAxisIterator for per-axis quantization: like TypedIterator but also
// tracks m_AxisIndex, which selects the per-channel quantization scale.
template<typename T, typename Base>
class PerAxisIterator : public Base
{
public:
    // axisFactor is used to calculate channelStep
    // NOTE(review): the default axisFactor of 0 makes the '% m_AxisFactor'
    // expressions below divide by zero if any movement operator is used
    // before a non-zero factor is set — TODO confirm callers always pass > 0.
    PerAxisIterator(T* data = nullptr, unsigned int axisFactor = 0)
        : m_Iterator(data), m_Start(data), m_AxisIndex(0), m_AxisFactor(axisFactor)
    {}

    // This should be called to set index for per-axis Encoder/Decoder
    PerAxisIterator& SetIndex(unsigned int index, unsigned int axisIndex) override
    {
         ARMNN_ASSERT(m_Iterator);
         m_Iterator = m_Start + index;
         m_AxisIndex = axisIndex;
         return *this;
    }

    // Re-point at a new buffer; cursor and axis index return to the origin.
    void Reset(void* data) override
    {
        m_Iterator = reinterpret_cast<T*>(data);
        m_Start = m_Iterator;
        m_AxisIndex = 0;
    }

    PerAxisIterator& operator++() override
    {
        ARMNN_ASSERT(m_Iterator);
        ++m_Iterator;
        // NOTE(review): this derives the axis index from the element *value*
        // (*m_Iterator), not from the cursor's position; a position-based
        // formula such as (m_Iterator - m_Start) % m_AxisFactor looks like
        // what was intended — verify against upstream before relying on
        // m_AxisIndex after using these operators. It also dereferences the
        // incremented pointer, which reads one past the end when stepping
        // beyond the last element.
        m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
        return *this;
    }

    PerAxisIterator& operator+=(const unsigned int increment) override
    {
        ARMNN_ASSERT(m_Iterator);
        m_Iterator += increment;
        // NOTE(review): same value-vs-position concern as operator++.
        m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
        return *this;
    }

    PerAxisIterator& operator-=(const unsigned int decrement) override
    {
        ARMNN_ASSERT(m_Iterator);
        m_Iterator -= decrement;
        // NOTE(review): same value-vs-position concern as operator++.
        m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
        return *this;
    }

    PerAxisIterator& operator[](const unsigned int index) override
    {
        ARMNN_ASSERT(m_Iterator);
        m_Iterator = m_Start + index;
        // NOTE(review): same value-vs-position concern as operator++.
        m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
        return *this;
    }

    protected:
        T* m_Iterator;            // current cursor
        T* m_Start;               // buffer origin (for absolute indexing)
        unsigned int m_AxisIndex; // selects the per-channel scale slot
        unsigned int m_AxisFactor; // number of per-axis quantization slots
};
836 
// Per-axis QSymmS8 decoder: each channel has its own scale (zero offset).
class QSymm8PerAxisDecoder : public PerAxisIterator<const int8_t, Decoder<float>>
{
public:
    QSymm8PerAxisDecoder(const int8_t* data, const std::vector<float>& scale, unsigned int axisFactor)
        : PerAxisIterator(data, axisFactor), m_Scales(scale) {}

    // Dequantize the current element with the scale selected by m_AxisIndex.
    float Get() const override
    {
        return armnn::Dequantize(*m_Iterator, m_Scales[m_AxisIndex], 0);
    }

    // Get scale of the current value
    float GetScale() const
    {
        return m_Scales[m_AxisIndex];
    }

    // Dequantize the whole tensor. For depthwise weights the per-channel
    // block is H*W (tensorShape[2]*tensorShape[3]); otherwise it is the
    // per-outer-slice element count.
    std::vector<float> DecodeTensor(const TensorShape &tensorShape,
                                    const unsigned int channelMultiplier,
                                    bool isDepthwise) override
    {
        const uint32_t size = tensorShape.GetNumElements();
        const uint32_t scaleSize = static_cast<uint32_t>(m_Scales.size());

        // NOTE(review): divides by tensorShape[0] / (stepSize * channelMultiplier)
        // below — assumes a non-degenerate shape and channelMultiplier >= 1.
        const uint32_t stepSize = isDepthwise ?
                                  tensorShape[2] * tensorShape[3] : tensorShape.GetNumElements() / tensorShape[0];

        const uint32_t stepNum = size / (stepSize * channelMultiplier);
        uint32_t scale; // index into m_Scales for the current block

        std::vector<float> decodedTensor;
        decodedTensor.reserve(size);

        // channelMultiplier is only used in depthwise convolutions and in other cases will have no effect
        // stepSize is the length of a contiguous area sharing a quantization scale within a tensor
        // stepNum is the number of those steps/blocks in the tensor
        for (uint32_t mult = 0; mult < channelMultiplier; ++mult)
        {
            for (uint32_t step = 0; step < stepNum; ++step)
            {
                // Wrap the scale index so a short scale vector still covers all blocks.
                scale = (channelMultiplier * step + mult) % scaleSize;
                for (uint32_t i = 0; i < stepSize; ++i)
                {
                    unsigned int index = mult * stepSize * channelMultiplier +
                                         step * stepSize + i;
                    this->operator[](index); // position cursor at flat index
                    decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scales[scale], 0));
                }
            }
        }
        return decodedTensor;
    }

private:
    std::vector<float> m_Scales; // one scale per quantization-axis slot
};
893 
894 class QSymm8PerAxisEncoder : public PerAxisIterator<int8_t, Encoder<float>>
895 {
896 public:
QSymm8PerAxisEncoder(int8_t * data,const std::vector<float> & scale,unsigned int axisFactor)897     QSymm8PerAxisEncoder(int8_t* data, const std::vector<float>& scale, unsigned int axisFactor)
898         : PerAxisIterator(data, axisFactor), m_Scale(scale) {}
899 
Set(float right)900     void Set(float right)
901     {
902         *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale[m_AxisIndex], 0);
903     }
904 
Get() const905     float Get() const
906     {
907         return armnn::Dequantize(*m_Iterator, m_Scale[m_AxisIndex], 0);
908     }
909 
910     // Get scale of the current value
GetScale() const911     float GetScale() const
912     {
913         return m_Scale[m_AxisIndex];
914     }
915 
916 private:
917     std::vector<float> m_Scale;
918 };
919 
// Per-axis int32 decoder (typically per-channel bias tensors): each channel
// has its own scale, zero offset.
class ScaledInt32PerAxisDecoder : public PerAxisIterator<const int32_t, Decoder<float>>
{
public:
    ScaledInt32PerAxisDecoder(const int32_t* data, const std::vector<float>& scales, unsigned int axisFactor)
        : PerAxisIterator(data, axisFactor), m_Scales(scales) {}

    // Dequantize the current element with the scale selected by m_AxisIndex.
    float Get() const override
    {
        return armnn::Dequantize(*m_Iterator, m_Scales[m_AxisIndex], 0);
    }

    // Get scale of the current value
    float GetScale() const
    {
        return m_Scales[m_AxisIndex];
    }

    // Dequantize the whole tensor. Same block layout as QSymm8PerAxisDecoder:
    // depthwise weights share a scale per H*W block, otherwise per outer slice.
    std::vector<float> DecodeTensor(const TensorShape &tensorShape,
                                    const unsigned int channelMultiplier,
                                    bool isDepthwise) override
    {
        const uint32_t size = tensorShape.GetNumElements();
        const uint32_t scaleSize = static_cast<uint32_t>(m_Scales.size());

        // NOTE(review): divides by tensorShape[0] / (stepSize * channelMultiplier)
        // below — assumes a non-degenerate shape and channelMultiplier >= 1.
        const uint32_t stepSize = isDepthwise ?
                                  tensorShape[2] * tensorShape[3] : tensorShape.GetNumElements() / tensorShape[0];

        const uint32_t stepNum = size / (stepSize * channelMultiplier);
        uint32_t scale; // index into m_Scales for the current block

        std::vector<float> decodedTensor;
        decodedTensor.reserve(size);

        // channelMultiplier is only used in depthwise convolutions and in other cases will have no effect
        // stepSize is the length of a contiguous area sharing a quantization scale within a tensor
        // stepNum is the number of those steps/blocks in the tensor
        for (uint32_t mult = 0; mult < channelMultiplier; ++mult)
        {
            for (uint32_t step = 0; step < stepNum; ++step)
            {
                // Wrap the scale index so a short scale vector still covers all blocks.
                scale = (channelMultiplier * step + mult) % scaleSize;
                for (uint32_t i = 0; i < stepSize; ++i)
                {
                    unsigned int index = mult * stepSize * channelMultiplier +
                                         step * stepSize + i;
                    this->operator[](index); // position cursor at flat index
                    decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scales[scale], 0));
                }
            }
        }
        return decodedTensor;
    }

private:
    std::vector<float> m_Scales; // one scale per quantization-axis slot
};
976 
977 } // namespace armnn
978