//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "BaseIterator.hpp"

#include <armnnUtils/FloatingPointConverter.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <armnn/utility/Assert.hpp>

namespace armnn
{

namespace
{

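// Creates a per-axis decoder for Signed32 data. GetPerAxisParams returns a pair whose
// first element is the axis factor (the number of contiguous elements sharing the same
// scale) and whose second element is the vector of per-axis quantization scales.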
inline std::unique_ptr<Decoder<float>> MakeSigned32PerAxisDecoder(const TensorInfo& info, const void* data)
{
    auto params = armnnUtils::GetPerAxisParams(info);
    return std::make_unique<ScaledInt32PerAxisDecoder>(
        static_cast<const int32_t*>(data),
        params.second,
        params.first);
}

inline std::unique_ptr<Decoder<float>> MakeSigned32Decoder(const TensorInfo& info, const void* data)
{
    if (info.HasMultipleQuantizationScales())
    {
        // NOTE: If we have multiple quantization scales, we create a ScaledInt32PerAxisDecoder.
        // This will be used to decode per-axis quantized convolution biases.
        return MakeSigned32PerAxisDecoder(info, data);
    }
    else
    {
        if (info.GetQuantizationDim().has_value())
        {
            // NOTE: Even though we only have a single quantization scale, if the quantization
            // dimension is set, the tensor has per-axis quantization and we need to create a
            // ScaledInt32PerAxisDecoder.
            return MakeSigned32PerAxisDecoder(info, data);
        }

        const float scale = info.GetQuantizationScale();
        if (scale == 0.f)
        {
            // NOTE: If no quantization scale is set, we create an Int32Decoder, which simply
            // casts the int value to float. This will be used for any INT32 data other than
            // convolution biases.
            return std::make_unique<Int32Decoder>(static_cast<const int32_t*>(data));
        }

        // NOTE: If we only have a single (non-zero) quantization scale and no quantization
        // dimension is specified, we need to create a ScaledInt32Decoder. This will be used
        // to decode per-tensor quantized convolution biases.
        return std::make_unique<ScaledInt32Decoder>(static_cast<const int32_t*>(data), scale);
    }
}
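
// A minimal usage sketch (illustrative only; the buffer, shape, and scale below are
// hypothetical): decoding a per-tensor quantized bias tensor with a single non-zero scale.
//
//   const int32_t biasData[] = { 100, -50 };
//   TensorInfo biasInfo(TensorShape({ 2 }), DataType::Signed32, 0.05f);
//   std::unique_ptr<Decoder<float>> decoder = MakeSigned32Decoder(biasInfo, biasData);
//   float first = decoder->Get();  // 100 * 0.05f == 5.0f
//   ++(*decoder);                  // advance to the second element
//   float second = decoder->Get(); // -50 * 0.05f == -2.5f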

} // anonymous namespace

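// MakeDecoder creates a Decoder<T> appropriate for the tensor's data type. The data
// pointer may be left as nullptr at construction and bound later through the
// iterator's Reset() method.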
template<typename T>
inline std::unique_ptr<Decoder<T>> MakeDecoder(const TensorInfo& info, const void* data = nullptr);

template<>
inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const void* data)
{
    switch(info.GetDataType())
    {
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        case armnn::DataType::QuantizedSymm8PerAxis:
        {
            std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
            return std::make_unique<QSymm8PerAxisDecoder>(
                static_cast<const int8_t*>(data),
                params.second,
                params.first);
        }
        ARMNN_NO_DEPRECATE_WARN_END
        case DataType::QAsymmS8:
        {
            return std::make_unique<QASymmS8Decoder>(
                static_cast<const int8_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        case DataType::QAsymmU8:
        {
            return std::make_unique<QASymm8Decoder>(
                static_cast<const uint8_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        case DataType::QSymmS16:
        {
            return std::make_unique<QSymm16Decoder>(
                static_cast<const int16_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        case DataType::BFloat16:
        {
            return std::make_unique<BFloat16Decoder>(static_cast<const BFloat16*>(data));
        }
        case DataType::Float16:
        {
            return std::make_unique<Float16Decoder>(static_cast<const Half*>(data));
        }
        case DataType::Float32:
        {
            return std::make_unique<Float32Decoder>(static_cast<const float*>(data));
        }
        case DataType::Signed32:
        {
            return MakeSigned32Decoder(info, data);
        }
        case DataType::QSymmS8:
        {
            if (info.HasPerAxisQuantization())
            {
                std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
                return std::make_unique<QSymm8PerAxisDecoder>(
                    static_cast<const int8_t*>(data),
                    params.second,
                    params.first);
            }
            else
            {
                return std::make_unique<QSymmS8Decoder>(
                    static_cast<const int8_t*>(data),
                    info.GetQuantizationScale(),
                    info.GetQuantizationOffset());
            }
        }
        case armnn::DataType::Boolean:
        {
            return std::make_unique<BooleanDecoder>(static_cast<const uint8_t*>(data));
        }
        default:
        {
            ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
            break;
        }
    }
    return nullptr;
}
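
// A minimal usage sketch (illustrative only; the buffer and quantization parameters
// below are hypothetical): dequantizing QAsymmU8 data to float via MakeDecoder<float>.
//
//   const uint8_t quantizedData[] = { 128, 255 };
//   TensorInfo qInfo(TensorShape({ 2 }), DataType::QAsymmU8, 0.5f, 128);
//   auto decoder = MakeDecoder<float>(qInfo, quantizedData);
//   float first = decoder->Get();  // (128 - 128) * 0.5f == 0.0f
//   ++(*decoder);                  // advance to the second element
//   float second = decoder->Get(); // (255 - 128) * 0.5f == 63.5f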

template<>
inline std::unique_ptr<Decoder<bool>> MakeDecoder(const TensorInfo& info, const void* data)
{
    switch(info.GetDataType())
    {
        case DataType::Boolean:
        {
            return std::make_unique<BooleanDecoderBool>(static_cast<const uint8_t*>(data));
        }
        default:
        {
            ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
            break;
        }
    }
    return nullptr;
}
169 
170 template<>
MakeDecoder(const TensorInfo & info,const void * data)171 inline std::unique_ptr<Decoder<int32_t>> MakeDecoder(const TensorInfo& info, const void* data)
172 {
173     switch(info.GetDataType())
174     {
175         case DataType::Signed32:
176         {
177             return std::make_unique<Int32ToInt32tDecoder>(static_cast<const int32_t*>(data));
178         }
179         default:
180         {
181             ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
182             break;
183         }
184     }
185     return nullptr;
186 }

} // namespace armnn