//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NetworkQuantizerUtils.hpp"

#include <algorithm>
#include <cmath>
#include <stdint.h>

namespace armnn
{

ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>& backing)
{
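    // Quantization parameters for the resulting QAsymmU8 tensor; set when the
    // constant data is quantized below.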
    float scale = 0.0f;
    int offset = 0;

    // Allocate the backing memory: one quantized byte per element of the source tensor
    backing.resize(tensor.GetInfo().GetNumElements());

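    // Quantize according to the source data type; only Float32 constants are supported.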
    DataType type = tensor.GetInfo().GetDataType();
    switch(type)
    {
        case DataType::Float32:
        {
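            // Quantize the float data into the backing store and derive the
            // scale and offset that describe it.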
            QuantizeConstant(static_cast<const float*>(tensor.GetMemoryArea()),
                             backing.data(),
                             backing.size(),
                             scale,
                             offset);
            break;
        }
        default:
            ARMNN_ASSERT_MSG(false, "Can't quantize unsupported data type");
    }

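    // Describe the quantized constant: the original shape with QAsymmU8 data type
    // and the scale and offset computed above.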
    TensorInfo qInfo(tensor.GetInfo().GetShape(), DataType::QAsymmU8, scale, offset);
    return ConstTensor(qInfo, backing);
}

} // namespace armnn