//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefQuantizeWorkload.hpp"

#include "RefWorkloadUtils.hpp"

#include <armnn/TypesUtils.hpp>


namespace armnn
{

namespace
{

void QuantizeImpl(Decoder<float>& in, Encoder<float>& out, size_t numValues)
{
    for (unsigned int i = 0; i < numValues; i++)
    {
        // operator[] positions the decoder/encoder at element i; the value is
        // then read back as float and re-encoded (quantized) into the output.
        in[i];
        out[i];
        out.Set(in.Get());
    }
}

} // anonymous namespace

RefQuantizeWorkload::RefQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo& info)
    : BaseWorkload(descriptor, info)
    , m_NumElements(info.m_InputTensorInfos[0].GetNumElements())
{
}

void RefQuantizeWorkload::PostAllocationConfigure()
{
    const TensorInfo& inputInfo = armnn::GetTensorInfo(m_Data.m_Inputs[0]);
    m_InputDecoder = MakeDecoder<float>(inputInfo);

    const TensorInfo& outputInfo = armnn::GetTensorInfo(m_Data.m_Outputs[0]);
    m_OutputEncoder = MakeEncoder<float>(outputInfo);
}

void RefQuantizeWorkload::Execute() const
{
    m_InputDecoder->Reset(m_Data.m_Inputs[0]->Map());
    m_OutputEncoder->Reset(m_Data.m_Outputs[0]->Map());

    QuantizeImpl(*m_InputDecoder, *m_OutputEncoder, m_NumElements);
}

} // namespace armnn