//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "FakeQuantizationTestImpl.hpp"

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

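// Runs a FakeQuantization workload over a 2x3 Float32 tensor and returns the
// actual and expected outputs so the caller can compare them.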
LayerTestResult<float, 2> FakeQuantizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    constexpr unsigned int width  = 2;
    constexpr unsigned int height = 3;

    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);

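    // Input values span the full quantization range, with a duplicate at the
    // upper bound to exercise saturation.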
    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
       -10.0f, -5.0f,
         0.0f,  5.0f,
        10.0f, 10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(tensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo);

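    // Describe the workload and bind the input and output tensor handles to it.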
    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

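    // [min, max] is the float range that fake quantization maps onto the
    // 8-bit range [0, 255].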
    float min = -10.f;
    float max =  10.f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;

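    // Wrap the expected-output buffer in a pass-through handle and bind it to a
    // mirrored copy of the descriptor. The buffer is already sized by the
    // LayerTestResult constructor, so the pointer stays valid when the expected
    // values are assigned below.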
    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);

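    // Create the FakeQuantization workload from the factory under test.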
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

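    // Allocate backing memory for the handles, then upload the input data.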
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

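    // Finalise any post-allocation state and run the workload.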
    workload->PostAllocationConfigure();
    workload->Execute();

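    // Read the computed output back into the result structure.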
    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

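    // Expected quantization levels for the [-10, 10] range: -10 maps to 0,
    // 0 to the mid-point 128, and values at or above 10 saturate at 255.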
    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
         0.0f,  63.0f,
       128.0f, 191.0f,
       255.0f, 255.0f
    }));

    return ret;
}