//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConvertFp32ToFp16TestImpl.hpp"

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    using namespace half_float::literal;

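    // Input and output share the same 1x3x2x3 shape; only the element type differs (Float32 in, Float16 out).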
    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);

    auto input = MakeTensor<float, 4>(inputTensorInfo,
        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
          1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });

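    // Expected output: the same values expressed as half-precision literals (_h suffix from half_float).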
    LayerTestResult<armnn::Half, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<armnn::Half, 4>(outputTensorInfo,
        { -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
          1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h });

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

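    // Build the workload descriptor: one FP32 input and one FP16 output bound to the tensor handles.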
    armnn::ConvertFp32ToFp16QueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp32ToFp16(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

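    // Run the conversion workload and read the FP16 results back for comparison against outputExpected.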
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
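
// Note (assumption, not part of this file): a backend's layer-test suite would typically register this
// function through its layer-test macro, for example something like
//     ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp32ToFp16, SimpleConvertFp32ToFp16Test)
// in that backend's LayerTests source; the exact registration macro may differ between Arm NN versions.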