//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConvertBf16ToFp32TestImpl.hpp"

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

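// Exercises a backend's ConvertBf16ToFp32 workload end to end: builds a
// 1x3x2x3 BFloat16 input tensor, runs the conversion, and returns the
// Float32 result alongside the expected values for the caller to compare.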
LayerTestResult<float, 4> ConvertBf16ToFp32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);

    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::BFloat16);
    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);

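    // A scale of 1.0 and an offset of 0 mean QuantizedVector applies no
    // scaling here; each value is simply converted to BFloat16 storage.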
    std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
        {
            -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
             1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
        },
        1.0f, 0);

    auto input = MakeTensor<armnn::BFloat16, 4>(inputTensorInfo, std::vector<armnn::BFloat16>(inputValues));

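    // The expected output is the same list of values as Float32; the
    // BFloat16 -> Float32 widening itself introduces no precision loss.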
    LayerTestResult<float, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
           1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });

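    // Create backend-appropriate storage for the input and output tensors.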
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

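    // The queue descriptor and WorkloadInfo bind both handles to the
    // workload created below.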
    armnn::ConvertBf16ToFp32QueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

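    // Ask the factory for the backend-specific implementation of the workload.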
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertBf16ToFp32(data, info);

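    // Allocate backing memory before copying any data in or out.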
    inputHandle->Allocate();
    outputHandle->Allocate();

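    // Upload the BFloat16 input to the backend.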
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

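    // Run the conversion.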
    workload->Execute();

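    // Read the Float32 result back into ret.output.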
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
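
// ---------------------------------------------------------------------------
// Illustrative sketch only: the helper below is not part of the test above
// and is not claimed to be ArmNN's conversion routine; the name
// Bf16BitsToFloat32 is hypothetical. It shows why the Bf16 -> Fp32 direction
// is lossless: a BFloat16 value holds the top 16 bits of an IEEE-754
// binary32, so widening it back just reattaches 16 zero bits.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <cstring>

inline float Bf16BitsToFloat32(uint16_t bf16)
{
    uint32_t bits = static_cast<uint32_t>(bf16) << 16; // low mantissa bits become zero
    float f;
    std::memcpy(&f, &bits, sizeof(f)); // bit-exact reinterpretation, no aliasing UB
    return f;
}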