//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <test/CreateWorkload.hpp>

#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <reference/RefTensorHandle.hpp>

#if defined(ARMCOMPUTECL_ENABLED)
#include <cl/ClTensorHandle.hpp>
#endif

#if defined(ARMCOMPUTENEON_ENABLED)
#include <neon/NeonTensorHandle.hpp>
#endif

using namespace armnn;

namespace
{

using namespace std;

29 template<typename IComputeTensorHandle>
CompareTensorHandleShape(IComputeTensorHandle * tensorHandle,std::initializer_list<unsigned int> expectedDimensions)30 boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
31 std::initializer_list<unsigned int> expectedDimensions)
32 {
33 arm_compute::ITensorInfo* info = tensorHandle->GetTensor().info();
34
35 auto infoNumDims = info->num_dimensions();
36 auto numExpectedDims = expectedDimensions.size();
37 if (infoNumDims != numExpectedDims)
38 {
39 boost::test_tools::predicate_result res(false);
40 res.message() << "Different number of dimensions [" << info->num_dimensions()
41 << "!=" << expectedDimensions.size() << "]";
42 return res;
43 }
44
45 size_t i = info->num_dimensions() - 1;
46
47 for (unsigned int expectedDimension : expectedDimensions)
48 {
49 if (info->dimension(i) != expectedDimension)
50 {
51 boost::test_tools::predicate_result res(false);
52 res.message() << "For dimension " << i <<
53 " expected size " << expectedDimension <<
54 " got " << info->dimension(i);
55 return res;
56 }
57
58 i--;
59 }
60
61 return true;
62 }
63
64 template<typename IComputeTensorHandle>
CreateMemCopyWorkloads(IWorkloadFactory & factory)65 void CreateMemCopyWorkloads(IWorkloadFactory& factory)
66 {
67 TensorHandleFactoryRegistry registry;
68 Graph graph;
69 RefWorkloadFactory refFactory;
70
71 // Creates the layers we're testing.
72 Layer* const layer1 = graph.AddLayer<MemCopyLayer>("layer1");
73 Layer* const layer2 = graph.AddLayer<MemCopyLayer>("layer2");
74
75 // Creates extra layers.
76 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
77 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
78
79 // Connects up.
80 TensorInfo tensorInfo({2, 3}, DataType::Float32);
81 Connect(input, layer1, tensorInfo);
82 Connect(layer1, layer2, tensorInfo);
83 Connect(layer2, output, tensorInfo);
84
85 input->CreateTensorHandles(registry, refFactory);
86 layer1->CreateTensorHandles(registry, factory);
87 layer2->CreateTensorHandles(registry, refFactory);
88 output->CreateTensorHandles(registry, refFactory);
89
90 // make the workloads and check them
91 auto workload1 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer1, factory);
92 auto workload2 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer2, refFactory);
93
94 MemCopyQueueDescriptor queueDescriptor1 = workload1->GetData();
95 BOOST_TEST(queueDescriptor1.m_Inputs.size() == 1);
96 BOOST_TEST(queueDescriptor1.m_Outputs.size() == 1);
97 auto inputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor1.m_Inputs[0]);
98 auto outputHandle1 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor1.m_Outputs[0]);
99 BOOST_TEST((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
100 BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3}));
101
102
103 MemCopyQueueDescriptor queueDescriptor2 = workload2->GetData();
104 BOOST_TEST(queueDescriptor2.m_Inputs.size() == 1);
105 BOOST_TEST(queueDescriptor2.m_Outputs.size() == 1);
106 auto inputHandle2 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor2.m_Inputs[0]);
107 auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor2.m_Outputs[0]);
108 BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3}));
109 BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
110 }

} //namespace