//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <ResolveType.hpp>

#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

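// Builds a Permute workload from the supplied descriptor, runs it on the given backend,
// and returns the actual output together with the expected reference data for comparison.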
template<typename T>
LayerTestResult<T, 4> SimplePermuteTestImpl(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        armnn::PermuteDescriptor descriptor,
        armnn::TensorInfo inputTensorInfo,
        armnn::TensorInfo outputTensorInfo,
        const std::vector<T>& inputData,
        const std::vector<T>& outputExpectedData)
{
    IgnoreUnused(memoryManager);
    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Build the queue descriptor and bind the input/output tensor handles.
    armnn::PermuteQueueDescriptor data;
    data.m_Parameters = descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // Execute the permute and copy the result back for comparison against outputExpected.
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

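// Permutes a [1,2,2,2] tensor with mapping {0,3,1,2}: input dimension i is moved to
// output dimension m_DimMappings[i], equivalent to an NCHW -> NHWC re-layout here.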
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimplePermuteTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { 1, 2, 2, 2 };
    unsigned int outputShape[] = { 1, 2, 2, 2 };

    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
    {
        1, 2,
        3, 4,
        5, 6,
        7, 8
    },
    qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
    {
        1, 5, 2, 6,
        3, 7, 4, 8
    },
    qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager, tensorHandleFactory,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}

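// Permutes a [1,2,2,3] tensor to [1,3,2,2] with mapping {0,2,3,1} (NHWC -> NCHW).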
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PermuteValueSet1Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[]  = { 1, 2, 2, 3 };
    unsigned int outputShape[] = { 1, 3, 2, 2 };

    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
    {
         1,  2,  3,
        11, 12, 13,
        21, 22, 23,
        31, 32, 33
    },
    qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
    {
        1, 11, 21, 31,
        2, 12, 22, 32,
        3, 13, 23, 33
    },
    qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager, tensorHandleFactory,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}

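// Permutes a [1,3,2,2] tensor to [1,2,2,3] with mapping {0,3,1,2}; the inverse of PermuteValueSet1Test.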
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PermuteValueSet2Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[]  = { 1, 3, 2, 2 };
    unsigned int outputShape[] = { 1, 2, 2, 3 };

    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
    {
        1, 11, 21, 31,
        2, 12, 22, 32,
        3, 13, 23, 33
    },
    qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
    {
         1,  2,  3,
        11, 12, 13,
        21, 22, 23,
        31, 32, 33
    },
    qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager, tensorHandleFactory,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}

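// Permutes a [1,2,3,3] tensor to [1,3,2,3] with mapping {0,2,3,1}, using a larger 2x3x3 payload.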
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PermuteValueSet3Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[]  = { 1, 2, 3, 3 };
    unsigned int outputShape[] = { 1, 3, 2, 3 };

    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
    {
         1,  2,  3,
        11, 12, 13,
        21, 22, 23,
        31, 32, 33,
        41, 42, 43,
        51, 52, 53
    },
    qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
    {
        1, 11, 21, 31, 41, 51,
        2, 12, 22, 32, 42, 52,
        3, 13, 23, 33, 43, 53
    },
    qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager, tensorHandleFactory,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}