//
// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ArgMinMaxTestImpl.hpp"

#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

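// Shared helper: sets up an ArgMinMax workload for the requested function (Min or Max)
// and axis, runs it through the given workload factory, and returns the int32 index
// output together with the expected indices for comparison.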
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        armnn::ArgMinMaxFunction argMinMaxFunction,
        const armnn::TensorInfo inputTensorInfo,
        const armnn::TensorInfo outputTensorInfo,
        const std::vector<float>& inputData,
        const std::vector<int32_t>& outputData,
        int axis = 3)
{
    // Convert the float input data to the test's data type and wrap it in a 4D test tensor.
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));

    LayerTestResult<int32_t, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<int32_t, 3>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the operation: which reduction to perform and over which axis.
    armnn::ArgMinMaxQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Function = argMinMaxFunction;
    descriptor.m_Parameters.m_Axis = axis;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateArgMinMax(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    // Copy the computed indices back so they can be checked against outputExpected.
    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}

} // namespace

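// ArgMax along the innermost (width) axis of a 1x1x1x5 tensor:
// the largest value, 10.0f, sits at index 3.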
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<int32_t, 3> ArgMaxSimpleTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
    const armnn::TensorShape outputShape{ 1, 1, 1 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
    std::vector<int32_t> outputValues({ 3 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Max,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, -1); // axis -1 resolves to axis 3
}

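// ArgMin along the innermost (width) axis of the same 1x1x1x5 input:
// the smallest value, 2.0f, sits at index 1.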
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<int32_t, 3> ArgMinSimpleTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
    const armnn::TensorShape outputShape{ 1, 1, 1 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
    std::vector<int32_t> outputValues({ 1 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Min,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 3);
}

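// ArgMin across the channel axis (axis 1) of a 1x3x2x4 tensor:
// the first channel holds the smallest value at every spatial position, so every index is 0.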
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<int32_t, 3> ArgMinChannelTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 2, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({   1.0f,   2.0f,   3.0f,   4.0f,
                                       5.0f,   6.0f,   7.0f,   8.0f,

                                      10.0f,  20.0f,  30.0f,  40.0f,
                                      50.0f,  60.0f,  70.0f,  80.0f,

                                     100.0f, 200.0f, 300.0f, 400.0f,
                                     500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<int32_t> outputValues({ 0, 0, 0, 0,
                                        0, 0, 0, 0 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Min,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 1);
}

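// ArgMax across the channel axis (axis 1) of the same 1x3x2x4 layout:
// the last channel holds the largest value at every spatial position, so every index is 2.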
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<int32_t, 3> ArgMaxChannelTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 2, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({  1.0f,   2.0f,   3.0f,   4.0f,
                                      5.0f,   6.0f,   7.0f,   8.0f,

                                     10.0f,  20.0f,  30.0f,  40.0f,
                                     50.0f,  60.0f,  70.0f,  80.0f,

                                    100.0f, 200.0f, 300.0f, 400.0f,
                                    500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<int32_t> outputValues({ 2, 2, 2, 2,
                                        2, 2, 2, 2 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Max,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 1);
}

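// ArgMax across the height axis (axis 2): within each channel the second row
// is element-wise larger than the first, so every index is 1.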
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<int32_t, 3> ArgMaxHeightTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 3, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    std::vector<float> inputValues({  1.0f,   2.0f,   3.0f,   4.0f,
                                      5.0f,   6.0f,   7.0f,   8.0f,

                                     10.0f,  20.0f,  30.0f,  40.0f,
                                     50.0f,  60.0f,  70.0f,  80.0f,

                                    100.0f, 200.0f, 300.0f, 400.0f,
                                    500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<int32_t> outputValues({ 1, 1, 1, 1,
                                        1, 1, 1, 1,
                                        1, 1, 1, 1 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Max,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 2);
}

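// ArgMin across the width axis (axis 3): every row is ascending left to right,
// so the smallest element is always at index 0.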
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<int32_t, 3> ArgMinWidthTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 3, 2 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    std::vector<float> inputValues({  1.0f,   2.0f,   3.0f,   4.0f,
                                      5.0f,   6.0f,   7.0f,   8.0f,

                                     10.0f,  20.0f,  30.0f,  40.0f,
                                     50.0f,  60.0f,  70.0f,  80.0f,

                                    100.0f, 200.0f, 300.0f, 400.0f,
                                    500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<int32_t> outputValues({ 0, 0,
                                        0, 0,
                                        0, 0 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Min,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 3);
}


// Explicit template instantiations

template LayerTestResult<int32_t, 3>
ArgMaxSimpleTest<armnn::DataType::Float32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxSimpleTest<armnn::DataType::Float16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxSimpleTest<armnn::DataType::QAsymmS8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxSimpleTest<armnn::DataType::QSymmS16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxSimpleTest<armnn::DataType::Signed32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinSimpleTest<armnn::DataType::Float32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinSimpleTest<armnn::DataType::Float16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinSimpleTest<armnn::DataType::QAsymmS8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinSimpleTest<armnn::DataType::QSymmS16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinSimpleTest<armnn::DataType::Signed32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinChannelTest<armnn::DataType::Float32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinChannelTest<armnn::DataType::Float16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinChannelTest<armnn::DataType::QAsymmS8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinChannelTest<armnn::DataType::QAsymmU8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinChannelTest<armnn::DataType::QSymmS16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinChannelTest<armnn::DataType::Signed32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxChannelTest<armnn::DataType::Float32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxChannelTest<armnn::DataType::Float16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxChannelTest<armnn::DataType::QAsymmS8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxChannelTest<armnn::DataType::QSymmS16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxChannelTest<armnn::DataType::Signed32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxHeightTest<armnn::DataType::Float32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxHeightTest<armnn::DataType::Float16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxHeightTest<armnn::DataType::Signed32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxHeightTest<armnn::DataType::QAsymmS8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinWidthTest<armnn::DataType::Float32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinWidthTest<armnn::DataType::Float16>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinWidthTest<armnn::DataType::Signed32>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinWidthTest<armnn::DataType::QAsymmS8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<int32_t, 3>
ArgMinWidthTest<armnn::DataType::QAsymmU8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory);