//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "PadTestImpl.hpp"

#include <QuantizeHelper.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

//
// Implementation templates
//

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> Pad2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorShape inputShape{ 3, 3 };
    const armnn::TensorShape outputShape{ 7, 7 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
        {
            // Height (3) x Width (3)
            4, 8, 6,
            7, 4, 4,
            3, 2, 4
        },
        qScale, qOffset);

    auto p = customPaddingValue;
    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
        {
            p, p, p, p, p, p, p,
            p, p, p, p, p, p, p,
            p, p, 4, 8, 6, p, p,
            p, p, 7, 4, 4, p, p,
            p, p, 3, 2, 4, p, p,
            p, p, p, p, p, p, p,
            p, p, p, p, p, p, p
        },
        qScale, qOffset);

    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = padList;
    descriptor.m_Parameters.m_PadValue = customPaddingValue;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
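
// Note: the pad list above holds one (before, after) pair per input dimension, so each output
// dimension is before + input + after. Both dimensions here use (2, 2), giving 2 + 3 + 2 = 7,
// which is why the 3x3 input is checked against outputShape{ 7, 7 } with the original values
// centred in a border of customPaddingValue. The arithmetic below is an illustrative sketch,
// not code used by the test:
//
//     outputDim = padBefore + inputDim + padAfter;   // 2 + 3 + 2 = 7 for height and width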

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> Pad3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorShape inputShape{ 2, 2, 2 };
    const armnn::TensorShape outputShape{ 3, 5, 6 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
        {
            // Channel 0, Height (2) x Width (2)
            0, 4,
            2, 5,

            // Channel 1, Height (2) x Width (2)
            6, 1,
            5, 2
        },
        qScale, qOffset);

    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
        {
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 4, 0, 0,
            0, 0, 2, 5, 0, 0,
            0, 0, 0, 0, 0, 0,

            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 6, 1, 0, 0,
            0, 0, 5, 2, 0, 0,
            0, 0, 0, 0, 0, 0,

            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        },
        qScale, qOffset);

    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(0,1));
    padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = padList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
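
// Note: each (before, after) pair applies to one dimension independently, so asymmetric and zero
// padding can be mixed. With the pad list above: channels 0 + 2 + 1 = 3, height 2 + 2 + 1 = 5,
// width 2 + 2 + 2 = 6, matching outputShape{ 3, 5, 6 }. No custom pad value is set, so the
// implicit pad value of 0 is expected in the padded region.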

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> Pad4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
        {
            // Batch 0, Channel 0, Height (3) x Width (2)
             0,  1,
             2,  3,
             4,  5,

            // Batch 0, Channel 1, Height (3) x Width (2)
             6,  7,
             8,  9,
            10, 11,

            // Batch 1, Channel 0, Height (3) x Width (2)
            12, 13,
            14, 15,
            16, 17,

            // Batch 1, Channel 1, Height (3) x Width (2)
            18, 19,
            20, 21,
            22, 23
        },
        qScale, qOffset);

    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
        {
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 1, 0,
            0, 2, 3, 0,
            0, 4, 5, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 6, 7, 0,
            0, 8, 9, 0,
            0, 10, 11, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 12, 13, 0,
            0, 14, 15, 0,
            0, 16, 17, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 18, 19, 0,
            0, 20, 21, 0,
            0, 22, 23, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0
        },
        qScale, qOffset);

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
    padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    padList.push_back(std::pair<unsigned int, unsigned int>(3,1));
    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));

    descriptor.m_Parameters.m_PadList = padList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
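
// Note: the same per-dimension rule gives the 4-D output shape: batch 1 + 2 + 1 = 4,
// channel 2 + 2 + 1 = 5, height 3 + 3 + 1 = 7, width 1 + 2 + 1 = 4, i.e. outputShape{ 4, 5, 7, 4 }.
// The expected values above are listed flattened in (batch, channel, height, width) order,
// one 7x4 block per (batch, channel) pair, which is why only blocks 8, 9, 13 and 14 contain
// non-zero data.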

//
// Explicit template instantiations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
Pad2dTestCommon<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
Pad3dTestCommon<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
Pad4dTestCommon<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset);
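
// Note: the Pad*TestCommon templates are defined in this .cpp rather than in the header, so the
// QSymmS16 versions are instantiated explicitly here to make them available to other translation
// units that call them directly. The other data types (QAsymmU8, QSymmS8, Float32, BFloat16) are
// instantiated implicitly by the wrapper functions below.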

//
// Implementation functions
//

LayerTestResult<uint8_t, 2> PadUint82dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::QAsymmU8>(
            workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0, 1.0f);
}

LayerTestResult<uint8_t, 3> PadUint83dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad3dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> PadUint84dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad4dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<float, 2> PadFloat322dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::Float32>(
            workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, 1.0f);
}

LayerTestResult<float, 3> PadFloat323dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<float, 4> PadFloat324dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<armnn::BFloat16, 2> PadBFloat162dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<armnn::BFloat16, 2> PadBFloat162dCustomPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::BFloat16>(
            workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, 1.0f);
}

LayerTestResult<armnn::BFloat16, 3> PadBFloat163dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad3dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<armnn::BFloat16, 4> PadBFloat164dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad4dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<int8_t, 2> PadInt82dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::QSymmS8>(
            workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<int8_t, 2> PadInt82dCustomPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad2dTestCommon<armnn::DataType::QSymmS8>(
            workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0, 1.0f);
}

LayerTestResult<int8_t, 3> PadInt83dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad3dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<int8_t, 4> PadInt84dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return Pad4dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}
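
// Note: these implementation functions are not test cases on their own; a backend test suite is
// expected to wrap each one in its test-registration macro. A minimal sketch, assuming the
// ARMNN_AUTO_TEST_CASE-style macros used in the Arm NN backend test suites (the exact macro name
// and test names vary by version and are illustrative only):
//
//     ARMNN_AUTO_TEST_CASE(PadFloat322d, PadFloat322dTest)
//     ARMNN_AUTO_TEST_CASE(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)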