//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <backendsCommon/test/EndToEndTestImpl.hpp>

#include <backendsCommon/test/ActivationEndToEndTestImpl.hpp>
#include <backendsCommon/test/AdditionEndToEndTestImpl.hpp>
#include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp>
#include <backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp>
#include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
#include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
#include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
#include <backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp>
#include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp>
#include <backendsCommon/test/FillEndToEndTestImpl.hpp>
#include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
#include <backendsCommon/test/PreluEndToEndTestImpl.hpp>
#include <backendsCommon/test/QLstmEndToEndTestImpl.hpp>
#include <backendsCommon/test/QuantizedLstmEndToEndTestImpl.hpp>
#include <backendsCommon/test/ReduceEndToEndTestImpl.hpp>
#include <backendsCommon/test/ReshapeEndToEndTestImpl.hpp>
#include <backendsCommon/test/SpaceToDepthEndToEndTestImpl.hpp>
#include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
#include <backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp>
#include <backendsCommon/test/TransposeEndToEndTestImpl.hpp>

#include <doctest/doctest.h>

TEST_SUITE("NeonEndToEnd")
{
std::vector<armnn::BackendId> neonDefaultBackends = {armnn::Compute::CpuAcc};

// ElementwiseUnary
// Abs
TEST_CASE("NeonAbsEndToEndTestFloat32")
{
    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
                                                             UnaryOperation::Abs);
}
// Rsqrt
TEST_CASE("NeonRsqrtEndToEndTestFloat32")
{
    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
                                                             UnaryOperation::Rsqrt);
}

// Constant
TEST_CASE("ConstantUsage_Neon_Float32")
{
    CHECK(ConstantUsageFloat32Test(neonDefaultBackends));
}

#if defined(ARMNNREF_ENABLED)

// This test unit requires the reference backend; it is not available if the reference backend is not built.

TEST_CASE("FallbackToCpuRef")
{
    using namespace armnn;

    // Create runtime in which test will run and allow fallback to CpuRef.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Builds up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0);

    // This layer configuration isn't supported by CpuAcc, but we allow fallback to CpuRef, so it should pass.
    NormalizationDescriptor descriptor;
    IConnectableLayer* normalization = net->AddNormalizationLayer(descriptor);

    IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(normalization->GetInputSlot(0));
    normalization->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
    normalization->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));

    // optimize the network
    std::vector<BackendId> backends = {Compute::CpuAcc, Compute::CpuRef};
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Load it into the runtime. It should pass.
    NetworkId netId;
    CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
}

#endif

TEST_CASE("NeonGreaterSimpleEndToEndTest")
{
    const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
                                                0, 0, 0, 0, 0, 0, 0, 0 });

    ComparisonSimpleEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
                                                       ComparisonOperation::Greater,
                                                       expectedOutput);
}

TEST_CASE("NeonGreaterSimpleEndToEndUint8Test")
{
    const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
                                                0, 0, 0, 0, 0, 0, 0, 0 });

    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends,
                                                        ComparisonOperation::Greater,
                                                        expectedOutput);
}

TEST_CASE("NeonGreaterBroadcastEndToEndTest")
{
    const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                1, 1, 1, 1, 1, 1 });

    ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
                                                          ComparisonOperation::Greater,
                                                          expectedOutput);
}

TEST_CASE("NeonGreaterBroadcastEndToEndUint8Test")
{
    const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                1, 1, 1, 1, 1, 1 });

    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends,
                                                           ComparisonOperation::Greater,
                                                           expectedOutput);
}

TEST_CASE("NeonAdditionEndToEndFloat32Test")
{
    AdditionEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonAdditionEndToEndUint8Test")
{
    AdditionEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonBatchMatMulEndToEndFloat32Test")
{
    BatchMatMulEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonConcatEndToEndDim0Test")
{
    ConcatDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonConcatEndToEndDim0Uint8Test")
{
    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonConcatEndToEndDim1Test")
{
    ConcatDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonConcatEndToEndDim1Uint8Test")
{
    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonConcatEndToEndDim3Test")
{
    ConcatDim3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonConcatEndToEndDim3Uint8Test")
{
    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

// DepthToSpace
TEST_CASE("NeonDepthToSpaceEndToEndNchwFloat32")
{
    DepthToSpaceEndToEnd<armnn::DataType::Float32>(neonDefaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("NeonDepthToSpaceEndToEndNchwFloat16")
{
    DepthToSpaceEndToEnd<armnn::DataType::Float16>(neonDefaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("NeonDepthToSpaceEndToEndNchwUint8")
{
    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("NeonDepthToSpaceEndToEndNchwInt16")
{
    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(neonDefaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("NeonDepthToSpaceEndToEndNhwcFloat32")
{
    DepthToSpaceEndToEnd<armnn::DataType::Float32>(neonDefaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("NeonDepthToSpaceEndToEndNhwcFloat16")
{
    DepthToSpaceEndToEnd<armnn::DataType::Float16>(neonDefaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("NeonDepthToSpaceEndToEndNhwcUint8")
{
    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("NeonDepthToSpaceEndToEndNhwcInt16")
{
    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(neonDefaultBackends, armnn::DataLayout::NHWC);
}

// Dequantize
TEST_CASE("DequantizeEndToEndSimpleTest")
{
    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("DequantizeEndToEndOffsetTest")
{
    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonEluEndToEndTestFloat32")
{
    EluEndToEndTest<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonEluEndToEndTestFloat16")
{
    EluEndToEndTest<armnn::DataType::Float16>(neonDefaultBackends);
}

// HardSwish
TEST_CASE("NeonHardSwishEndToEndTestFloat32")
{
    HardSwishEndToEndTest<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonHardSwishEndToEndTestFloat16")
{
    HardSwishEndToEndTest<armnn::DataType::Float16>(neonDefaultBackends);
}

TEST_CASE("NeonHardSwishEndToEndTestQAsymmS8")
{
    HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(neonDefaultBackends);
}

TEST_CASE("NeonHardSwishEndToEndTestQAsymmU8")
{
    HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonPreluEndToEndFloat32Test")
{
    PreluEndToEndNegativeTest<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonPreluEndToEndTestUint8Test")
{
    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonSpaceToDepthNhwcEndToEndTest1")
{
    SpaceToDepthNhwcEndToEndTest1(neonDefaultBackends);
}

TEST_CASE("NeonSpaceToDepthNchwEndToEndTest1")
{
    SpaceToDepthNchwEndToEndTest1(neonDefaultBackends);
}

TEST_CASE("NeonSpaceToDepthNhwcEndToEndTest2")
{
    SpaceToDepthNhwcEndToEndTest2(neonDefaultBackends);
}

TEST_CASE("NeonSpaceToDepthNchwEndToEndTest2")
{
    SpaceToDepthNchwEndToEndTest2(neonDefaultBackends);
}

TEST_CASE("NeonSplitter1dEndToEndTest")
{
    Splitter1dEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter1dEndToEndUint8Test")
{
    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter2dDim0EndToEndTest")
{
    Splitter2dDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter2dDim1EndToEndTest")
{
    Splitter2dDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter2dDim0EndToEndUint8Test")
{
    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter2dDim1EndToEndUint8Test")
{
    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter3dDim0EndToEndTest")
{
    Splitter3dDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter3dDim1EndToEndTest")
{
    Splitter3dDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter3dDim2EndToEndTest")
{
    Splitter3dDim2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter3dDim0EndToEndUint8Test")
{
    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter3dDim1EndToEndUint8Test")
{
    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter3dDim2EndToEndUint8Test")
{
    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter4dDim0EndToEndTest")
{
    Splitter4dDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter4dDim1EndToEndTest")
{
    Splitter4dDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter4dDim2EndToEndTest")
{
    Splitter4dDim2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter4dDim3EndToEndTest")
{
    Splitter4dDim3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter4dDim0EndToEndUint8Test")
{
    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter4dDim1EndToEndUint8Test")
{
    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter4dDim2EndToEndUint8Test")
{
    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonSplitter4dDim3EndToEndUint8Test")
{
    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonQuantizedLstmEndToEndTest")
{
    QuantizedLstmEndToEnd(neonDefaultBackends);
}

TEST_CASE("NeonTransposeConvolution2dEndToEndFloatNchwTest")
{
    TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
        neonDefaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("NeonTransposeConvolution2dEndToEndUint8NchwTest")
{
    TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
        neonDefaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("NeonTransposeConvolution2dEndToEndFloatNhwcTest")
{
    TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
        neonDefaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("NeonTransposeConvolution2dEndToEndUint8NhwcTest")
{
    TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
        neonDefaultBackends, armnn::DataLayout::NHWC);
}

// Transpose
TEST_CASE("NeonTransposeEndToEndTest")
{
    TransposeEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonImportNonAlignedInputPointerTest")
{
    ImportNonAlignedInputPointerTest(neonDefaultBackends);
}

TEST_CASE("NeonExportNonAlignedOutputPointerTest")
{
    ExportNonAlignedOutputPointerTest(neonDefaultBackends);
}

TEST_CASE("NeonImportAlignedPointerTest")
{
    ImportAlignedPointerTest(neonDefaultBackends);
}

TEST_CASE("NeonImportOnlyWorkload")
{
    ImportOnlyWorkload(neonDefaultBackends);
}

TEST_CASE("NeonExportOnlyWorkload")
{
    ExportOnlyWorkload(neonDefaultBackends);
}

TEST_CASE("NeonImportAndExportWorkload")
{
    ImportAndExportWorkload(neonDefaultBackends);
}

TEST_CASE("NeonExportOutputWithSeveralOutputSlotConnectionsTest")
{
    ExportOutputWithSeveralOutputSlotConnectionsTest(neonDefaultBackends);
}

// InstanceNormalization
TEST_CASE("NeonInstanceNormalizationNchwEndToEndTest1")
{
    InstanceNormalizationNchwEndToEndTest1(neonDefaultBackends);
}

TEST_CASE("NeonInstanceNormalizationNchwEndToEndTest2")
{
    InstanceNormalizationNchwEndToEndTest2(neonDefaultBackends);
}

// Fill
TEST_CASE("NeonFillEndToEndTest")
{
    FillEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonFillEndToEndTestFloat16")
{
    FillEndToEnd<armnn::DataType::Float16>(neonDefaultBackends);
}

TEST_CASE("NeonFillEndToEndTestInt32")
{
    FillEndToEnd<armnn::DataType::Signed32>(neonDefaultBackends);
}

// ArgMinMax
TEST_CASE("NeonArgMaxSimpleTest")
{
    ArgMaxEndToEndSimple<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonArgMinSimpleTest")
{
    ArgMinEndToEndSimple<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonArgMaxAxis0Test")
{
    ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonArgMinAxis0Test")
{
    ArgMinAxis0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonArgMaxAxis1Test")
{
    ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonArgMinAxis1Test")
{
    ArgMinAxis1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonArgMaxAxis2Test")
{
    ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonArgMinAxis2Test")
{
    ArgMinAxis2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonArgMaxAxis3Test")
{
    ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonArgMinAxis3Test")
{
    ArgMinAxis3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonArgMaxSimpleTestQuantisedAsymm8")
{
    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonArgMinSimpleTestQuantisedAsymm8")
{
    ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonArgMaxAxis0TestQuantisedAsymm8")
{
    ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonArgMinAxis0TestQuantisedAsymm8")
{
    ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonArgMaxAxis1TestQuantisedAsymm8")
{
    ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonArgMinAxis1TestQuantisedAsymm8")
{
    ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonArgMaxAxis2TestQuantisedAsymm8")
{
    ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonArgMinAxis2TestQuantisedAsymm8")
{
    ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonArgMaxAxis3TestQuantisedAsymm8")
{
    ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

TEST_CASE("NeonArgMinAxis3TestQuantisedAsymm8")
{
    ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}

// Reduce
TEST_CASE("NeonReduceEndToEndTest")
{
    ReduceEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonReduceEndToEndTestFloat16")
{
    ReduceEndToEnd<armnn::DataType::Float16>(neonDefaultBackends);
}

// Reshape
TEST_CASE("NeonReshapeEndToEndTest")
{
    ReshapeEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}

TEST_CASE("NeonReshapeEndToEndTestFloat16")
{
    ReshapeEndToEndFloat16<armnn::DataType::Float16>(neonDefaultBackends);
}

TEST_CASE("NeonStridedSliceInvalidSliceEndToEndTest")
{
    StridedSliceInvalidSliceEndToEndTest(neonDefaultBackends);
}

TEST_CASE("NeonForceImportWithAlignedBuffersEndToEndTest"
          // Currently, the Neon workload for activation does not support tensor handle replacement so this test case
          // will always fail.
          * doctest::skip(true))
{
    ForceImportWithAlignedBuffersEndToEndTest(neonDefaultBackends);
}

TEST_CASE("NeonForceImportWithMisalignedInputBuffersEndToEndTest"
          // Currently, the Neon workload for activation does not support tensor handle replacement so this test case
          // will always fail.
          * doctest::skip(true))
{
    ForceImportWithMisalignedInputBuffersEndToEndTest(neonDefaultBackends);
}

TEST_CASE("NeonForceImportWithMisalignedOutputBuffersEndToEndTest"
          // Currently, the Neon workload for activation does not support tensor handle replacement so this test case
          // will always fail.
          * doctest::skip(true))
{
    ForceImportWithMisalignedOutputBuffersEndToEndTest(neonDefaultBackends);
}

TEST_CASE("NeonForceImportWithMisalignedInputAndOutputBuffersEndToEndTest")
{
    ForceImportWithMisalignedInputAndOutputBuffersEndToEndTest(neonDefaultBackends);
}

// DISABLED
//TEST_CASE("NeonDetectionPostProcessRegularNmsTest")
//{
//    std::vector<float> boxEncodings({
//        0.0f, 0.0f, 0.0f, 0.0f,
//        0.0f, 1.0f, 0.0f, 0.0f,
//        0.0f, -1.0f, 0.0f, 0.0f,
//        0.0f, 0.0f, 0.0f, 0.0f,
//        0.0f, 1.0f, 0.0f, 0.0f,
//        0.0f, 0.0f, 0.0f, 0.0f
//    });
//    std::vector<float> scores({
//        0.0f, 0.9f, 0.8f,
//        0.0f, 0.75f, 0.72f,
//        0.0f, 0.6f, 0.5f,
//        0.0f, 0.93f, 0.95f,
//        0.0f, 0.5f, 0.4f,
//        0.0f, 0.3f, 0.2f
//    });
//    std::vector<float> anchors({
//        0.5f, 0.5f, 1.0f, 1.0f,
//        0.5f, 0.5f, 1.0f, 1.0f,
//        0.5f, 0.5f, 1.0f, 1.0f,
//        0.5f, 10.5f, 1.0f, 1.0f,
//        0.5f, 10.5f, 1.0f, 1.0f,
//        0.5f, 100.5f, 1.0f, 1.0f
//    });
//    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
//                                                                     boxEncodings,
//                                                                     scores,
//                                                                     anchors);
//}

inline void QuantizeData(uint8_t* quant, const float* dequant, const TensorInfo& info)
{
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        quant[i] = armnn::Quantize<uint8_t>(dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
}
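
// A minimal usage sketch for QuantizeData, kept as a comment; the tensor shape and quantization
// parameters below are hypothetical and are not taken from any test in this file:
//
//     armnn::TensorInfo info({ 1, 4 }, armnn::DataType::Float32);
//     info.SetQuantizationScale(0.5f);
//     info.SetQuantizationOffset(0);
//
//     std::vector<float>   dequantized({ 0.0f, 0.5f, 1.0f, 1.5f });
//     std::vector<uint8_t> quantized(dequantized.size(), 0);
//     QuantizeData(quantized.data(), dequantized.data(), info);
//     // With this scale/offset, quantized now holds { 0, 1, 2, 3 }.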

// DISABLED
//TEST_CASE("NeonDetectionPostProcessRegularNmsUint8Test")
//{
//    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
//    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
//    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
//
//    boxEncodingsInfo.SetQuantizationScale(1.0f);
//    boxEncodingsInfo.SetQuantizationOffset(1);
//    scoresInfo.SetQuantizationScale(0.01f);
//    scoresInfo.SetQuantizationOffset(0);
//    anchorsInfo.SetQuantizationScale(0.5f);
//    anchorsInfo.SetQuantizationOffset(0);
//
//    std::vector<float> boxEncodings({
//        0.0f, 0.0f, 0.0f, 0.0f,
//        0.0f, 1.0f, 0.0f, 0.0f,
//        0.0f, -1.0f, 0.0f, 0.0f,
//        0.0f, 0.0f, 0.0f, 0.0f,
//        0.0f, 1.0f, 0.0f, 0.0f,
//        0.0f, 0.0f, 0.0f, 0.0f
//    });
//    std::vector<float> scores({
//        0.0f, 0.9f, 0.8f,
//        0.0f, 0.75f, 0.72f,
//        0.0f, 0.6f, 0.5f,
//        0.0f, 0.93f, 0.95f,
//        0.0f, 0.5f, 0.4f,
//        0.0f, 0.3f, 0.2f
//    });
//    std::vector<float> anchors({
//        0.5f, 0.5f, 1.0f, 1.0f,
//        0.5f, 0.5f, 1.0f, 1.0f,
//        0.5f, 0.5f, 1.0f, 1.0f,
//        0.5f, 10.5f, 1.0f, 1.0f,
//        0.5f, 10.5f, 1.0f, 1.0f,
//        0.5f, 100.5f, 1.0f, 1.0f
//    });
//
//    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
//    std::vector<uint8_t> qScores(scores.size(), 0);
//    std::vector<uint8_t> qAnchors(anchors.size(), 0);
//    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
//    QuantizeData(qScores.data(), scores.data(), scoresInfo);
//    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
//    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, qBoxEncodings,
//                                                                      qScores, qAnchors,
//                                                                      1.0f, 1, 0.01f, 0, 0.5f, 0);
//}
//
//TEST_CASE("NeonDetectionPostProcessFastNmsTest")
//{
//    std::vector<float> boxEncodings({
//        0.0f, 0.0f, 0.0f, 0.0f,
//        0.0f, 1.0f, 0.0f, 0.0f,
//        0.0f, -1.0f, 0.0f, 0.0f,
//        0.0f, 0.0f, 0.0f, 0.0f,
//        0.0f, 1.0f, 0.0f, 0.0f,
//        0.0f, 0.0f, 0.0f, 0.0f
//    });
//    std::vector<float> scores({
//        0.0f, 0.9f, 0.8f,
//        0.0f, 0.75f, 0.72f,
//        0.0f, 0.6f, 0.5f,
//        0.0f, 0.93f, 0.95f,
//        0.0f, 0.5f, 0.4f,
//        0.0f, 0.3f, 0.2f
//    });
//    std::vector<float> anchors({
//        0.5f, 0.5f, 1.0f, 1.0f,
//        0.5f, 0.5f, 1.0f, 1.0f,
//        0.5f, 0.5f, 1.0f, 1.0f,
//        0.5f, 10.5f, 1.0f, 1.0f,
//        0.5f, 10.5f, 1.0f, 1.0f,
//        0.5f, 100.5f, 1.0f, 1.0f
//    });
//    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
//                                                                  boxEncodings,
//                                                                  scores,
//                                                                  anchors);
//}
//
// DISABLED
//TEST_CASE("NeonDetectionPostProcessFastNmsUint8Test")
//{
//    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
//    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
//    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
//
//    boxEncodingsInfo.SetQuantizationScale(1.0f);
//    boxEncodingsInfo.SetQuantizationOffset(1);
//    scoresInfo.SetQuantizationScale(0.01f);
//    scoresInfo.SetQuantizationOffset(0);
//    anchorsInfo.SetQuantizationScale(0.5f);
//    anchorsInfo.SetQuantizationOffset(0);
//
//    std::vector<float> boxEncodings({
//        0.0f, 0.0f, 0.0f, 0.0f,
//        0.0f, 1.0f, 0.0f, 0.0f,
//        0.0f, -1.0f, 0.0f, 0.0f,
//        0.0f, 0.0f, 0.0f, 0.0f,
//        0.0f, 1.0f, 0.0f, 0.0f,
//        0.0f, 0.0f, 0.0f, 0.0f
//    });
//    std::vector<float> scores({
//        0.0f, 0.9f, 0.8f,
//        0.0f, 0.75f, 0.72f,
//        0.0f, 0.6f, 0.5f,
//        0.0f, 0.93f, 0.95f,
//        0.0f, 0.5f, 0.4f,
//        0.0f, 0.3f, 0.2f
//    });
//    std::vector<float> anchors({
//        0.5f, 0.5f, 1.0f, 1.0f,
//        0.5f, 0.5f, 1.0f, 1.0f,
//        0.5f, 0.5f, 1.0f, 1.0f,
//        0.5f, 10.5f, 1.0f, 1.0f,
//        0.5f, 10.5f, 1.0f, 1.0f,
//        0.5f, 100.5f, 1.0f, 1.0f
//    });
//
//    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
//    std::vector<uint8_t> qScores(scores.size(), 0);
//    std::vector<uint8_t> qAnchors(anchors.size(), 0);
//    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
//    QuantizeData(qScores.data(), scores.data(), scoresInfo);
//    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
//    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, qBoxEncodings,
//                                                                   qScores, qAnchors,
//                                                                   1.0f, 1, 0.01f, 0, 0.5f, 0);
//}

TEST_CASE("NeonQLstmEndToEndTest")
{
    QLstmEndToEnd(neonDefaultBackends);
}

}