//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <TestUtils.hpp>

#include <Graph.hpp>
#include <layers/ArgMinMaxLayer.hpp>
#include <layers/BatchToSpaceNdLayer.hpp>
#include <layers/SpaceToDepthLayer.hpp>
#include <layers/PreluLayer.hpp>
#include <layers/StackLayer.hpp>

// Headers for the remaining layer types exercised below (convolutions, pooling and the
// LSTM variants); these may already be reachable through Graph.hpp but are listed
// explicitly for clarity.
#include <layers/Convolution2dLayer.hpp>
#include <layers/Convolution3dLayer.hpp>
#include <layers/DepthwiseConvolution2dLayer.hpp>
#include <layers/Pooling3dLayer.hpp>
#include <layers/QLstmLayer.hpp>
#include <layers/QuantizedLstmLayer.hpp>
#include <layers/TransposeConvolution2dLayer.hpp>

#include <doctest/doctest.h>

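// Each helper below builds a minimal Graph, adds a single layer and checks the shapes
// returned by Layer::InferOutputShapes; the ValidateTensorShapesFromInputs tests go a
// step further and run Graph::InferTensorInfos over a small connected graph.
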
void ArgMinMaxInferOutputShapeImpl(const armnn::ArgMinMaxDescriptor       descriptor,
                                   const std::vector<armnn::TensorShape>& inputShapes,
                                   std::vector<armnn::TensorShape>&       outputShapes)
{
    armnn::Graph graph;
    auto argMinMaxLayer = graph.AddLayer<armnn::ArgMinMaxLayer>(descriptor, "argMinMax");
    outputShapes = argMinMaxLayer->InferOutputShapes(inputShapes);
}

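// ArgMinMax removes the dimension selected by m_Axis from the input shape; a 1-D input
// collapses to the shape { 1 } (see the 1d test below).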
void ArgMinMaxInferOutputShape4dTest()
{
    armnn::Graph graph;
    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Axis = 2;

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 1, 3, 2, 4 }
    };

    std::vector<armnn::TensorShape> outputShapes;
    CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));

    armnn::TensorShape expectedOutputShape( { 1, 3, 4 } );
    CHECK(outputShapes.size() == 1);
    CHECK(outputShapes[0] == expectedOutputShape);
}

void ArgMinMaxInferOutputShape3dTest()
{
    armnn::Graph graph;
    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Axis = 0;

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 1, 3, 2 }
    };

    std::vector<armnn::TensorShape> outputShapes;
    CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));

    armnn::TensorShape expectedOutputShape( { 3, 2 } );
    CHECK(outputShapes.size() == 1);
    CHECK(outputShapes[0] == expectedOutputShape);
}

void ArgMinMaxInferOutputShape2dTest()
{
    armnn::Graph graph;
    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Axis = 1;

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 3, 2 }
    };

    std::vector<armnn::TensorShape> outputShapes;
    CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));

    armnn::TensorShape expectedOutputShape( { 3 } );
    CHECK(outputShapes.size() == 1);
    CHECK(outputShapes[0] == expectedOutputShape);
}

void ArgMinMaxInferOutputShape1dTest()
{
    armnn::Graph graph;
    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Axis = 0;

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 5 }
    };

    std::vector<armnn::TensorShape> outputShapes;
    CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));

    armnn::TensorShape expectedOutputShape( { 1 } );
    CHECK(outputShapes.size() == 1);
    CHECK(outputShapes[0] == expectedOutputShape);
}

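// BatchToSpaceNd (NHWC): batch is divided by the block-shape product and each spatial
// dimension is multiplied by its block size, minus the crops. Here N = 8 / (2 * 2) = 2,
// H = 1 * 2 - 0 = 2, W = 3 * 2 - 2 = 4 and C = 1.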
void BatchToSpaceInferOutputShapeTest()
{
    armnn::Graph graph;

    armnn::BatchToSpaceNdDescriptor descriptor;
    descriptor.m_BlockShape = {2, 2};
    descriptor.m_Crops = {{0, 0}, {2, 0}};
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::BatchToSpaceNdLayer* const batchToSpaceLayer =
        graph.AddLayer<armnn::BatchToSpaceNdLayer>(descriptor, "batchToSpace");

    std::vector<armnn::TensorShape> shapes;
    const std::vector<unsigned int> theDimSizes = {8, 1, 3, 1};
    armnn::TensorShape shape(4, theDimSizes.data());
    shapes.push_back(shape);

    const std::vector<unsigned int> expectedDimSizes = {2, 2, 4, 1};
    armnn::TensorShape expectedShape(4, expectedDimSizes.data());

    CHECK(expectedShape == batchToSpaceLayer->InferOutputShapes(shapes).at(0));
}

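// SpaceToDepth (NHWC): H and W are divided by the block size and the channel count is
// multiplied by blockSize^2, so { 1, 16, 8, 3 } becomes { 1, 8, 4, 12 }.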
void SpaceToDepthInferOutputShapeTest()
{
    armnn::Graph graph;

    armnn::SpaceToDepthDescriptor descriptor;
    descriptor.m_BlockSize  = 2;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::SpaceToDepthLayer* const spaceToDepthLayer =
        graph.AddLayer<armnn::SpaceToDepthLayer>(descriptor, "spaceToDepth");

    std::vector<armnn::TensorShape> shapes;
    const std::vector<unsigned int> dimSizes{ 1, 16, 8, 3 };
    armnn::TensorShape shape(4, dimSizes.data());
    shapes.push_back(shape);

    const std::vector<unsigned int> expectedDimSizes{ 1, 8, 4, 12 };
    armnn::TensorShape expectedShape(4, expectedDimSizes.data());

    CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
}

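// PReLU broadcasts the input and alpha shapes against each other (right-aligned, with
// size-1 dimensions stretched), so the inferred output takes the larger extent of each
// dimension.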
void PreluInferOutputShapeImpl(const std::vector<armnn::TensorShape>& inputShapes,
                               std::vector<armnn::TensorShape>&       outputShapes)
{
    armnn::Graph graph;
    armnn::PreluLayer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
    outputShapes = preluLayer->InferOutputShapes(inputShapes);
}

void PreluInferOutputShapeSameDimsTest()
{
    const std::vector<armnn::TensorShape> inputShapes
    {
        { 5, 1, 1, 7 }, // Input shape
        { 5, 4, 3, 1 }  // Alpha shape
    };

    const std::vector<armnn::TensorShape> expectedOutputShapes
    {
        { 5, 4, 3, 7 }  // Output shape
    };

    std::vector<armnn::TensorShape> outputShapes;
    CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));

    CHECK(outputShapes.size() == 1);
    CHECK(outputShapes[0] == expectedOutputShapes[0]);
}

void PreluInferOutputShapeInputBiggerTest()
{
    const std::vector<armnn::TensorShape> inputShapes
    {
        { 4, 1, 4, 8 }, // Input shape
        { 5, 4, 1 }     // Alpha shape
    };

    const std::vector<armnn::TensorShape> expectedOutputShapes
    {
        { 4, 5, 4, 8 } // Output shape
    };

    std::vector<armnn::TensorShape> outputShapes;
    CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));

    CHECK(outputShapes.size() == 1);
    CHECK(outputShapes[0] == expectedOutputShapes[0]);
}

void PreluInferOutputShapeAlphaBiggerTest()
{
    const std::vector<armnn::TensorShape> inputShapes
    {
        { 4, 1, 2 },   // Input shape
        { 5, 4, 3, 1 } // Alpha shape
    };

    const std::vector<armnn::TensorShape> expectedOutputShapes
    {
        { 5, 4, 3, 2 } // Output shape
    };

    std::vector<armnn::TensorShape> outputShapes;
    CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));

    CHECK(outputShapes.size() == 1);
    CHECK(outputShapes[0] == expectedOutputShapes[0]);
}

void PreluInferOutputShapeNoMatchTest()
{
    const std::vector<armnn::TensorShape> inputShapes
    {
        { 4, 1, 2 },   // Input shape
        { 5, 4, 3, 1 } // Alpha shape
    };

    const std::vector<armnn::TensorShape> expectedOutputShapes
    {
        { 5, 7, 3, 2 } // Output shape
    };

    std::vector<armnn::TensorShape> outputShapes;
    CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));

    CHECK(outputShapes.size() == 1);
    CHECK(outputShapes[0] != expectedOutputShapes[0]);
}

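// Builds a small graph around a PreluLayer: two InputLayers (input and alpha) feed the
// PReLU layer, which in turn feeds a single OutputLayer, all wired up with the Connect()
// test helper.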
void CreatePreluLayerHelper(armnn::Graph& graph,
                            const armnn::TensorShape& inputShape,
                            const armnn::TensorShape& alphaShape,
                            const armnn::TensorShape& outputShape)
{
    // Creates the PReLU layer
    armnn::Layer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");

    // Creates extra layers
    armnn::Layer* const input  = graph.AddLayer<armnn::InputLayer> (0, "input");
    armnn::Layer* const alpha  = graph.AddLayer<armnn::InputLayer> (1, "alpha");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Connects up
    armnn::TensorInfo inputTensorInfo (inputShape,  armnn::DataType::Float32);
    armnn::TensorInfo alphaTensorInfo (alphaShape,  armnn::DataType::Float32);
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
    Connect(input, preluLayer,  inputTensorInfo,  0, 0);
    Connect(alpha, preluLayer,  alphaTensorInfo,  0, 1);
    Connect(preluLayer, output, outputTensorInfo, 0, 0);
}

void PreluValidateTensorShapesFromInputsMatchTest()
{
    armnn::Graph graph;

    // Creates the PReLU layer
    CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 });

    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
    CHECK_NOTHROW(graph.InferTensorInfos());
}

void PreluValidateTensorShapesFromInputsNoMatchTest()
{
    armnn::Graph graph;

    // Creates the PReLU layer
    CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 7, 3, 2 });

    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
    CHECK_THROWS_AS(graph.InferTensorInfos(), armnn::LayerValidationException);
}

void StackInferOutputShapeImpl(const armnn::StackDescriptor           descriptor,
                               const std::vector<armnn::TensorShape>& inputShapes,
                               std::vector<armnn::TensorShape>&       outputShapes)
{
    armnn::Graph graph;
    armnn::StackLayer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
    outputShapes = stackLayer->InferOutputShapes(inputShapes);
}

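// StackLayer infers its output shape from the descriptor alone: a new dimension of size
// m_NumInputs is inserted at position m_Axis into m_InputShape, e.g. { 4, 2 } stacked
// three times on axis 1 gives { 4, 3, 2 }.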
void StackInferOutputShapeFromInputsMatchTest()
{
    armnn::Graph graph;

    armnn::StackDescriptor descriptor;
    descriptor.m_Axis = 1;
    descriptor.m_NumInputs = 3;
    descriptor.m_InputShape = armnn::TensorShape
    (
        { 4, 2 }  // Defined input shape
    );

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 4, 2 }, // Actual input shapes
        { 4, 2 },
        { 4, 2 }
    };

    std::vector<armnn::TensorShape> outputShapes;
    CHECK_NOTHROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));

    armnn::TensorShape expectedOutputShape
    (
        { 4, 3, 2 }
    );
    CHECK(outputShapes.size() == 1);
    CHECK(outputShapes[0] == expectedOutputShape);
}

void StackInferOutputShapeFromInputsNoMatchTest()
{
    armnn::Graph graph;

    armnn::StackDescriptor descriptor;
    descriptor.m_Axis = 1;
    descriptor.m_NumInputs = 3;
    descriptor.m_InputShape = armnn::TensorShape
    (
        { 4, 2 }  // Defined input shape
    );

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 4, 2 }, // Actual input shapes
        { 4, 5 }, // Incorrectly shaped input tensor
        { 4, 2 }
    };

    // Output shape is inferred from the descriptor, so should still be correct despite mismatching input shapes
    std::vector<armnn::TensorShape> outputShapes;
    CHECK_NOTHROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));

    armnn::TensorShape expectedOutputShape
    (
        { 4, 3, 2 }
    );
    CHECK(outputShapes.size() == 1);
    CHECK(outputShapes[0] == expectedOutputShape);
}

void CreateStackLayerHelper(armnn::Graph& graph,
                            const armnn::StackDescriptor& descriptor,
                            const std::vector<armnn::TensorShape>& inputShapes,
                            const armnn::TensorShape& outputShape)
{
    // Creates the Stack layer
    armnn::Layer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");

    // Creates extra layers
    std::vector<armnn::Layer*> inputs;
    for (unsigned int i=0; i<inputShapes.size(); ++i)
    {
        inputs.push_back(graph.AddLayer<armnn::InputLayer>(static_cast<int>(i), "input"));
    }
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Connects up
    std::vector<armnn::TensorInfo> inputTensorInfos;
    for (unsigned int i=0; i<inputs.size(); ++i)
    {
        inputTensorInfos.push_back(armnn::TensorInfo(inputShapes[i], armnn::DataType::Float32));
    }
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    for (unsigned int i=0; i<inputs.size(); ++i)
    {
        Connect(inputs[i], stackLayer, inputTensorInfos[i], 0, i);
    }
    Connect(stackLayer, output, outputTensorInfo, 0, 0);
}

void StackValidateTensorShapesFromInputsMatchTest()
{
    armnn::Graph graph;

    armnn::StackDescriptor descriptor;
    descriptor.m_Axis = 0;
    descriptor.m_NumInputs = 3;
    descriptor.m_InputShape = armnn::TensorShape
    (
        { 2, 5 }  // Defined input shape
    );

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 2, 5 }, // Actual input shapes
        { 2, 5 },
        { 2, 5 }
    };

    // Creates the Stack layer
    CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });

    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
    CHECK_NOTHROW(graph.InferTensorInfos());
}

void StackValidateTensorShapesFromInputsNoMatchTest()
{
    armnn::Graph graph;

    armnn::StackDescriptor descriptor;
    descriptor.m_Axis = 0;
    descriptor.m_NumInputs = 3;
    descriptor.m_InputShape = armnn::TensorShape
    (
        { 2, 5 }  // Defined input shape
    );

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 2, 5 }, // Actual input shapes
        { 2, 2 }, // Incorrectly shaped input tensor
        { 2, 5 }
    };

    // Creates the Stack layer
    CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });

    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
    CHECK_THROWS_AS(graph.InferTensorInfos(), armnn::LayerValidationException);
}

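// Convolution2d (NCHW): with the kernel dilated to d * (k - 1) + 1 = 3, the output
// spatial size is (in + padTotal - dilatedKernel) / stride + 1 = (10 + 2 - 3) / 3 + 1 = 4,
// and the output channel count comes from the filter, giving { 1, 1, 4, 4 }.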
void Convolution2dInferOutputShapeTest()
{
    armnn::Graph graph;

    armnn::Convolution2dDescriptor descriptor;
    descriptor.m_DilationX = 2;
    descriptor.m_DilationY = 2;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_StrideX = 3;
    descriptor.m_StrideY = 3;
    descriptor.m_DataLayout = armnn::DataLayout::NCHW;

    armnn::Convolution2dLayer* const convolution2dLayer =
            graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "convolution2d");

    std::vector<armnn::TensorShape> shapes;
    const std::vector<unsigned int> inputSize = {1, 2, 10, 10};
    armnn::TensorShape inputShape(4, inputSize.data());
    shapes.push_back(inputShape);

    const std::vector<unsigned int> filterSize = { 1, 2, 2, 2};
    armnn::TensorShape filterShape(4, filterSize.data());
    shapes.push_back(filterShape);

    const std::vector<unsigned int> expectedOutputSizes = {1, 1, 4, 4};
    armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());

    CHECK(expectedOutputShape == convolution2dLayer->InferOutputShapes(shapes).at(0));
}

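// Convolution3d (NDHWC): the same arithmetic applies per spatial dimension,
// (5 + 2 - 3) / 2 + 1 = 3, with the output channel count taken from the last filter
// dimension, giving { 1, 3, 3, 3, 1 }.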
void Convolution3dInferOutputShapeTest()
{
    armnn::Graph graph;

    armnn::Convolution3dDescriptor descriptor;
    descriptor.m_DilationX = 1;
    descriptor.m_DilationY = 1;
    descriptor.m_DilationZ = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadFront = 1;
    descriptor.m_PadBack = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_StrideZ = 2;
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;

    armnn::Convolution3dLayer* const convolution3dLayer =
            graph.AddLayer<armnn::Convolution3dLayer>(descriptor, "convolution3d");

    std::vector<armnn::TensorShape> shapes;
    const std::vector<unsigned int> inputSize = {1, 5, 5, 5, 1};
    armnn::TensorShape inputShape(5, inputSize.data());
    shapes.push_back(inputShape);

    const std::vector<unsigned int> filterSize = {3, 3, 3, 1, 1 };
    armnn::TensorShape filterShape(5, filterSize.data());
    shapes.push_back(filterShape);

    const std::vector<unsigned int> expectedOutputSizes = {1, 3, 3, 3, 1};
    armnn::TensorShape expectedOutputShape(5, expectedOutputSizes.data());

    CHECK(expectedOutputShape == convolution3dLayer->InferOutputShapes(shapes).at(0));
}

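// TransposeConvolution2d (NCHW): output spatial size is stride * (in - 1) + kernel - padTotal,
// i.e. 2 * (3 - 1) + 3 - 1 = 6 per dimension, giving { 1, 1, 6, 6 }.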
void TransposeConvolution2dInferOutputShapeTest()
{
    armnn::Graph graph;

    armnn::TransposeConvolution2dDescriptor descriptor;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadLeft = 0;
    descriptor.m_PadRight = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_DataLayout = armnn::DataLayout::NCHW;

    armnn::TransposeConvolution2dLayer* const transposeConvolution2dLayer =
            graph.AddLayer<armnn::TransposeConvolution2dLayer>(descriptor, "TransposeConvolution2d");

    std::vector<armnn::TensorShape> shapes;
    const std::vector<unsigned int> inputSize = {1, 2, 3, 3};
    armnn::TensorShape inputShape(4, inputSize.data());
    shapes.push_back(inputShape);

    const std::vector<unsigned int> filterSize = { 1, 2, 3, 3};
    armnn::TensorShape filterShape(4, filterSize.data());
    shapes.push_back(filterShape);

    const std::vector<unsigned int> expectedOutputSizes = {1, 1, 6, 6};
    armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());

    CHECK(expectedOutputShape == transposeConvolution2dLayer->InferOutputShapes(shapes).at(0));
}

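// DepthwiseConvolution2d (NCHW): the 3x3 kernel dilated by 3 becomes 7, so the output
// spatial size is (10 + 3 - 7) / 2 + 1 = 4; the last filter dimension supplies the output
// channel count (2 here), giving { 1, 2, 4, 4 }.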
void DepthwiseConvolution2dInferOutputShapeTest()
{
    armnn::Graph graph;

    armnn::DepthwiseConvolution2dDescriptor descriptor;
    descriptor.m_DilationX = 3;
    descriptor.m_DilationY = 3;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_DataLayout = armnn::DataLayout::NCHW;

    armnn::DepthwiseConvolution2dLayer* const depthwiseConvolution2dLayer =
            graph.AddLayer<armnn::DepthwiseConvolution2dLayer>(descriptor, "DepthwiseConvolution2d");

    std::vector<armnn::TensorShape> shapes;
    const std::vector<unsigned int> inputSize = {1, 2, 10, 10};
    armnn::TensorShape inputShape(4, inputSize.data());
    shapes.push_back(inputShape);

    const std::vector<unsigned int> filterSize = { 1, 3, 3, 2 };
    armnn::TensorShape filterShape(4, filterSize.data());
    shapes.push_back(filterShape);

    const std::vector<unsigned int> expectedOutputSizes = {1, 2, 4, 4};
    armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());

    CHECK(expectedOutputShape == depthwiseConvolution2dLayer->InferOutputShapes(shapes).at(0));
}

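// Pooling3d (NDHWC): each spatial dimension follows (in + padTotal - poolSize) / stride + 1,
// i.e. (4 + 2 - 2) / 2 + 1 = 3, so the expected output is { 1, 3, 3, 3, 1 }.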
void Pooling3dInferOutputShapeTest()
{
    armnn::Graph graph;

    armnn::Pooling3dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolDepth = 2;
    descriptor.m_PoolHeight = 2;
    descriptor.m_PoolWidth = 2;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadFront = 1;
    descriptor.m_PadBack = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_StrideZ = 2;
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;

    armnn::Pooling3dLayer* const pooling3dLayer =
            graph.AddLayer<armnn::Pooling3dLayer>(descriptor, "pooling3d");

    std::vector<armnn::TensorShape> shapes;
    const std::vector<unsigned int> inputSize = {1, 4, 4, 4, 1};
    armnn::TensorShape inputShape(5, inputSize.data());
    shapes.push_back(inputShape);

    const std::vector<unsigned int> expectedOutputSizes = {1, 3, 3, 3, 1};
    armnn::TensorShape expectedOutputShape(5, expectedOutputSizes.data());

    CHECK(expectedOutputShape == pooling3dLayer->InferOutputShapes(shapes).at(0));
}

// QLstm
void QLstmInferOutputShapeImpl(const armnn::QLstmDescriptor descriptor,
                               const std::vector<armnn::TensorShape>& inputShapes,
                               std::vector<armnn::TensorShape>& outputShapes)
{
    armnn::Graph graph;
    armnn::QLstmLayer* const qLstmLayer = graph.AddLayer<armnn::QLstmLayer>(descriptor, "qLstm");
    outputShapes = qLstmLayer->InferOutputShapes(inputShapes);
}

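// QLstm produces three outputs: outputStateOut, cellStateOut and output. With the shapes
// below (batch size 2, four units) all three are expected to be { 2, 4 }.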
void QLstmInferOutputShapeTest()
{
    armnn::QLstmDescriptor descriptor;
    descriptor.m_PeepholeEnabled = true;
    descriptor.m_CifgEnabled = false;
    descriptor.m_ProjectionEnabled = false;

    // Input shapes
    const std::vector<unsigned int> inputShape{ 2, 5 };
    const std::vector<unsigned int> previousOutputInShape{ 2, 4 };
    const std::vector<unsigned int> previousCellStateInShape{ 2, 4 };

    armnn::TensorShape inputTensorShape(2, inputShape.data());
    armnn::TensorShape previousOutputInTensorShape(2, previousOutputInShape.data());
    armnn::TensorShape previousCellStateInTensorShape(2, previousCellStateInShape.data());

    std::vector<armnn::TensorShape> inShapes
    {
        inputTensorShape,
        previousOutputInTensorShape,
        previousCellStateInTensorShape
    };

    // Output shapes
    const std::vector<unsigned int> outputStateOutShape{ 2, 4 };
    const std::vector<unsigned int> cellStateOutShape{ 2, 4 };
    const std::vector<unsigned int> outputShape{ 2, 4 };
    armnn::TensorShape outputStateOutTensorShape(2, outputStateOutShape.data());
    armnn::TensorShape cellStateOutTensorShape(2, cellStateOutShape.data());
    armnn::TensorShape outputTensorShape(2, outputShape.data());

    std::vector<armnn::TensorShape> expectedOutShapes
    {
        outputStateOutTensorShape,
        cellStateOutTensorShape,
        outputTensorShape
    };

    std::vector<armnn::TensorShape> actualOutShapes;
    CHECK_NOTHROW(QLstmInferOutputShapeImpl(descriptor, inShapes, actualOutShapes));

    CHECK(actualOutShapes.size() == 3);
    CHECK(expectedOutShapes[0] == actualOutShapes[0]);
    CHECK(expectedOutShapes[1] == actualOutShapes[1]);
    CHECK(expectedOutShapes[2] == actualOutShapes[2]);
}

// QuantizedLstm
void QuantizedLstmInferOutputShapeImpl(const std::vector<armnn::TensorShape>& inputShapes,
                                       std::vector<armnn::TensorShape>& outputShapes)
{
    armnn::Graph graph;
    armnn::QuantizedLstmLayer* const quantizedLstmLayer = graph.AddLayer<armnn::QuantizedLstmLayer>("quantizedLstm");
    outputShapes = quantizedLstmLayer->InferOutputShapes(inputShapes);
}

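// QuantizedLstm produces two outputs, cellStateOut and output; with a batch size of 2 and
// an output size of 10 both are expected to be { 2, 10 }.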
void QuantizedLstmInferOutputShapeTest()
{
    // Input shapes
    const std::vector<unsigned int> inputShape{ 2, 5 };
    const std::vector<unsigned int> previousCellStateInShape{ 2, 10 };
    const std::vector<unsigned int> previousOutputInShape{ 2, 10 };
    armnn::TensorShape inputTensorShape(2, inputShape.data());
    armnn::TensorShape previousCellStateInTensorShape(2, previousCellStateInShape.data());
    armnn::TensorShape previousOutputInTensorShape(2, previousOutputInShape.data());

    std::vector<armnn::TensorShape> inShapes
    {
        inputTensorShape,
        previousCellStateInTensorShape,
        previousOutputInTensorShape
    };

    // Output shapes
    const std::vector<unsigned int> cellStateOutShape{ 2, 10 };
    const std::vector<unsigned int> outputShape{ 2, 10 };
    armnn::TensorShape cellStateOutTensorShape(2, cellStateOutShape.data());
    armnn::TensorShape outputTensorShape(2, outputShape.data());

    std::vector<armnn::TensorShape> expectedOutShapes
    {
        cellStateOutTensorShape,
        outputTensorShape
    };

    std::vector<armnn::TensorShape> actualOutShapes;
    CHECK_NOTHROW(QuantizedLstmInferOutputShapeImpl(inShapes, actualOutShapes));

    CHECK(actualOutShapes.size() == 2);
    CHECK(expectedOutShapes[0] == actualOutShapes[0]);
    CHECK(expectedOutShapes[1] == actualOutShapes[1]);
}