//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <Graph.hpp>
#include <layers/ArgMinMaxLayer.hpp>
#include <layers/BatchToSpaceNdLayer.hpp>
#include <layers/SpaceToDepthLayer.hpp>
#include <layers/PreluLayer.hpp>
#include <layers/StackLayer.hpp>

#include <boost/test/unit_test.hpp>

void ArgMinMaxInferOutputShapeImpl(const armnn::ArgMinMaxDescriptor       descriptor,
                                   const std::vector<armnn::TensorShape>& inputShapes,
                                   std::vector<armnn::TensorShape>&       outputShapes)
{
    armnn::Graph graph;
    auto argMinMaxLayer = graph.AddLayer<armnn::ArgMinMaxLayer>(descriptor, "argMinMax");
    outputShapes = argMinMaxLayer->InferOutputShapes(inputShapes);
}

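// The ArgMinMax tests below exercise ArgMinMaxLayer::InferOutputShapes. As the expected shapes
// show, the inferred output simply drops the reduced axis, e.g. { 1, 3, 2, 4 } with m_Axis = 2
// becomes { 1, 3, 4 }, and a 1D input collapses to { 1 }.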
void ArgMinMaxInferOutputShape4dTest()
{
    armnn::Graph graph;
    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Axis = 2;

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 1, 3, 2, 4 }
    };

    std::vector<armnn::TensorShape> outputShapes;
    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));

    armnn::TensorShape expectedOutputShape( { 1, 3, 4 } );
    BOOST_CHECK(outputShapes.size() == 1);
    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
}

void ArgMinMaxInferOutputShape3dTest()
{
    armnn::Graph graph;
    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Axis = 0;

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 1, 3, 2 }
    };

    std::vector<armnn::TensorShape> outputShapes;
    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));

    armnn::TensorShape expectedOutputShape( { 3, 2 } );
    BOOST_CHECK(outputShapes.size() == 1);
    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
}

void ArgMinMaxInferOutputShape2dTest()
{
    armnn::Graph graph;
    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Axis = 1;

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 3, 2 }
    };

    std::vector<armnn::TensorShape> outputShapes;
    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));

    armnn::TensorShape expectedOutputShape( { 3 } );
    BOOST_CHECK(outputShapes.size() == 1);
    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
}

void ArgMinMaxInferOutputShape1dTest()
{
    armnn::Graph graph;
    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Axis = 0;

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 5 }
    };

    std::vector<armnn::TensorShape> outputShapes;
    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));

    armnn::TensorShape expectedOutputShape( { 1 } );
    BOOST_CHECK(outputShapes.size() == 1);
    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
}

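// Expected arithmetic for the NHWC BatchToSpaceNd case below (blockShape {2, 2},
// crops {{0, 0}, {2, 0}}), assuming the standard BatchToSpaceNd definition:
//   N: 8 / (2 * 2)     = 2
//   H: 1 * 2 - (0 + 0) = 2
//   W: 3 * 2 - (2 + 0) = 4
//   C: unchanged       = 1
// giving the expected output shape { 2, 2, 4, 1 }.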
void BatchToSpaceInferOutputShapeTest()
{
    armnn::Graph graph;

    armnn::BatchToSpaceNdDescriptor descriptor;
    descriptor.m_BlockShape = {2, 2};
    descriptor.m_Crops = {{0, 0}, {2, 0}};
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::BatchToSpaceNdLayer* const batchToSpaceLayer =
        graph.AddLayer<armnn::BatchToSpaceNdLayer>(descriptor, "batchToSpace");

    std::vector<armnn::TensorShape> shapes;
    const std::vector<unsigned int> theDimSizes = {8, 1, 3, 1};
    armnn::TensorShape shape(4, theDimSizes.data());
    shapes.push_back(shape);

    const std::vector<unsigned int> expectedDimSizes = {2, 2, 4, 1};
    armnn::TensorShape expectedShape(4, expectedDimSizes.data());

    BOOST_CHECK(expectedShape == batchToSpaceLayer->InferOutputShapes(shapes).at(0));
}

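// Expected arithmetic for the NHWC SpaceToDepth case below (blockSize 2):
//   H: 16 / 2    = 8
//   W: 8 / 2     = 4
//   C: 3 * 2 * 2 = 12
// giving the expected output shape { 1, 8, 4, 12 }.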
void SpaceToDepthInferOutputShapeTest()
{
    armnn::Graph graph;

    armnn::SpaceToDepthDescriptor descriptor;
    descriptor.m_BlockSize  = 2;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::SpaceToDepthLayer* const spaceToDepthLayer =
        graph.AddLayer<armnn::SpaceToDepthLayer>(descriptor, "spaceToDepth");

    std::vector<armnn::TensorShape> shapes;
    const std::vector<unsigned int> dimSizes{ 1, 16, 8, 3 };
    armnn::TensorShape shape(4, dimSizes.data());
    shapes.push_back(shape);

    const std::vector<unsigned int> expectedDimSizes{ 1, 8, 4, 12 };
    armnn::TensorShape expectedShape(4, expectedDimSizes.data());

    BOOST_CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
}

void PreluInferOutputShapeImpl(const std::vector<armnn::TensorShape>& inputShapes,
                               std::vector<armnn::TensorShape>&       outputShapes)
{
    armnn::Graph graph;
    armnn::PreluLayer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
    outputShapes = preluLayer->InferOutputShapes(inputShapes);
}

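// The four Prelu*Test functions below check the broadcast rule used for PReLU shape inference:
// the input and alpha shapes are aligned on their trailing dimensions and the output takes the
// element-wise maximum, e.g. { 5, 1, 1, 7 } with alpha { 5, 4, 3, 1 } gives { 5, 4, 3, 7 }.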
void PreluInferOutputShapeSameDimsTest()
{
    const std::vector<armnn::TensorShape> inputShapes
    {
        { 5, 1, 1, 7 }, // Input shape
        { 5, 4, 3, 1 }  // Alpha shape
    };

    const std::vector<armnn::TensorShape> expectedOutputShapes
    {
        { 5, 4, 3, 7 }  // Output shape
    };

    std::vector<armnn::TensorShape> outputShapes;
    BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));

    BOOST_CHECK(outputShapes.size() == 1);
    BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
}

void PreluInferOutputShapeInputBiggerTest()
{
    const std::vector<armnn::TensorShape> inputShapes
    {
        { 4, 1, 4, 8 }, // Input shape
        { 5, 4, 1 }     // Alpha shape
    };

    const std::vector<armnn::TensorShape> expectedOutputShapes
    {
        { 4, 5, 4, 8 } // Output shape
    };

    std::vector<armnn::TensorShape> outputShapes;
    BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));

    BOOST_CHECK(outputShapes.size() == 1);
    BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
}

void PreluInferOutputShapeAlphaBiggerTest()
{
    const std::vector<armnn::TensorShape> inputShapes
    {
        { 4, 1, 2 },   // Input shape
        { 5, 4, 3, 1 } // Alpha shape
    };

    const std::vector<armnn::TensorShape> expectedOutputShapes
    {
        { 5, 4, 3, 2 } // Output shape
    };

    std::vector<armnn::TensorShape> outputShapes;
    BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));

    BOOST_CHECK(outputShapes.size() == 1);
    BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
}

void PreluInferOutputShapeNoMatchTest()
{
    const std::vector<armnn::TensorShape> inputShapes
    {
        { 4, 1, 2 },   // Input shape
        { 5, 4, 3, 1 } // Alpha shape
    };

    const std::vector<armnn::TensorShape> expectedOutputShapes
    {
        { 5, 7, 3, 2 } // Output shape
    };

    std::vector<armnn::TensorShape> outputShapes;
    BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));

    BOOST_CHECK(outputShapes.size() == 1);
    BOOST_CHECK(outputShapes[0] != expectedOutputShapes[0]);
}

void CreatePreluLayerHelper(armnn::Graph& graph,
                            const armnn::TensorShape& inputShape,
                            const armnn::TensorShape& alphaShape,
                            const armnn::TensorShape& outputShape)
{
    // Creates the PReLU layer
    armnn::Layer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");

    // Creates extra layers
    armnn::Layer* const input  = graph.AddLayer<armnn::InputLayer> (0, "input");
    armnn::Layer* const alpha  = graph.AddLayer<armnn::InputLayer> (1, "alpha");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Connects up
    armnn::TensorInfo inputTensorInfo (inputShape,  armnn::DataType::Float32);
    armnn::TensorInfo alphaTensorInfo (alphaShape,  armnn::DataType::Float32);
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
    Connect(input, preluLayer,  inputTensorInfo,  0, 0);
    Connect(alpha, preluLayer,  alphaTensorInfo,  0, 1);
    Connect(preluLayer, output, outputTensorInfo, 0, 0);
}

void PreluValidateTensorShapesFromInputsMatchTest()
{
    armnn::Graph graph;

    // Creates the PReLU layer
    CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 });

    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
}

void PreluValidateTensorShapesFromInputsNoMatchTest()
{
    armnn::Graph graph;

    // Creates the PReLU layer
    CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 7, 3, 2 });

    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
    BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
}

void StackInferOutputShapeImpl(const armnn::StackDescriptor           descriptor,
                               const std::vector<armnn::TensorShape>& inputShapes,
                               std::vector<armnn::TensorShape>&       outputShapes)
{
    armnn::Graph graph;
    armnn::StackLayer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
    outputShapes = stackLayer->InferOutputShapes(inputShapes);
}

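// StackLayer::InferOutputShapes is expected to build the output purely from the descriptor:
// m_NumInputs is inserted into m_InputShape at position m_Axis. For the tests below,
// { 4, 2 } with 3 inputs stacked on axis 1 gives { 4, 3, 2 }.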
void StackInferOutputShapeFromInputsMatchTest()
{
    armnn::Graph graph;

    armnn::StackDescriptor descriptor;
    descriptor.m_Axis = 1;
    descriptor.m_NumInputs = 3;
    descriptor.m_InputShape = armnn::TensorShape
    (
        { 4, 2 }  // Defined input shape
    );

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 4, 2 }, // Actual input shapes
        { 4, 2 },
        { 4, 2 }
    };

    std::vector<armnn::TensorShape> outputShapes;
    BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));

    armnn::TensorShape expectedOutputShape
    (
        { 4, 3, 2 }
    );
    BOOST_CHECK(outputShapes.size() == 1);
    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
}

void StackInferOutputShapeFromInputsNoMatchTest()
{
    armnn::Graph graph;

    armnn::StackDescriptor descriptor;
    descriptor.m_Axis = 1;
    descriptor.m_NumInputs = 3;
    descriptor.m_InputShape = armnn::TensorShape
    (
        { 4, 2 }  // Defined input shape
    );

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 4, 2 }, // Actual input shapes
        { 4, 5 }, // Incorrectly shaped input tensor
        { 4, 2 }
    };

    // Output shape is inferred from the descriptor, so should still be correct despite mismatching input shapes
    std::vector<armnn::TensorShape> outputShapes;
    BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));

    armnn::TensorShape expectedOutputShape
    (
        { 4, 3, 2 }
    );
    BOOST_CHECK(outputShapes.size() == 1);
    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
}

void CreateStackLayerHelper(armnn::Graph& graph,
                            const armnn::StackDescriptor& descriptor,
                            const std::vector<armnn::TensorShape>& inputShapes,
                            const armnn::TensorShape& outputShape)
{
    // Creates the Stack layer
    armnn::Layer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");

    // Creates extra layers
    std::vector<armnn::Layer*> inputs;
    for (unsigned int i=0; i<inputShapes.size(); ++i)
    {
        inputs.push_back(graph.AddLayer<armnn::InputLayer>(static_cast<int>(i), "input"));
    }
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Connects up
    std::vector<armnn::TensorInfo> inputTensorInfos;
    for (unsigned int i=0; i<inputs.size(); ++i)
    {
        inputTensorInfos.push_back(armnn::TensorInfo(inputShapes[i], armnn::DataType::Float32));
    }
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    for (unsigned int i=0; i<inputs.size(); ++i)
    {
        Connect(inputs[i], stackLayer, inputTensorInfos[i], 0, i);
    }
    Connect(stackLayer, output, outputTensorInfo, 0, 0);
}

void StackValidateTensorShapesFromInputsMatchTest()
{
    armnn::Graph graph;

    armnn::StackDescriptor descriptor;
    descriptor.m_Axis = 0;
    descriptor.m_NumInputs = 3;
    descriptor.m_InputShape = armnn::TensorShape
    (
        { 2, 5 }  // Defined input shape
    );

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 2, 5 }, // Actual input shapes
        { 2, 5 },
        { 2, 5 }
    };

    // Creates the Stack layer
    CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });

    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
}

void StackValidateTensorShapesFromInputsNoMatchTest()
{
    armnn::Graph graph;

    armnn::StackDescriptor descriptor;
    descriptor.m_Axis = 0;
    descriptor.m_NumInputs = 3;
    descriptor.m_InputShape = armnn::TensorShape
    (
        { 2, 5 }  // Defined input shape
    );

    const std::vector<armnn::TensorShape> inputShapes
    {
        { 2, 5 }, // Actual input shapes
        { 2, 2 }, // Incorrectly shaped input tensor
        { 2, 5 }
    };

    // Creates the Stack layer
    CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });

    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
    BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
}

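// Expected arithmetic for the NCHW Convolution2d case below, assuming the usual floor-based
// convolution output formula with a dilated kernel of size dilation * (kernel - 1) + 1 = 3:
//   H = W = (10 + 1 + 1 - 3) / 3 + 1 = 4
// The output channel count comes from the first filter dimension (1), giving the expected
// output shape { 1, 1, 4, 4 }.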
void Convolution2dInferOutputShapeTest()
{
    armnn::Graph graph;

    armnn::Convolution2dDescriptor descriptor;
    descriptor.m_DilationX = 2;
    descriptor.m_DilationY = 2;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_StrideX = 3;
    descriptor.m_StrideY = 3;
    descriptor.m_DataLayout = armnn::DataLayout::NCHW;

    armnn::Convolution2dLayer* const convolution2dLayer =
            graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "convolution2d");

    std::vector<armnn::TensorShape> shapes;
    const std::vector<unsigned int> inputSize = {1, 2, 10, 10};
    armnn::TensorShape inputShape(4, inputSize.data());
    shapes.push_back(inputShape);

    const std::vector<unsigned int> filterSize = { 1, 2, 2, 2};
    armnn::TensorShape filterShape(4, filterSize.data());
    shapes.push_back(filterShape);

    const std::vector<unsigned int> expectedOutputSizes = {1, 1, 4, 4};
    armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());

    BOOST_CHECK(expectedOutputShape == convolution2dLayer->InferOutputShapes(shapes).at(0));
}

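// Expected arithmetic for the NCHW TransposeConvolution2d case below, assuming the standard
// transposed-convolution output formula:
//   H = W = stride * (in - 1) + kernel - (padBegin + padEnd) = 2 * (3 - 1) + 3 - (0 + 1) = 6
// giving the expected output shape { 1, 1, 6, 6 }.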
void TransposeConvolution2dInferOutputShapeTest()
{
    armnn::Graph graph;

    armnn::TransposeConvolution2dDescriptor descriptor;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadLeft = 0;
    descriptor.m_PadRight = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_DataLayout = armnn::DataLayout::NCHW;

    armnn::TransposeConvolution2dLayer* const transposeConvolution2dLayer =
            graph.AddLayer<armnn::TransposeConvolution2dLayer>(descriptor, "TransposeConvolution2d");

    std::vector<armnn::TensorShape> shapes;
    const std::vector<unsigned int> inputSize = {1, 2, 3, 3};
    armnn::TensorShape inputShape(4, inputSize.data());
    shapes.push_back(inputShape);

    const std::vector<unsigned int> filterSize = { 1, 2, 3, 3};
    armnn::TensorShape filterShape(4, filterSize.data());
    shapes.push_back(filterShape);

    const std::vector<unsigned int> expectedOutputSizes = {1, 1, 6, 6};
    armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());

    BOOST_CHECK(expectedOutputShape == transposeConvolution2dLayer->InferOutputShapes(shapes).at(0));
}

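// Expected arithmetic for the NCHW DepthwiseConvolution2d case below, assuming the same
// floor-based formula with a dilated kernel of size 3 * (3 - 1) + 1 = 7:
//   H = W = (10 + 1 + 2 - 7) / 2 + 1 = 4
// The filter appears to use the [M, I, H, W] layout, so the output channel count is
// M * I = 1 * 2 = 2, giving the expected output shape { 1, 2, 4, 4 }.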
void DepthwiseConvolution2dInferOutputShapeTest()
{
    armnn::Graph graph;

    armnn::DepthwiseConvolution2dDescriptor descriptor;
    descriptor.m_DilationX = 3;
    descriptor.m_DilationY = 3;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_DataLayout = armnn::DataLayout::NCHW;

    armnn::DepthwiseConvolution2dLayer* const depthwiseConvolution2dLayer =
            graph.AddLayer<armnn::DepthwiseConvolution2dLayer>(descriptor, "DepthwiseConvolution2d");

    std::vector<armnn::TensorShape> shapes;
    const std::vector<unsigned int> inputSize = {1, 2, 10, 10};
    armnn::TensorShape inputShape(4, inputSize.data());
    shapes.push_back(inputShape);

    const std::vector<unsigned int> filterSize = { 1, 2, 3, 3};
    armnn::TensorShape filterShape(4, filterSize.data());
    shapes.push_back(filterShape);

    const std::vector<unsigned int> expectedOutputSizes = {1, 2, 4, 4};
    armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());

    BOOST_CHECK(expectedOutputShape == depthwiseConvolution2dLayer->InferOutputShapes(shapes).at(0));
}

// QLstm
void QLstmInferOutputShapeImpl(const armnn::QLstmDescriptor descriptor,
                               const std::vector<armnn::TensorShape>& inputShapes,
                               std::vector<armnn::TensorShape>& outputShapes)
{
    armnn::Graph graph;
    armnn::QLstmLayer* const qLstmLayer = graph.AddLayer<armnn::QLstmLayer>(descriptor, "qLstm");
    outputShapes = qLstmLayer->InferOutputShapes(inputShapes);
}

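// For the QLstm case below (CIFG disabled, no projection), the layer is expected to report three
// outputs: outputStateOut, cellStateOut and output. With a batch size of 2 and an output/cell
// state size of 4 (taken from the previous output and cell state inputs), all three come out
// as { 2, 4 }.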
void QLstmInferOutputShapeTest()
{
    armnn::QLstmDescriptor descriptor;
    descriptor.m_PeepholeEnabled = true;
    descriptor.m_CifgEnabled = false;
    descriptor.m_ProjectionEnabled = false;

    // Input shapes
    const std::vector<unsigned int> inputShape{ 2, 5 };
    const std::vector<unsigned int> previousOutputInShape{ 2, 4 };
    const std::vector<unsigned int> previousCellStateInShape{ 2, 4 };

    armnn::TensorShape inputTensorShape(2, inputShape.data());
    armnn::TensorShape previousOutputInTensorShape(2, previousOutputInShape.data());
    armnn::TensorShape previousCellStateInTensorShape(2, previousCellStateInShape.data());

    std::vector<armnn::TensorShape> inShapes
    {
        inputTensorShape,
        previousOutputInTensorShape,
        previousCellStateInTensorShape
    };

    // Output shapes
    const std::vector<unsigned int> outputStateOutShape{ 2, 4 };
    const std::vector<unsigned int> cellStateOutShape{ 2, 4 };
    const std::vector<unsigned int> outputShape{ 2, 4 };
    armnn::TensorShape outputStateOutTensorShape(2, outputStateOutShape.data());
    armnn::TensorShape cellStateOutTensorShape(2, cellStateOutShape.data());
    armnn::TensorShape outputTensorShape(2, outputShape.data());

    std::vector<armnn::TensorShape> expectedOutShapes
    {
        outputStateOutTensorShape,
        cellStateOutTensorShape,
        outputTensorShape
    };

    std::vector<armnn::TensorShape> actualOutShapes;
    BOOST_CHECK_NO_THROW(QLstmInferOutputShapeImpl(descriptor, inShapes, actualOutShapes));

    BOOST_CHECK(actualOutShapes.size() == 3);
    BOOST_CHECK(expectedOutShapes[0] == actualOutShapes[0]);
    BOOST_CHECK(expectedOutShapes[1] == actualOutShapes[1]);
    BOOST_CHECK(expectedOutShapes[2] == actualOutShapes[2]);
}

// QuantizedLstm
void QuantizedLstmInferOutputShapeImpl(const std::vector<armnn::TensorShape>& inputShapes,
                                       std::vector<armnn::TensorShape>& outputShapes)
{
    armnn::Graph graph;
    armnn::QuantizedLstmLayer* const quantizedLstmLayer = graph.AddLayer<armnn::QuantizedLstmLayer>("quantizedLstm");
    outputShapes = quantizedLstmLayer->InferOutputShapes(inputShapes);
}

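// For the QuantizedLstm case below, the layer is expected to report two outputs: cellStateOut and
// output, both of shape [batch, outputSize]. With batch 2 and an output/cell state size of 10,
// both come out as { 2, 10 }.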
void QuantizedLstmInferOutputShapeTest()
{
    // Input shapes
    const std::vector<unsigned int> inputShape{ 2, 5 };
    const std::vector<unsigned int> previousCellStateInShape{ 2, 10 };
    const std::vector<unsigned int> previousOutputInShape{ 2, 10 };
    armnn::TensorShape inputTensorShape(2, inputShape.data());
    armnn::TensorShape previousCellStateInTensorShape(2, previousCellStateInShape.data());
    armnn::TensorShape previousOutputInTensorShape(2, previousOutputInShape.data());

    std::vector<armnn::TensorShape> inShapes
    {
        inputTensorShape,
        previousCellStateInTensorShape,
        previousOutputInTensorShape
    };

    // Output shapes
    const std::vector<unsigned int> cellStateOutShape{ 2, 10 };
    const std::vector<unsigned int> outputShape{ 2, 10 };
    armnn::TensorShape cellStateOutTensorShape(2, cellStateOutShape.data());
    armnn::TensorShape outputTensorShape(2, outputShape.data());

    std::vector<armnn::TensorShape> expectedOutShapes
    {
        cellStateOutTensorShape,
        outputTensorShape
    };

    std::vector<armnn::TensorShape> actualOutShapes;
    BOOST_CHECK_NO_THROW(QuantizedLstmInferOutputShapeImpl(inShapes, actualOutShapes));

    BOOST_CHECK(actualOutShapes.size() == 2);
    BOOST_CHECK(expectedOutShapes[0] == actualOutShapes[0]);
    BOOST_CHECK(expectedOutShapes[1] == actualOutShapes[1]);
}
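
// The helpers above are plain functions; registering them as actual test cases happens elsewhere
// in the test suite. Purely as a hypothetical illustration (kept out of the build on purpose, and
// the suite/case names are examples only), a Boost.Test wrapper could look like this:
#if 0
BOOST_AUTO_TEST_SUITE(InferOutputShapeExamples)

BOOST_AUTO_TEST_CASE(ArgMinMaxInferOutputShape4d)
{
    ArgMinMaxInferOutputShape4dTest();
}

BOOST_AUTO_TEST_CASE(Convolution2dInferOutputShape)
{
    Convolution2dInferOutputShapeTest();
}

BOOST_AUTO_TEST_SUITE_END()
#endif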