//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <Graph.hpp>

#include <backendsCommon/MapWorkload.hpp>
#include <backendsCommon/UnmapWorkload.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <doctest/doctest.h>

#include <sstream> // std::stringstream, used by TestLayerTypeMatches() below

namespace
{
armnn::Graph dummyGraph;

// Make a dummy TensorInfo object.
template<armnn::DataType DataType>
armnn::TensorInfo MakeDummyTensorInfo()
{
    return armnn::TensorInfo({2,2,2,2}, DataType, 1.0, 0);
}

// Make a dummy WorkloadInfo using a dummy TensorInfo.
template<armnn::DataType DataType>
armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
{
    armnn::WorkloadInfo info;

    for (unsigned int i = 0; i < numInputs; i++)
    {
        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    for (unsigned int o = 0; o < numOutputs; o++)
    {
        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    return info;
}
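// e.g. MakeDummyWorkloadInfo<armnn::DataType::Float32>(2, 1) describes a workload with two
// Float32 inputs of shape {2,2,2,2} and one such output.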

// Template class to create a dummy layer (2 parameters).
template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
struct DummyLayer
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};
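// e.g. DummyLayer<armnn::ActivationLayer> adds an ActivationLayer with a default-constructed
// ActivationDescriptor to dummyGraph, and erases it again when the DummyLayer goes out of scope.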

// Template class to create a dummy layer (1 parameter).
template<typename LayerType>
struct DummyLayer<LayerType, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchNormalizationLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
        m_Layer->m_Mean = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Variance = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Beta = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Gamma = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchNormalizationLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::BatchToSpaceNdLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(armnn::BatchToSpaceNdDescriptor(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchToSpaceNdLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConstantLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConstantLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::InputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConcatLayer>
{
    DummyLayer()
    {
        armnn::OriginsDescriptor desc(2);
        m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConcatLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::MapLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::MapLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::MapLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::OutputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::SplitterLayer>
{
    DummyLayer()
    {
        armnn::ViewsDescriptor desc(1);
        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::SplitterLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::UnmapLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::UnmapLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::UnmapLayer* m_Layer;
};

template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
    DummyConvolutionLayer()
    {
        typename ConvolutionLayerType::DescriptorType desc;
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
    }

    ~DummyConvolutionLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    ConvolutionLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};

// Note: Once m_Weight and m_Bias are removed from TransposeConvolution2dLayer, this specialization
// can reuse DummyConvolutionLayer.
template <>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
{
    DummyLayer()
    {
        typename armnn::TransposeConvolution2dLayer::DescriptorType desc;
        desc.m_StrideX = 1;
        desc.m_StrideY = 1;
        m_Layer = dummyGraph.AddLayer<armnn::TransposeConvolution2dLayer>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::TransposeConvolution2dLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::DetectionPostProcessLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::DetectionPostProcessLayer>(armnn::DetectionPostProcessDescriptor(), "");
        m_Layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::DetectionPostProcessLayer* m_Layer;
};

template <typename LstmLayerType>
struct DummyLstmLayer
{
    DummyLstmLayer()
    {
        typename LstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<LstmLayerType>(desc, "");
        m_Layer->m_BasicParameters.m_InputToForgetWeights     = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights       = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights     = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias           = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias                 = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias           = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        m_Layer->m_CifgParameters.m_InputToInputWeights        = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights    = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias              = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::LstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::LstmLayer>
        : public DummyLstmLayer<armnn::LstmLayer>
{
};

template <typename UnidirectionalSequenceLstmLayerType>
struct DummyUnidirectionalSequenceLstmLayer
{
    DummyUnidirectionalSequenceLstmLayer()
    {
        typename UnidirectionalSequenceLstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<UnidirectionalSequenceLstmLayerType>(desc, "");
        m_Layer->m_BasicParameters.m_InputToForgetWeights     = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights       = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights     = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias           = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias                 = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias           = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        m_Layer->m_CifgParameters.m_InputToInputWeights        = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights    = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias              = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyUnidirectionalSequenceLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::UnidirectionalSequenceLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::UnidirectionalSequenceLstmLayer>
        : public DummyUnidirectionalSequenceLstmLayer<armnn::UnidirectionalSequenceLstmLayer>
{
};

template<>
struct DummyLayer<armnn::QLstmLayer>
{
    DummyLayer()
    {
        armnn::QLstmLayer::DescriptorType desc;
        desc.m_CifgEnabled = false;
        desc.m_PeepholeEnabled = true;
        desc.m_ProjectionEnabled = true;
        desc.m_LayerNormEnabled = true;

        m_Layer = dummyGraph.AddLayer<armnn::QLstmLayer>(desc, "qLstm");

        // Basic params
        m_Layer->m_BasicParameters.m_InputToForgetWeights     = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_InputToCellWeights       = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_InputToOutputWeights     = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));

        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));

        m_Layer->m_BasicParameters.m_ForgetGateBias           = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_BasicParameters.m_CellBias                 = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_BasicParameters.m_OutputGateBias           = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // CIFG optional params
        m_Layer->m_CifgParameters.m_InputToInputWeights     = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_CifgParameters.m_InputGateBias           = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // Projection optional params
        m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
        m_Layer->m_ProjectionParameters.m_ProjectionBias    = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));

        // Peephole optional params
        m_Layer->m_PeepholeParameters.m_CellToInputWeights  = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));

        // Layer normalization optional params
        m_Layer->m_LayerNormParameters.m_InputLayerNormWeights  = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_CellLayerNormWeights   = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
        m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::QuantizedLstmLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");

        m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights  = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights   = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights  = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));

        m_Layer->m_QuantizedLstmParameters.m_InputGateBias  = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_CellBias       = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QuantizedLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::FullyConnectedLayer>
{
    DummyLayer()
    {
        armnn::FullyConnectedLayer::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::FullyConnectedLayer* m_Layer;
};

// Tag type that gives each LayerType entry its own strong type.
template<armnn::LayerType>
struct Tag{};
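// For example, Tag<armnn::LayerType::Map> and Tag<armnn::LayerType::Unmap> are distinct, empty
// types, which lets the IsLayerSupportedTest() overloads further down be selected per layer type
// at compile time (tag dispatch).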

#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->CreateWorkload(armnn::LayerType::name, desc, info); \
    } \
};

#define DECLARE_LAYER_POLICY_MAP_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    using Workload = armnn::name##Workload; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory); \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return std::make_unique<armnn::name##Workload>(desc, info); \
    } \
};

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes one parameter (name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes two parameters (descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)
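// As an illustration, DECLARE_LAYER_POLICY_2_PARAM(Activation) below expands (roughly) to:
//
//     template<armnn::DataType DataType>
//     struct LayerTypePolicy<armnn::LayerType::Activation, DataType>
//     {
//         using Type = armnn::ActivationLayer;
//         using Desc = armnn::ActivationDescriptor;
//         using QueueDesc = armnn::ActivationQueueDescriptor;
//         constexpr static const char* NameStr = "Activation";
//         constexpr static const bool IsException = false;
//
//         static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory,
//             unsigned int nIn, unsigned int nOut)
//         {
//             armnn::ActivationQueueDescriptor desc;
//             armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut);
//             return factory->CreateWorkload(armnn::LayerType::Activation, desc, info);
//         }
//     };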

#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = true; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory, nIn, nOut); \
        return std::unique_ptr<armnn::IWorkload>(); \
    } \
};

#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)

// Layer policy primary template; the macros above define its specializations.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;

// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER_POLICY_1_PARAM(Addition)
ARMNN_NO_DEPRECATE_WARN_END

DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)

DECLARE_LAYER_POLICY_2_PARAM(BatchMatMul)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)

DECLARE_LAYER_POLICY_1_PARAM(Cast)

DECLARE_LAYER_POLICY_2_PARAM(ChannelShuffle)

DECLARE_LAYER_POLICY_2_PARAM(Comparison)

DECLARE_LAYER_POLICY_2_PARAM(Concat)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_2_PARAM(Convolution3d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_1_PARAM(MemImport)

DECLARE_LAYER_POLICY_1_PARAM(Debug)

DECLARE_LAYER_POLICY_2_PARAM(DepthToSpace)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_1_PARAM(Dequantize)

DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)

DECLARE_LAYER_POLICY_2_PARAM(ElementwiseBinary)

DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_2_PARAM(Fill)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_2_PARAM(Gather)

DECLARE_LAYER_POLICY_1_PARAM(GatherNd)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(LogicalBinary)

DECLARE_LAYER_POLICY_2_PARAM(LogSoftmax)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_MAP_PARAM(Map, void)

ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER_POLICY_1_PARAM(Maximum)
ARMNN_NO_DEPRECATE_WARN_END

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_1_PARAM(Merge)

ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER_POLICY_1_PARAM(Minimum)
ARMNN_NO_DEPRECATE_WARN_END

ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
ARMNN_NO_DEPRECATE_WARN_END

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_1_PARAM(Quantize)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_2_PARAM(Pooling3d)

DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)

DECLARE_LAYER_POLICY_1_PARAM(Prelu)

DECLARE_LAYER_POLICY_2_PARAM(QLstm)

DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)

ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER_POLICY_1_PARAM(Division)
ARMNN_NO_DEPRECATE_WARN_END

DECLARE_LAYER_POLICY_1_PARAM(Rank)

DECLARE_LAYER_POLICY_2_PARAM(Resize)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_1_PARAM(Shape)

DECLARE_LAYER_POLICY_2_PARAM(Slice)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_2_PARAM(Stack)

DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)

DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)

ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
ARMNN_NO_DEPRECATE_WARN_END

DECLARE_LAYER_POLICY_2_PARAM(Reduce)

DECLARE_LAYER_POLICY_1_PARAM(Switch)

DECLARE_LAYER_POLICY_2_PARAM(Transpose)

DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)

DECLARE_LAYER_POLICY_2_PARAM(UnidirectionalSequenceLstm)

DECLARE_LAYER_POLICY_MAP_PARAM(Unmap, void)


// Generic implementation to get the number of input slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}


// Tests that the IsLayerSupported() function returns the correct value.
// We determine the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
// Returns true if expectations are met, otherwise returns false.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
{
    using LayerPolicy = LayerTypePolicy<Type, DataType>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    if (LayerPolicy::IsException) // Don't test exceptions to the rule.
    {
        return true;
    }

    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);

    // Make another dummy layer just to make IsLayerSupported have valid inputs.
    DummyLayer<armnn::ConstantLayer, void> previousLayer;
    // Set output of the previous layer to a dummy tensor.
    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    // Connect all outputs of the previous layer to inputs of tested layer.
    for (unsigned int i = 0; i < numIn; i++)
    {
        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
        previousLayerOutputSlot.Connect(layerInputSlot);
    }
    // Set outputs of tested layer to a dummy tensor.
    for (unsigned int i = 0; i < numOut; i++)
    {
        layer.m_Layer->GetOutputSlot(i).SetTensorInfo(output);
    }

    std::string layerName = LayerPolicy::NameStr;
    std::string reasonIfUnsupported;
    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
    {
        std::string errorMsg = " layer expected support but found none.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
            CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            FAIL(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            FAIL(errorMsg << layerName);
            return false;
        }
    }
    else
    {
        std::string errorMsg = " layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
            CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        // These two exceptions are ok: for workloads that are partially supported, attempting to instantiate them
        // using parameters that make IsLayerSupported() return false should throw an
        // InvalidArgumentException or UnimplementedException.
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const armnn::UnimplementedException& e)
        {
            IgnoreUnused(e);
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            FAIL(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            FAIL(errorMsg << layerName);
            return false;
        }
    }
}

template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Map>)
{
    IgnoreUnused(factory);
    return true;
}

template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Unmap>)
{
    IgnoreUnused(factory);
    return true;
}

// Helper function to compute the next type in the LayerType enum.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
}
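// e.g. NextType(armnn::LayerType::FirstLayer) yields the enum entry immediately after FirstLayer.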

// Termination function for determining the end of the LayerType enumeration.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}

// Recursive function that tests one entry in the LayerType enum and then recurses on the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
    IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
        (factory, Tag<NextType(Type)>());
}
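// For illustration: for a factory F and data type D, the recursion instantiates one function per
// enum entry at compile time, roughly:
//
//     IsLayerSupportedTestsImpl<F, D, FirstLayer>(...)            // tests FirstLayer, then calls
//     IsLayerSupportedTestsImpl<F, D, NextType(FirstLayer)>(...)  // ...and so on, until
//     Tag<armnn::LayerType::LastLayer> selects the termination overload above.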

// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}
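// A minimal usage sketch, assuming the backend under test exposes a workload factory such as the
// reference backend's RefWorkloadFactory (the exact factory type depends on the backend):
//
//     TEST_CASE("IsLayerSupportedReferenceFloat32")
//     {
//         armnn::RefWorkloadFactory factory;
//         CHECK(IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory));
//     }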

template<armnn::LayerType Type>
bool TestLayerTypeMatches()
{
    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    std::stringstream ss;
    ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
    bool v = Type == layer.m_Layer->GetType();
    CHECK_MESSAGE(v, ss.str());
    return v;
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
        LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}
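// Typically driven from the first enum entry, e.g. (a sketch):
//
//     CHECK(LayerTypeMatchesTestImpl<armnn::LayerType::FirstLayer>(Tag<armnn::LayerType::FirstLayer>()));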

template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}
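// A usage sketch for an Fp16 -> Fp32 conversion layer (the factory type is an assumption, as above):
//
//     std::string reason;
//     bool supported = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory,
//                                                   armnn::ConvertFp16ToFp32Layer,
//                                                   armnn::DataType::Float16,
//                                                   armnn::DataType::Float32>(reason);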

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsLogicalBinaryLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalOr);

    armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
    armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");

    armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalOrLayer");

    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output1");

    armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
    armnn::TensorInfo inputTensorInfo1({1, 1, 1, 4}, InputDataType);

    armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);

    input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
    input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsLogicalBinaryLayerBroadcastSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalAnd);

    armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
    armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");

    armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalAndLayer");

    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output2");

    armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
    armnn::TensorInfo inputTensorInfo1({1, 1, 1, 1}, InputDataType);

    armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);

    input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
    input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {1, 0};
    armnn::MeanDescriptor desc(axes, false);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({4, 3, 2}, InputDataType);
    armnn::TensorInfo outputTensorInfo({2}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

// Tests that IsMeanSupported fails when, with keepDims == true, the number of input tensor
// dimensions does not match the number of output tensor dimensions.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {};
    // Set keepDims == true.
    armnn::MeanDescriptor desc(axes, true);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Mismatching number of tensor dimensions.
    armnn::TensorInfo inputTensorInfo({1, 1, 1, 1}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

template<typename FactoryType, armnn::DataType OutputDataType>
bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;

    armnn::Layer* const layer = graph.AddLayer<armnn::ConstantLayer>("ConstantLayerName");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "OutputLayerName");

    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, OutputDataType, reasonIfUnsupported);

    return result;
}

} // namespace