1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6
7 #include <Graph.hpp>
8
9 #include <backendsCommon/MapWorkload.hpp>
10 #include <backendsCommon/UnmapWorkload.hpp>
11 #include <backendsCommon/WorkloadFactory.hpp>
12
13 #include <armnn/utility/IgnoreUnused.hpp>
14
15 namespace
16 {
17 armnn::Graph dummyGraph;
18
19 // Make a dummy TensorInfo object.
20 template<armnn::DataType DataType>
MakeDummyTensorInfo()21 armnn::TensorInfo MakeDummyTensorInfo()
22 {
23 return armnn::TensorInfo({2,2,2,2}, DataType, 1.0, 0);
24 }
25
26
27 // Make a dummy WorkloadInfo using a dummy TensorInfo.
28 template<armnn::DataType DataType>
MakeDummyWorkloadInfo(unsigned int numInputs,unsigned int numOutputs)29 armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
30 {
31 armnn::WorkloadInfo info;
32
33 for (unsigned int i=0; i < numInputs; i++)
34 {
35 info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
36 }
37
38 for (unsigned int o=0; o < numOutputs; o++)
39 {
40 info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
41 }
42
43 return info;
44 }
45
46 // Template class to create a dummy layer (2 parameters).
47 template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
48 struct DummyLayer
49 {
DummyLayer__anoncbb09ab20111::DummyLayer50 DummyLayer()
51 {
52 m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
53 }
54
~DummyLayer__anoncbb09ab20111::DummyLayer55 ~DummyLayer()
56 {
57 dummyGraph.EraseLayer(m_Layer);
58 }
59
60 LayerType* m_Layer;
61 };
62
63 // Template class to create a dummy layer (1 parameter).
64 template<typename LayerType>
65 struct DummyLayer<LayerType, void>
66 {
DummyLayer__anoncbb09ab20111::DummyLayer67 DummyLayer()
68 {
69 m_Layer = dummyGraph.AddLayer<LayerType>("");
70 }
71
~DummyLayer__anoncbb09ab20111::DummyLayer72 ~DummyLayer()
73 {
74 dummyGraph.EraseLayer(m_Layer);
75 }
76
77 LayerType* m_Layer;
78 };
79
80 template<>
81 struct DummyLayer<armnn::BatchNormalizationLayer>
82 {
DummyLayer__anoncbb09ab20111::DummyLayer83 DummyLayer()
84 {
85 m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
86 m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
87 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
88 m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
89 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
90 m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
91 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
92 m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
93 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
94 }
95
~DummyLayer__anoncbb09ab20111::DummyLayer96 ~DummyLayer()
97 {
98 dummyGraph.EraseLayer(m_Layer);
99 }
100
101 armnn::BatchNormalizationLayer* m_Layer;
102 };
103
104 template<>
105 struct DummyLayer<armnn::BatchToSpaceNdLayer>
106 {
DummyLayer__anoncbb09ab20111::DummyLayer107 DummyLayer()
108 {
109 m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(armnn::BatchToSpaceNdDescriptor(), "");
110 }
111
~DummyLayer__anoncbb09ab20111::DummyLayer112 ~DummyLayer()
113 {
114 dummyGraph.EraseLayer(m_Layer);
115 }
116
117 armnn::BatchToSpaceNdLayer* m_Layer;
118 };
119
120 template<>
121 struct DummyLayer<armnn::ConstantLayer, void>
122 {
DummyLayer__anoncbb09ab20111::DummyLayer123 DummyLayer()
124 {
125 m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
126 }
127
~DummyLayer__anoncbb09ab20111::DummyLayer128 ~DummyLayer()
129 {
130 dummyGraph.EraseLayer(m_Layer);
131 }
132
133 armnn::ConstantLayer* m_Layer;
134 };
135
136 template<>
137 struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
138 {
DummyLayer__anoncbb09ab20111::DummyLayer139 DummyLayer()
140 {
141 m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
142 }
143
~DummyLayer__anoncbb09ab20111::DummyLayer144 ~DummyLayer()
145 {
146 dummyGraph.EraseLayer(m_Layer);
147 }
148
149 armnn::InputLayer* m_Layer;
150 };
151
152 template<>
153 struct DummyLayer<armnn::ConcatLayer>
154 {
DummyLayer__anoncbb09ab20111::DummyLayer155 DummyLayer()
156 {
157 armnn::OriginsDescriptor desc(2);
158 m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
159 }
160
~DummyLayer__anoncbb09ab20111::DummyLayer161 ~DummyLayer()
162 {
163 dummyGraph.EraseLayer(m_Layer);
164 }
165
166 armnn::ConcatLayer* m_Layer;
167 };
168
169 template<>
170 struct DummyLayer<armnn::MapLayer, void>
171 {
DummyLayer__anoncbb09ab20111::DummyLayer172 DummyLayer()
173 {
174 m_Layer = dummyGraph.AddLayer<armnn::MapLayer>("");
175 }
176
~DummyLayer__anoncbb09ab20111::DummyLayer177 ~DummyLayer()
178 {
179 dummyGraph.EraseLayer(m_Layer);
180 }
181
182 armnn::MapLayer* m_Layer;
183 };
184
185 template<>
186 struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
187 {
DummyLayer__anoncbb09ab20111::DummyLayer188 DummyLayer()
189 {
190 m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
191 }
192
~DummyLayer__anoncbb09ab20111::DummyLayer193 ~DummyLayer()
194 {
195 dummyGraph.EraseLayer(m_Layer);
196 }
197
198 armnn::OutputLayer* m_Layer;
199 };
200
201 template<>
202 struct DummyLayer<armnn::SplitterLayer>
203 {
DummyLayer__anoncbb09ab20111::DummyLayer204 DummyLayer()
205 {
206 armnn::ViewsDescriptor desc(1);
207 m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
208 }
209
~DummyLayer__anoncbb09ab20111::DummyLayer210 ~DummyLayer()
211 {
212 dummyGraph.EraseLayer(m_Layer);
213 }
214
215 armnn::SplitterLayer* m_Layer;
216 };
217
218 template<>
219 struct DummyLayer<armnn::UnmapLayer, void>
220 {
DummyLayer__anoncbb09ab20111::DummyLayer221 DummyLayer()
222 {
223 m_Layer = dummyGraph.AddLayer<armnn::UnmapLayer>("");
224 }
225
~DummyLayer__anoncbb09ab20111::DummyLayer226 ~DummyLayer()
227 {
228 dummyGraph.EraseLayer(m_Layer);
229 }
230
231 armnn::UnmapLayer* m_Layer;
232 };
233
234 template <typename ConvolutionLayerType>
235 struct DummyConvolutionLayer
236 {
DummyConvolutionLayer__anoncbb09ab20111::DummyConvolutionLayer237 DummyConvolutionLayer()
238 {
239 typename ConvolutionLayerType::DescriptorType desc;
240 desc.m_StrideX = 1;
241 desc.m_StrideY = 1;
242 m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
243 m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
244 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
245 m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
246 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
247 }
248
~DummyConvolutionLayer__anoncbb09ab20111::DummyConvolutionLayer249 ~DummyConvolutionLayer()
250 {
251 dummyGraph.EraseLayer(m_Layer);
252 }
253
254 ConvolutionLayerType* m_Layer;
255 };
256
// The three convolution-style layers all reuse the common
// DummyConvolutionLayer implementation above (stride 1x1, dummy weight/bias).
template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
{
};
274
275 template<>
276 struct DummyLayer<armnn::DetectionPostProcessLayer>
277 {
DummyLayer__anoncbb09ab20111::DummyLayer278 DummyLayer()
279 {
280 m_Layer = dummyGraph.AddLayer<armnn::DetectionPostProcessLayer>(armnn::DetectionPostProcessDescriptor(), "");
281 m_Layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(
282 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
283 }
284
~DummyLayer__anoncbb09ab20111::DummyLayer285 ~DummyLayer()
286 {
287 dummyGraph.EraseLayer(m_Layer);
288 }
289
290 armnn::DetectionPostProcessLayer* m_Layer;
291 };
292
293 template <typename LstmLayerType>
294 struct DummyLstmLayer
295 {
DummyLstmLayer__anoncbb09ab20111::DummyLstmLayer296 DummyLstmLayer()
297 {
298 typename LstmLayerType::DescriptorType desc;
299 desc.m_CifgEnabled = false;
300
301 m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), "");
302 m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
303 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
304 m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
305 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
306 m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
307 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
308 m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
309 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
310 m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
311 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
312 m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
313 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
314 m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
315 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
316 m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
317 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
318 m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
319 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
320
321 m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
322 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
323 m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
324 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
325 m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
326 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
327 }
328
~DummyLstmLayer__anoncbb09ab20111::DummyLstmLayer329 ~DummyLstmLayer()
330 {
331 dummyGraph.EraseLayer(m_Layer);
332 }
333
334 armnn::LstmLayer* m_Layer;
335 };
336
// LstmLayer reuses the common DummyLstmLayer implementation.
template<>
struct DummyLayer<armnn::LstmLayer>
    : public DummyLstmLayer<armnn::LstmLayer>
{
};
342
343 template <typename QLstmLayerType>
344 struct DummyQLstmLayer
345 {
DummyQLstmLayer__anoncbb09ab20111::DummyQLstmLayer346 DummyQLstmLayer()
347 {
348 typename QLstmLayerType::DescriptorType desc;
349 desc.m_CifgEnabled = false;
350 desc.m_PeepholeEnabled = true;
351 desc.m_ProjectionEnabled = true;
352 desc.m_LayerNormEnabled = true;
353
354 m_Layer = dummyGraph.AddLayer<QLstmLayerType>(armnn::QLstmDescriptor(), "qLstm");
355
356 // Basic params
357 m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
358 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
359 m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
360 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
361 m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
362 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
363
364 m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
365 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
366 m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
367 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
368 m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
369 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
370
371 m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
372 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
373 m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
374 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
375 m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
376 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
377
378 // CIFG optional params
379 m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
380 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
381 m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
382 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
383 m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
384 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
385
386 // Projection optional params
387 m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
388 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
389 m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
390 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
391
392 // Peephole optional params
393 m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
394 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
395 m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
396 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
397 m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
398 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
399
400 // Layer normalization optional params
401 m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
402 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
403 m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
404 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
405 m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
406 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
407 m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
408 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
409 }
410
~DummyQLstmLayer__anoncbb09ab20111::DummyQLstmLayer411 ~DummyQLstmLayer()
412 {
413 dummyGraph.EraseLayer(m_Layer);
414 }
415
416 armnn::QLstmLayer* m_Layer;
417 };
418
419 template<>
420 struct DummyLayer<armnn::QuantizedLstmLayer, void>
421 {
DummyLayer__anoncbb09ab20111::DummyLayer422 DummyLayer()
423 {
424 m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");
425
426 m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
427 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
428 m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
429 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
430 m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
431 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
432 m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
433 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
434
435 m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
436 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
437 m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
438 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
439 m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
440 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
441 m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
442 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
443
444 m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
445 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
446 m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
447 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
448 m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
449 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
450 m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
451 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
452 }
453
~DummyLayer__anoncbb09ab20111::DummyLayer454 ~DummyLayer()
455 {
456 dummyGraph.EraseLayer(m_Layer);
457 }
458
459 armnn::QuantizedLstmLayer* m_Layer;
460 };
461
462 template<>
463 struct DummyLayer<armnn::FullyConnectedLayer>
464 {
DummyLayer__anoncbb09ab20111::DummyLayer465 DummyLayer()
466 {
467 armnn::FullyConnectedLayer::DescriptorType desc;
468 m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
469 m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
470 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
471 }
472
~DummyLayer__anoncbb09ab20111::DummyLayer473 ~DummyLayer()
474 {
475 dummyGraph.EraseLayer(m_Layer);
476 }
477
478 armnn::FullyConnectedLayer* m_Layer;
479 };
480
// Tag for giving LayerType entries a unique strong type each, so overloads of
// the test functions below can be selected per layer type via tag dispatch.
template<armnn::LayerType>
struct Tag{};
484
// Specializes LayerTypePolicy for the given layer, with an explicit descriptor
// type. MakeDummyWorkload delegates to the factory's Create<name>() method.
#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};
503
// Variant of the policy for Map/Unmap: the workload is constructed directly
// (std::make_unique) rather than through the workload factory.
#define DECLARE_LAYER_POLICY_MAP_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    using Workload = armnn::name##Workload; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory); \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return std::make_unique<armnn::name##Workload>(desc, info); \
    } \
};
524
// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter(name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters(descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)
532
533
// Policy for layers exempt from the IsLayerSupported test (IsException = true);
// MakeDummyWorkload returns an empty pointer and is never expected to be used.
#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = true; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory, nIn, nOut); \
        return std::unique_ptr<armnn::IWorkload>(); \
    } \
};

#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)
553
// Layer policy template. Declared but never defined: every armnn::LayerType
// must provide a specialization via the DECLARE_LAYER_POLICY_* macros below,
// so a missing entry fails at compile time.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;
557
// Every entry in the armnn::LayerType enum must be accounted for below.
// (The list is mostly, but not strictly, alphabetical — e.g. MemCopy/MemImport
// appear after Convolution2d and Division after QuantizedLstm.)
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)

DECLARE_LAYER_POLICY_2_PARAM(Comparison)

DECLARE_LAYER_POLICY_2_PARAM(Concat)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertBf16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToBf16)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_1_PARAM(MemImport)

DECLARE_LAYER_POLICY_1_PARAM(Debug)

DECLARE_LAYER_POLICY_2_PARAM(DepthToSpace)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_1_PARAM(Dequantize)

DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)

DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_2_PARAM(Fill)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_2_PARAM(Gather)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(LogicalBinary)

DECLARE_LAYER_POLICY_2_PARAM(LogSoftmax)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_MAP_PARAM(Map, void)

DECLARE_LAYER_POLICY_1_PARAM(Maximum)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_1_PARAM(Merge)

DECLARE_LAYER_POLICY_1_PARAM(Minimum)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_1_PARAM(Quantize)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)

DECLARE_LAYER_POLICY_1_PARAM(Prelu)
DECLARE_LAYER_POLICY_2_PARAM(QLstm)

DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_1_PARAM(Rank)

DECLARE_LAYER_POLICY_2_PARAM(Resize)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Slice)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_2_PARAM(Stack)

// StandIn is a placeholder layer with no workload; it is exempt from the test.
DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)

DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)

DECLARE_LAYER_POLICY_1_PARAM(Switch)

DECLARE_LAYER_POLICY_2_PARAM(Transpose)

DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)

DECLARE_LAYER_POLICY_MAP_PARAM(Unmap, void)
687
688
689 // Generic implementation to get the number of input slots for a given layer type;
690 template<armnn::LayerType Type>
691 unsigned int GetNumInputs(const armnn::Layer& layer)
692 {
693 return layer.GetNumInputSlots();
694 }
695
696 // Generic implementation to get the number of output slots for a given layer type;
697 template<armnn::LayerType Type>
GetNumOutputs(const armnn::Layer & layer)698 unsigned int GetNumOutputs(const armnn::Layer& layer)
699 {
700 return layer.GetNumOutputSlots();
701 }
702
703 template<>
GetNumInputs(const armnn::Layer & layer)704 unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
705 {
706 IgnoreUnused(layer);
707 return 2;
708 }
709
710 // Tests that the IsLayerSupported() function returns the correct value.
711 // We determined the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
712 // Returns true if expectations are met, otherwise returns false.
713 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
IsLayerSupportedTest(FactoryType * factory,Tag<Type>)714 bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
715 {
716 using LayerPolicy = LayerTypePolicy<Type, DataType>;
717 using LayerType = typename LayerPolicy::Type;
718 using LayerDesc = typename LayerPolicy::Desc;
719 DummyLayer<LayerType, LayerDesc> layer;
720
721 if (LayerPolicy::IsException) //Don't test exceptions to the rule.
722 {
723 return true;
724 }
725
726 unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
727 unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);
728
729 // Make another dummy layer just to make IsLayerSupported have valid inputs.
730 DummyLayer<armnn::ConstantLayer, void> previousLayer;
731 // Set output of the previous layer to a dummy tensor.
732 armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
733 previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
734 // Connect all outputs of the previous layer to inputs of tested layer.
735 for (unsigned int i = 0; i < numIn; i++)
736 {
737 armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
738 armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
739 previousLayerOutputSlot.Connect(layerInputSlot);
740 }
741 // Set outputs of tested layer to a dummy tensor.
742 for (unsigned int i = 0; i < numOut; i++)
743 {
744 layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
745 }
746
747 std::string layerName = LayerPolicy::NameStr;
748 std::string reasonIfUnsupported;
749 if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
750 {
751 std::string errorMsg = " layer expected support but found none.";
752 try
753 {
754 bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
755 BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
756 return retVal;
757 }
758 catch(const armnn::InvalidArgumentException& e)
759 {
760 IgnoreUnused(e);
761 // This is ok since we throw InvalidArgumentException when creating the dummy workload.
762 return true;
763 }
764 catch(const std::exception& e)
765 {
766 errorMsg = e.what();
767 BOOST_TEST_ERROR(layerName << ": " << errorMsg);
768 return false;
769 }
770 catch(...)
771 {
772 errorMsg = "Unexpected error while testing support for ";
773 BOOST_TEST_ERROR(errorMsg << layerName);
774 return false;
775 }
776 }
777 else
778 {
779 std::string errorMsg = "layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
780 try
781 {
782 bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
783 BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
784 return retVal;
785 }
786 // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
787 // using parameters that make IsLayerSupported() return false should throw an
788 // InvalidArgumentException or UnimplementedException.
789 catch(const armnn::InvalidArgumentException& e)
790 {
791 IgnoreUnused(e);
792 return true;
793 }
794 catch(const armnn::UnimplementedException& e)
795 {
796 IgnoreUnused(e);
797 return true;
798 }
799 catch(const std::exception& e)
800 {
801 errorMsg = e.what();
802 BOOST_TEST_ERROR(layerName << ": " << errorMsg);
803 return false;
804 }
805 catch(...)
806 {
807 errorMsg = "Unexpected error while testing support for ";
808 BOOST_TEST_ERROR(errorMsg << layerName);
809 return false;
810 }
811 }
812 }
813
814 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
IsLayerSupportedTest(FactoryType * factory,Tag<armnn::LayerType::Map>)815 bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Map>)
816 {
817 IgnoreUnused(factory);
818 return true;
819 }
820
821 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
IsLayerSupportedTest(FactoryType * factory,Tag<armnn::LayerType::Unmap>)822 bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Unmap>)
823 {
824 IgnoreUnused(factory);
825 return true;
826 }
827
828 // Helper function to compute the next type in the LayerType enum.
NextType(armnn::LayerType type)829 constexpr armnn::LayerType NextType(armnn::LayerType type)
830 {
831 return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
832 }
833
834 // Termination function for determining the end of the LayerType enumeration.
835 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
IsLayerSupportedTestsImpl(FactoryType * factory,Tag<armnn::LayerType::LastLayer>)836 bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
837 {
838 return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
839 }
840
841 // Recursive function to test and enter in the LayerType enum and then iterate on the next entry.
842 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
IsLayerSupportedTestsImpl(FactoryType * factory,Tag<Type>)843 bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
844 {
845 bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
846
847 return v &&
848 IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
849 (factory, Tag<NextType(Type)>());
850 }
851
852 // Helper function to pass through to the test framework.
853 template<typename FactoryType, armnn::DataType DataType>
IsLayerSupportedTests(FactoryType * factory)854 bool IsLayerSupportedTests(FactoryType *factory)
855 {
856 return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
857 }
858
859 template<armnn::LayerType Type>
TestLayerTypeMatches()860 bool TestLayerTypeMatches()
861 {
862 using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
863 using LayerType = typename LayerPolicy::Type;
864 using LayerDesc = typename LayerPolicy::Desc;
865 DummyLayer<LayerType, LayerDesc> layer;
866
867 std::stringstream ss;
868 ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
869 bool v = Type == layer.m_Layer->GetType();
870 BOOST_CHECK_MESSAGE(v, ss.str());
871 return v;
872 }
873
874 template<armnn::LayerType Type>
LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)875 bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
876 {
877 return TestLayerTypeMatches<Type>();
878 }
879
880 template<armnn::LayerType Type>
LayerTypeMatchesTestImpl(Tag<Type>)881 bool LayerTypeMatchesTestImpl(Tag<Type>)
882 {
883 return TestLayerTypeMatches<Type>() &&
884 LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
885 }
886
887 template<typename FactoryType, typename LayerType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
IsConvertLayerSupportedTests(std::string & reasonIfUnsupported)888 bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
889 {
890 armnn::Graph graph;
891 LayerType* const layer = graph.AddLayer<LayerType>("LayerName");
892
893 armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
894 armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
895
896 armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
897 armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);
898
899 input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
900 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
901 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
902 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
903
904 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
905
906 return result;
907 }
908
909 template<typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
IsLogicalBinaryLayerSupportedTests(std::string & reasonIfUnsupported)910 bool IsLogicalBinaryLayerSupportedTests(std::string& reasonIfUnsupported)
911 {
912 armnn::Graph graph;
913 armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalOr);
914
915 armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
916 armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");
917
918 armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalOrLayer");
919
920 armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output1");
921
922 armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
923 armnn::TensorInfo inputTensorInfo1({1, 1, 1, 4}, InputDataType);
924
925 armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);
926
927 input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
928 input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
929
930 input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
931 input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);
932
933 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
934 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
935
936 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
937
938 return result;
939 }
940
941 template<typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
IsLogicalBinaryLayerBroadcastSupportedTests(std::string & reasonIfUnsupported)942 bool IsLogicalBinaryLayerBroadcastSupportedTests(std::string& reasonIfUnsupported)
943 {
944 armnn::Graph graph;
945 armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalAnd);
946
947 armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
948 armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");
949
950 armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalAndLayer");
951
952 armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output2");
953
954 armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
955 armnn::TensorInfo inputTensorInfo1({1, 1, 1, 1}, InputDataType);
956
957 armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);
958
959 input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
960 input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
961
962 input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
963 input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);
964
965 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
966 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
967
968 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
969
970 return result;
971 }
972
973 template<typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
IsMeanLayerSupportedTests(std::string & reasonIfUnsupported)974 bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
975 {
976 armnn::Graph graph;
977 static const std::vector<unsigned> axes = {1, 0};
978 armnn::MeanDescriptor desc(axes, false);
979
980 armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");
981
982 armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
983 armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
984
985 armnn::TensorInfo inputTensorInfo({4, 3, 2}, InputDataType);
986 armnn::TensorInfo outputTensorInfo({2}, OutputDataType);
987
988 input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
989 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
990 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
991 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
992
993 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
994
995 return result;
996 }
997
998 // Tests that IsMeanSupported fails when input tensor dimensions
999 // do not match output tensor dimensions when keepDims == true
1000 template<typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
IsMeanLayerNotSupportedTests(std::string & reasonIfUnsupported)1001 bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
1002 {
1003 armnn::Graph graph;
1004 static const std::vector<unsigned> axes = {};
1005 // Set keepDims == true
1006 armnn::MeanDescriptor desc(axes, true);
1007
1008 armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");
1009
1010 armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
1011 armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
1012
1013 // Mismatching number of tensor dimensions
1014 armnn::TensorInfo inputTensorInfo({1, 1, 1, 1}, InputDataType);
1015 armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);
1016
1017 input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
1018 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
1019 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
1020 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
1021
1022 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
1023
1024 return result;
1025 }
1026
1027 template<typename FactoryType, armnn::DataType OutputDataType>
IsConstantLayerSupportedTests(std::string & reasonIfUnsupported)1028 bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
1029 {
1030 armnn::Graph graph;
1031
1032 armnn::Layer* const layer = graph.AddLayer<armnn::ConstantLayer>("ConstantLayerName");
1033 armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "OutputLayerName");
1034
1035 armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);
1036
1037 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
1038 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
1039
1040 bool result = FactoryType::IsLayerSupported(*layer, OutputDataType, reasonIfUnsupported);
1041
1042 return result;
1043 }
1044
1045 } //namespace
1046