//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <Layer.hpp>
#include <LayersFwd.hpp>

#include <armnn/Types.hpp>
#include <armnn/LayerSupport.hpp>
#include <armnn/ILayerSupport.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/utility/TransformIterator.hpp>

#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <sstream>

namespace armnn
{

namespace
{
using LayerList = std::list<Layer*>;
using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
{
    if (!type)
    {
        return info;
    }

    return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
}
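
// A minimal usage sketch (hypothetical values), illustrating that only the
// data type is rewritten while shape and quantization parameters carry over:
//
//     TensorInfo fp32Info(TensorShape({1, 2, 2, 3}), DataType::Float32);
//     TensorInfo fp16Info = OverrideDataType(fp32Info, Optional<DataType>(DataType::Float16));
//     // fp16Info now reports DataType::Float16 with the same shape.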

} // anonymous namespace

bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
                                                     const IConnectableLayer& connectableLayer,
                                                     Optional<DataType> dataType,
                                                     std::string& outReasonIfUnsupported,
                                                     const ModelOptions& modelOptions)
{
    Optional<std::string&> reason = outReasonIfUnsupported;
    bool result;
    const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));

    auto const& backendRegistry = BackendRegistryInstance();
    if (!backendRegistry.IsBackendRegistered(backendId))
    {
        std::stringstream ss;
        ss << connectableLayer.GetName() << " is not supported on " << backendId
           << " because this backend is not registered.";

        outReasonIfUnsupported = ss.str();
        return false;
    }

    auto backendFactory = backendRegistry.GetFactory(backendId);
    auto backendObject = backendFactory();
    auto layerSupportObject = backendObject->GetLayerSupport(modelOptions);
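
    // Each case below gathers the layer's tensor infos and parameters and
    // forwards them to the backend's ILayerSupport query, optionally
    // overriding the tensor data type first.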
    switch(layer.GetType())
    {
        case LayerType::Activation:
        {
            auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsActivationSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                cLayer->GetParameters(),
                reason);
            break;
        }
        case LayerType::Addition:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsAdditionSupported(
                OverrideDataType(input0, dataType),
                OverrideDataType(input1, dataType),
                OverrideDataType(output, dataType),
                reason);
            break;
        }
        case LayerType::ArgMinMax:
        {
            auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
            const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
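            // ArgMin/ArgMax output indices, so the output info is checked as
            // Signed32 rather than with the requested data type override.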
            result = layerSupportObject->IsArgMinMaxSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, DataType::Signed32),
                descriptor,
                reason);
            break;
        }
        case LayerType::BatchNormalization:
        {
            auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
            const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
            const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
            const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
            result = layerSupportObject->IsBatchNormalizationSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                OverrideDataType(mean, dataType),
                OverrideDataType(var, dataType),
                OverrideDataType(beta, dataType),
                OverrideDataType(gamma, dataType),
                cLayer->GetParameters(),
                reason);
            break;
        }
        case LayerType::BatchToSpaceNd:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);

            result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
                                                                   OverrideDataType(output, dataType),
                                                                   cLayer->GetParameters(),
                                                                   reason);
            break;
        }
        case LayerType::Comparison:
        {
            auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);

            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsComparisonSupported(OverrideDataType(input0, dataType),
                                                               OverrideDataType(input1, dataType),
                                                               OverrideDataType(output, DataType::Boolean),
                                                               cLayer->GetParameters(),
                                                               reason);
            break;
        }
        case LayerType::Constant:
        {
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);
            break;
        }
        case LayerType::ConvertBf16ToFp32:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertBf16ToFp32Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp16ToFp32:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp32ToBf16:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertFp32ToBf16Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp32ToFp16:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason);
            break;
        }
        case LayerType::Convolution2d:
        {
            auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);

            const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                      dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);

            const Convolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                biases =
                    OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
            }
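
            // GetBiasTypeFromWeightsType derives the bias data type from the
            // (possibly overridden) weights type; for quantized weights this is
            // typically Signed32, while float weights keep a float bias.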
            result = layerSupportObject->IsConvolution2dSupported(
                input,
                output,
                descriptor,
                OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                biases,
                reason);
            break;
        }
        case LayerType::Debug:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::DepthToSpace:
        {
            auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsDepthToSpaceSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);

            const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                biases =
                    OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
            }

            result = layerSupportObject->IsDepthwiseConvolutionSupported(
                input,
                output,
                descriptor,
                OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                biases,
                reason);
            break;
        }
        case LayerType::Dequantize:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsDequantizeSupported(input,
                                                               OverrideDataType(output, dataType),
                                                               reason);
            break;
        }
        case LayerType::DetectionPostProcess:
        {
            auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
            const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();

            const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
            const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
            const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();

            const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
            result = layerSupportObject->IsDetectionPostProcessSupported(boxEncodings,
                                                                         scores,
                                                                         anchors,
                                                                         detectionBoxes,
                                                                         detectionClasses,
                                                                         detectionScores,
                                                                         numDetections,
                                                                         descriptor,
                                                                         reason);
            break;
        }
        case LayerType::ElementwiseUnary:
        {
            auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType),
                                                                     OverrideDataType(output, dataType),
                                                                     cLayer->GetParameters(),
                                                                     reason);
            break;
        }
        case LayerType::Fill:
        {
            auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            const FillDescriptor& descriptor = cLayer->GetParameters();

            result = layerSupportObject->IsFillSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                descriptor,
                reason);
            break;
        }
        case LayerType::FakeQuantization:
        {
            auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType),
                                                                     cLayer->GetParameters(),
                                                                     reason);
            break;
        }
        case LayerType::Floor:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::FullyConnected:
        {
            auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);

            TensorInfo biasInfo;
            const TensorInfo* biasInfoPtr = nullptr;
            static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
            static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
            static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
            static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);

            const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
            if (descriptor.m_BiasEnabled)
            {
                ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
                biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
                biasInfoPtr = &biasInfo;
            }
            else
            {
                // If biases are not enabled pass a dummy tensorinfo for the validation
                switch(input.GetDataType())
                {
                    case DataType::BFloat16:
                    {
                        biasInfoPtr = &dummyBFloat16Bias;
                        break;
                    }
                    case DataType::Float16:
                    {
                        biasInfoPtr = &dummyFloat16Bias;
                        break;
                    }
                    case DataType::Float32:
                    {
                        biasInfoPtr = &dummyFloat32Bias;
                        break;
                    }
                    case DataType::QAsymmU8:
                    case DataType::QAsymmS8:
                    case DataType::QSymmS8:
                    case DataType::QSymmS16:
                    {
                        biasInfoPtr = &dummyQA8Bias;
                        break;
                    }
                    default:
                    {
                        ARMNN_ASSERT_MSG(false, "Unexpected bias type");
                    }
                }
            }

            result = layerSupportObject->IsFullyConnectedSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                *biasInfoPtr,
                descriptor,
                reason);
            break;
        }
        case LayerType::Gather:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
            const GatherDescriptor& descriptor = cLayer->GetParameters();
            result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
                                                           input1,
                                                           OverrideDataType(output, dataType),
                                                           descriptor,
                                                           reason);
            break;
        }
        case LayerType::Input:
        {
            const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason);
            break;
        }
        case LayerType::InstanceNormalization:
        {
            auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
            const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsInstanceNormalizationSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                descriptor,
                reason);
            break;
        }
        case LayerType::L2Normalization:
        {
            auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
            const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsL2NormalizationSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                descriptor,
                reason);
            break;
        }
        case LayerType::LogicalBinary:
        {
            auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);

            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsLogicalBinarySupported(input0,
                                                                  input1,
                                                                  output,
                                                                  cLayer->GetParameters(),
                                                                  reason);
            break;
        }
        case LayerType::LogSoftmax:
        {
            auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsLogSoftmaxSupported(OverrideDataType(input, dataType),
                                                               OverrideDataType(output, dataType),
                                                               cLayer->GetParameters(),
                                                               reason);
            break;
        }
        case LayerType::Lstm:
        {
            auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
            const LstmDescriptor& descriptor = cLayer->GetParameters();

            // All inputs.
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
                                                               dataType);
            const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
                                                             dataType);
            // All outputs
            const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
            const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);

            // Basic parameters
            const TensorInfo& inputToForgetWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToCellWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToOutputWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToForgetWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToCellWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToOutputWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& forgetGateBias
                = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
            const TensorInfo& cellBias
                = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
            const TensorInfo& outputGateBias
                = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);

            LstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
            paramsInfo.m_InputToCellWeights = &inputToCellWeights;
            paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
            paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
            paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
            paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
            paramsInfo.m_ForgetGateBias = &forgetGateBias;
            paramsInfo.m_CellBias = &cellBias;
            paramsInfo.m_OutputGateBias = &outputGateBias;

            // Optional parameters
            TensorInfo optInputToInputWeights;
            TensorInfo optRecurrentToInputWeights;
            TensorInfo optCellToInputWeights;
            TensorInfo optInputGateBias;
            TensorInfo optProjectionWeights;
            TensorInfo optProjectionBias;
            TensorInfo optCellToForgetWeights;
            TensorInfo optCellToOutputWeights;
            TensorInfo optInputLayerNormWeights;
            TensorInfo optForgetLayerNormWeights;
            TensorInfo optCellLayerNormWeights;
            TensorInfo optOutputLayerNormWeights;

            if (!descriptor.m_CifgEnabled)
            {
                optInputToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_InputToInputWeights = &optInputToInputWeights;

                optRecurrentToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
                optInputGateBias =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
                paramsInfo.m_InputGateBias = &optInputGateBias;
            }

            if (descriptor.m_ProjectionEnabled)
            {
                optProjectionWeights =
                    OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ProjectionWeights = &optProjectionWeights;
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    optProjectionBias =
                        OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
                    paramsInfo.m_ProjectionBias = &optProjectionBias;
                }
            }

            if (descriptor.m_PeepholeEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    optCellToInputWeights =
                        OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
                                         dataType);
                    paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
                }
                optCellToForgetWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
                optCellToOutputWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
            }

            if (descriptor.m_LayerNormEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    optInputLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
                    paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
                }

                optForgetLayerNormWeights = OverrideDataType(
                    cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;

                optCellLayerNormWeights = OverrideDataType(
                    cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;

                optOutputLayerNormWeights = OverrideDataType(
                    cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
            }

            result = layerSupportObject->IsLstmSupported(
                input,
                outputStateIn,
                cellStateIn,
                scratchBuffer,
                outputStateOut,
                cellStateOut,
                output,
                descriptor,
                paramsInfo,
                reason);
            break;
        }
        case LayerType::Maximum:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMaximumSupported(OverrideDataType(input0, dataType),
                                                            OverrideDataType(input1, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
            break;
        }
        case LayerType::MemCopy:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
            break;
        }
        case LayerType::MemImport:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMemImportSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              reason);
            break;
        }
        case LayerType::Merge:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMergeSupported(OverrideDataType(input0, dataType),
                                                          OverrideDataType(input1, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::Concat:
        {
            auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
            {
                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
            };

            auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto getTensorInfoPtr = [](const TensorInfo& info)
            {
                return &info;
            };

            auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
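            // Note: inputPtrs points into the inputs vector above, so the
            // TensorInfo storage stays alive for the IsConcatSupported call.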

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);

            break;
        }
        case LayerType::Multiplication:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMultiplicationSupported(
                OverrideDataType(input0, dataType),
                OverrideDataType(input1, dataType),
                OverrideDataType(output, dataType),
                reason);
            break;
        }
        case LayerType::Normalization:
        {
            auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType),
                                                                  OverrideDataType(output, dataType),
                                                                  cLayer->GetParameters(),
                                                                  reason);
            break;
        }
        case LayerType::Output:
        {
            const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason);
            break;
        }
        case LayerType::Permute:
        {
            auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::Pad:
        {
            auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPadSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                cLayer->GetParameters(),
                reason);
            break;
        }
        case LayerType::Pooling2d:
        {
            auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              cLayer->GetParameters(),
                                                              reason);
            break;
        }
        case LayerType::PreCompiled:
        {
            auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType),
                                                                cLayer->GetParameters(),
                                                                reason);
            break;
        }
        case LayerType::Quantize:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsQuantizeSupported(input, output, reason);
            break;
        }
        case LayerType::QLstm:
        {
            auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
            const QLstmDescriptor& descriptor = cLayer->GetParameters();

            // Inputs
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();

            // Outputs
            const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();

            // Lstm parameters
            LstmInputParamsInfo paramsInfo;

            // Basic parameters
            paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
            paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
            paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();

            paramsInfo.m_RecurrentToForgetWeights =
                &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToCellWeights =
                &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToOutputWeights =
                &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();

            paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
            paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
            paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();
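            // QLstm parameters are already quantized, so their tensor infos
            // are passed through without a data type override.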

            if (!descriptor.m_CifgEnabled)
            {
                paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
                paramsInfo.m_RecurrentToInputWeights =
                    &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
                paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
            }

            if (descriptor.m_ProjectionEnabled)
            {
                paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();

                // Projection bias is optional even if projection is enabled
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
                }
            }

            if (descriptor.m_PeepholeEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    paramsInfo.m_CellToInputWeights =
                        &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
                }

                paramsInfo.m_CellToForgetWeights =
                    &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
                paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
            }

            if (descriptor.m_LayerNormEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    paramsInfo.m_InputLayerNormWeights =
                        &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
                }

                paramsInfo.m_ForgetLayerNormWeights =
                    &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
                paramsInfo.m_CellLayerNormWeights =
                    &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
                paramsInfo.m_OutputLayerNormWeights =
                    &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
            }

            result = layerSupportObject->IsQLstmSupported(input,
                                                          previousOutputIn,
                                                          previousCellStateIn,
                                                          outputStateOut,
                                                          cellStateOut,
                                                          output,
                                                          descriptor,
                                                          paramsInfo,
                                                          reason);
            break;
        }
        case LayerType::QuantizedLstm:
        {
            auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);

            // Inputs
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();

            // Outputs
            const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();

            // QuantizedLstm parameters
            QuantizedLstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToInputWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
            paramsInfo.m_InputToForgetWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
            paramsInfo.m_InputToCellWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
            paramsInfo.m_InputToOutputWeights =
                &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();

            paramsInfo.m_RecurrentToInputWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToForgetWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToCellWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToOutputWeights =
                &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();

            paramsInfo.m_InputGateBias =
                &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
            paramsInfo.m_ForgetGateBias =
                &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
            paramsInfo.m_CellBias =
                &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
            paramsInfo.m_OutputGateBias =
                &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();

            result = layerSupportObject->IsQuantizedLstmSupported(input,
                                                                  previousCellStateIn,
                                                                  previousOutputIn,
                                                                  cellStateOut,
                                                                  output,
                                                                  paramsInfo,
                                                                  reason);
            break;
        }
        case LayerType::Division:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsDivisionSupported(
                OverrideDataType(input0, dataType),
                OverrideDataType(input1, dataType),
                OverrideDataType(output, dataType),
                reason);
            break;
        }
        case LayerType::Rank:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsRankSupported(OverrideDataType(input, dataType),
                                                         OverrideDataType(output, dataType),
                                                         reason);
            break;
        }
        case LayerType::Reshape:
        {
            auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::Resize:
        {
            auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::Slice:
        {
            auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsSliceSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          cLayer->GetParameters(),
                                                          reason);
            break;
        }
        case LayerType::Softmax:
        {
            auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::SpaceToBatchNd:
        {
            auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
                                                                   OverrideDataType(output, dataType),
                                                                   cLayer->GetParameters(),
                                                                   reason);
            break;
        }
        case LayerType::SpaceToDepth:
        {
            auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::Splitter:
        {
            auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();

            // Get vector of all outputs.
            auto getTensorInfo = [&dataType](const OutputSlot& slot)
            {
                return OverrideDataType(slot.GetTensorInfo(), dataType);
            };
            auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
            auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> outputs(beginI, endI);

            const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());

            result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
                                                             outputPtrs,
                                                             cLayer->GetParameters(),
                                                             reason);
            break;
        }
        case LayerType::Stack:
        {
            auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
            {
                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
            };
            auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto getTensorInfoPtr = [](const TensorInfo& info)
            {
                return &info;
            };
            auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);

            break;
        }
        case LayerType::StandIn:
        {
            auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfoIn = [&dataType](const InputSlot& slot)
            {
                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
            };
            auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
            {
                return OverrideDataType(slot.GetTensorInfo(), dataType);
            };
            auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
            auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
            auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
            std::vector<TensorInfo> outputs(beginO, endO);

            auto getTensorInfoPtr = [](const TensorInfo& info)
            {
                return &info;
            };
            auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
            auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);

            auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
            auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);

            result = layerSupportObject->IsStandInSupported(inputPtrs,
                                                            outputPtrs,
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::StridedSlice:
        {
            auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::Subtraction:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSubtractionSupported(
                OverrideDataType(input0, dataType),
                OverrideDataType(input1, dataType),
                OverrideDataType(output, dataType),
                reason);
            break;
        }
        case LayerType::Switch:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
            result = layerSupportObject->IsSwitchSupported(OverrideDataType(input0, dataType),
                                                           OverrideDataType(input1, dataType),
                                                           OverrideDataType(output0, dataType),
                                                           OverrideDataType(output1, dataType),
                                                           reason);
            break;
        }
        case LayerType::Mean:
        {
            auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMeanSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                cLayer->GetParameters(),
                reason);
            break;
        }
        case LayerType::Minimum:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMinimumSupported(OverrideDataType(input0, dataType),
                                                            OverrideDataType(input1, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
            break;
        }
        case LayerType::Prelu:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPreluSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(alpha, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::Transpose:
        {
            auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsTransposeSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              cLayer->GetParameters(),
                                                              reason);
            break;
        }
        case LayerType::TransposeConvolution2d:
        {
            auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);

            const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                      dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);

            const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();

            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
                biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
                                          GetBiasTypeFromWeightsType(dataType));
            }

            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
            const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);

            result = layerSupportObject->IsTransposeConvolution2dSupported(input,
                                                                           output,
                                                                           descriptor,
                                                                           weights,
                                                                           biases,
                                                                           reason);

            break;
        }
        default:
        {
            ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
            reason.value() = "Unrecognised layer type";
            result = false;
            break;
        }
    }
    return result;
}
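
// A hypothetical caller sketch (backend id and layer pointer are illustrative):
//
//     std::string reason;
//     bool supported = IWorkloadFactory::IsLayerSupported(
//         BackendId("CpuRef"), *layer, DataType::Float32, reason);
//     if (!supported) { std::cerr << reason << std::endl; }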

bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                        const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported)
{
    return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
}

bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported)
{
    auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
    return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
}

// TODO merge with defaulted modelOptions above
bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported,
                                        const ModelOptions& modelOptions)
{
    auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
    return IsLayerConfigurationSupported(layer->GetBackendId(),
                                         connectableLayer,
                                         dataType,
                                         outReasonIfUnsupported,
                                         modelOptions);
}

bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                        const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported,
                                        const ModelOptions& modelOptions)
{
    return IsLayerConfigurationSupported(backendId,
                                         connectableLayer,
                                         dataType,
                                         outReasonIfUnsupported,
                                         modelOptions);
}

// Default Implementations
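// Each Create* function below returns an empty std::unique_ptr, signalling
// that this factory does not implement that workload; concrete backend
// factories override the subset they support and return real workloads.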
CreateAbs(const AbsQueueDescriptor &,const WorkloadInfo &) const1278 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
1279 const WorkloadInfo& /*info*/) const
1280 {
1281 return std::unique_ptr<IWorkload>();
1282 }
1283
CreateActivation(const ActivationQueueDescriptor &,const WorkloadInfo &) const1284 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
1285 const WorkloadInfo& /*info*/) const
1286 {
1287 return std::unique_ptr<IWorkload>();
1288 }
1289
CreateAddition(const AdditionQueueDescriptor &,const WorkloadInfo &) const1290 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
1291 const WorkloadInfo& /*info*/) const
1292 {
1293 return std::unique_ptr<IWorkload>();
1294 }
1295
CreateArgMinMax(const ArgMinMaxQueueDescriptor &,const WorkloadInfo &) const1296 std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
1297 const WorkloadInfo& /*info*/) const
1298 {
1299 return std::unique_ptr<IWorkload>();
1300 }
1301
CreateBatchNormalization(const BatchNormalizationQueueDescriptor &,const WorkloadInfo &) const1302 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
1303 const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1304 {
1305 return std::unique_ptr<IWorkload>();
1306 }
1307
CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor &,const WorkloadInfo &) const1308 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*desc*/,
1309 const WorkloadInfo& /*Info*/) const
1310 {
1311 return std::unique_ptr<IWorkload>();
1312 }
1313
CreateComparison(const ComparisonQueueDescriptor &,const WorkloadInfo &) const1314 std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
1315 const WorkloadInfo& /*info*/) const
1316 {
1317 return std::unique_ptr<IWorkload>();
1318 }
1319
CreateConcat(const ConcatQueueDescriptor &,const WorkloadInfo &) const1320 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
1321 const WorkloadInfo& /*info*/) const
1322 {
1323 return std::unique_ptr<IWorkload>();
1324 }
1325
CreateConstant(const ConstantQueueDescriptor &,const WorkloadInfo &) const1326 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
1327 const WorkloadInfo& /*info*/) const
1328 {
1329 return std::unique_ptr<IWorkload>();
1330 }
1331
CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor &,const WorkloadInfo &) const1332 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& /*desc*/,
1333 const WorkloadInfo& /*info*/) const
1334 {
1335 return std::unique_ptr<IWorkload>();
1336 }
1337
CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor &,const WorkloadInfo &) const1338 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
1339 const WorkloadInfo& /*info*/) const
1340 {
1341 return std::unique_ptr<IWorkload>();
1342 }
1343
CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor &,const WorkloadInfo &) const1344 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& /*desc*/,
1345 const WorkloadInfo& /*info*/) const
1346 {
1347 return std::unique_ptr<IWorkload>();
1348 }
1349
CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor &,const WorkloadInfo &) const1350 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
1351 const WorkloadInfo& /*info*/) const
1352 {
1353 return std::unique_ptr<IWorkload>();
1354 }
1355
CreateConvolution2d(const Convolution2dQueueDescriptor &,const WorkloadInfo &) const1356 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
1357 const WorkloadInfo& /*info*/) const
1358 {
1359 return std::unique_ptr<IWorkload>();
1360 }
1361
CreateDebug(const DebugQueueDescriptor &,const WorkloadInfo &) const1362 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
1363 const WorkloadInfo& /*info*/) const
1364 {
1365 return std::unique_ptr<IWorkload>();
1366 }
1367
CreateDepthToSpace(const DepthToSpaceQueueDescriptor &,const WorkloadInfo &) const1368 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
1369 const WorkloadInfo& /*info*/) const
1370 {
1371 return std::unique_ptr<IWorkload>();
1372 }
1373
CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &,const WorkloadInfo &) const1374 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
1375 const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1376 {
1377 return std::unique_ptr<IWorkload>();
1378 }
1379
CreateDequantize(const DequantizeQueueDescriptor &,const WorkloadInfo &) const1380 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
1381 const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1382 {
1383 return std::unique_ptr<IWorkload>();
1384 }
1385
CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor &,const WorkloadInfo &) const1386 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
1387 const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1388 {
1389 return std::unique_ptr<IWorkload>();
1390 }
1391
CreateDivision(const DivisionQueueDescriptor &,const WorkloadInfo &) const1392 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
1393 const WorkloadInfo& /*info*/) const
1394 {
1395 return std::unique_ptr<IWorkload>();
1396 }
1397
CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor &,const WorkloadInfo &) const1398 std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
1399 const WorkloadInfo& /*info*/) const
1400 {
1401 return std::unique_ptr<IWorkload>();
1402 }
1403
CreateEqual(const EqualQueueDescriptor &,const WorkloadInfo &) const1404 std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
1405 const WorkloadInfo& /*Info*/) const
1406 {
1407 return std::unique_ptr<IWorkload>();
1408 }
1409
std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
                                                                    const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateFill(const FillQueueDescriptor& /*descriptor*/,
                                                        const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
    const InstanceNormalizationQueueDescriptor& /*descriptor*/,
    const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*desc*/,
                                                                   const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

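// Logical NOT has no dedicated descriptor: CreateLogicalUnary reuses
// ElementwiseUnaryQueueDescriptor, whose parameters select the unary operation.
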
std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
                                                                const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
                                                              const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
                                                        const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
                                                        const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
                                                       const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
                                                               const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/,
                                                        const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
                                                                const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
                                                                const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
                                                               const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
    const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
    const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

} // namespace armnn