//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/Deprecated.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>

#include <backendsCommon/LayerSupportBase.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <sstream> // std::stringstream is used by the helpers below

namespace
{

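// Shared fallback for the IsXxxSupported() queries below: always reports "unsupported" and,
// when the caller asked for a reason, records which function is not implemented and where.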
bool DefaultLayerSupport(const char* func,
                         const char* file,
                         unsigned int line,
                         armnn::Optional<std::string&> reasonIfUnsupported)
{
    // NOTE: We only need to return the reason if the optional parameter is not empty
    if (reasonIfUnsupported)
    {
        std::stringstream message;
        message << func << " is not implemented [" << file << ":" << line << "]";

        reasonIfUnsupported.value() = message.str();
    }

    return false;
}

} // anonymous namespace

namespace armnn
{

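// By default every layer-support query in this base class falls through to DefaultLayerSupport()
// and so reports the layer as unsupported, apart from the handful of cases handled explicitly
// below (MemCopy, MemImport, Merger and ElementwiseUnary). Backend implementations derive from
// this class and override only the queries for the layers they actually support.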
bool LayerSupportBase::IsAbsSupported(const TensorInfo&, // input
                                      const TensorInfo&, // output
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsActivationSupported(const TensorInfo&, // input
                                             const TensorInfo&, // output
                                             const ActivationDescriptor&, // descriptor
                                             Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsAdditionSupported(const TensorInfo&, // input0
                                           const TensorInfo&, // input1
                                           const TensorInfo&, // output
                                           Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo&, // input
                                            const armnn::TensorInfo&, // output
                                            const armnn::ArgMinMaxDescriptor&, // descriptor
                                            armnn::Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo&, // input
                                                     const TensorInfo&, // output
                                                     const TensorInfo&, // mean
                                                     const TensorInfo&, // var
                                                     const TensorInfo&, // beta
                                                     const TensorInfo&, // gamma
                                                     const BatchNormalizationDescriptor&, // descriptor
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo&, // input
                                                 const TensorInfo&, // output
                                                 const BatchToSpaceNdDescriptor&, // descriptor
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsComparisonSupported(const TensorInfo&, // input0
                                             const TensorInfo&, // input1
                                             const TensorInfo&, // output
                                             const ComparisonDescriptor&, // descriptor
                                             Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*>, // inputs
                                         const TensorInfo&, // output
                                         const OriginsDescriptor&, // descriptor
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConstantSupported(const TensorInfo&, // output
                                           Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConvertBf16ToFp32Supported(const TensorInfo&, // input
                                                    const TensorInfo&, // output
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo&, // input
                                                    const TensorInfo&, // output
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConvertFp32ToBf16Supported(const TensorInfo&, // input
                                                    const TensorInfo&, // output
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo&, // input
                                                    const TensorInfo&, // output
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsConvolution2dSupported(const TensorInfo&, // input
                                                const TensorInfo&, // output
                                                const Convolution2dDescriptor&, // descriptor
                                                const TensorInfo&, // weights
                                                const Optional<TensorInfo>&, // biases
                                                Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDebugSupported(const TensorInfo&, // input
                                        const TensorInfo&, // output
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDepthToSpaceSupported(const TensorInfo&, // input
                                               const TensorInfo&, // output
                                               const DepthToSpaceDescriptor&, // descriptor
                                               Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDepthwiseConvolutionSupported(const TensorInfo&, // input
                                                       const TensorInfo&, // output
                                                       const DepthwiseConvolution2dDescriptor&, // descriptor
                                                       const TensorInfo&, // weights
                                                       const Optional<TensorInfo>&, // biases
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDequantizeSupported(const TensorInfo&, // input
                                             const TensorInfo&, // output
                                             Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDetectionPostProcessSupported(const TensorInfo&, // boxEncodings
                                                       const TensorInfo&, // scores
                                                       const TensorInfo&, // anchors
                                                       const TensorInfo&, // detectionBoxes
                                                       const TensorInfo&, // detectionClasses
                                                       const TensorInfo&, // detectionScores
                                                       const TensorInfo&, // numDetections
                                                       const DetectionPostProcessDescriptor&, // descriptor
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDilatedDepthwiseConvolutionSupported(const TensorInfo&, // input
                                                              const TensorInfo&, // output
                                                              const DepthwiseConvolution2dDescriptor&, // descriptor
                                                              const TensorInfo&, // weights
                                                              const Optional<TensorInfo>&, // biases
                                                              Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsDivisionSupported(const TensorInfo&, // input0
                                           const TensorInfo&, // input1
                                           const TensorInfo&, // output
                                           Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

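// Unlike the queries above, the element-wise unary query has a real fallback: Abs and Rsqrt are
// forwarded to the older per-operation queries, so backends that still override those deprecated
// entry points are reported as supporting the equivalent ElementwiseUnary operation.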
bool LayerSupportBase::IsElementwiseUnarySupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const ElementwiseUnaryDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.m_Operation == UnaryOperation::Abs)
    {
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        return IsAbsSupported(input, output, reasonIfUnsupported);
        ARMNN_NO_DEPRECATE_WARN_END
    }
    else if (descriptor.m_Operation == UnaryOperation::Rsqrt)
    {
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        return IsRsqrtSupported(input, output, reasonIfUnsupported);
        ARMNN_NO_DEPRECATE_WARN_END
    }
    return false;
}

bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo&, // input0
                                        const armnn::TensorInfo&, // input1
                                        const armnn::TensorInfo&, // output
                                        armnn::Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsFakeQuantizationSupported(const TensorInfo&, // input
                                                   const FakeQuantizationDescriptor&, // descriptor
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsFillSupported(const TensorInfo&, // input
                                       const TensorInfo&, // output
                                       const FillDescriptor&, // descriptor
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsFloorSupported(const TensorInfo&, // input
                                        const TensorInfo&, // output
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo&, // input
                                                 const TensorInfo&, // output
                                                 const TensorInfo&, // weights
                                                 const TensorInfo&, // biases
                                                 const FullyConnectedDescriptor&, // descriptor
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo&, // input0
                                         const armnn::TensorInfo&, // input1
                                         const armnn::TensorInfo&, // output
                                         armnn::Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo&, // input0
                                         const armnn::TensorInfo&, // input1
                                         const armnn::TensorInfo&, // output
                                         const GatherDescriptor&, // descriptor
                                         armnn::Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsGreaterSupported(const TensorInfo&, // input0
                                          const TensorInfo&, // input1
                                          const TensorInfo&, // output
                                          Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsInputSupported(const TensorInfo&, // input
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsInstanceNormalizationSupported(const TensorInfo&, // input
                                                        const TensorInfo&, // output
                                                        const InstanceNormalizationDescriptor&, // descriptor
                                                        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo&, // input
                                                  const TensorInfo&, // output
                                                  const L2NormalizationDescriptor&, // descriptor
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsLogicalBinarySupported(const TensorInfo&, // input0
                                                const TensorInfo&, // input1
                                                const TensorInfo&, // output
                                                const LogicalBinaryDescriptor&, // descriptor
                                                Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsLogicalUnarySupported(const TensorInfo&, // input
                                               const TensorInfo&, // output
                                               const ElementwiseUnaryDescriptor&, // descriptor
                                               Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsLogSoftmaxSupported(const TensorInfo&, // input
                                             const TensorInfo&, // output
                                             const LogSoftmaxDescriptor&, // descriptor
                                             Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsLstmSupported(const TensorInfo&, // input
                                       const TensorInfo&, // outputStateIn
                                       const TensorInfo&, // cellStateIn
                                       const TensorInfo&, // scratchBuffer
                                       const TensorInfo&, // outputStateOut
                                       const TensorInfo&, // cellStateOut
                                       const TensorInfo&, // output
                                       const LstmDescriptor&, // descriptor
                                       const LstmInputParamsInfo&, // paramsInfo
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsMaximumSupported(const TensorInfo&, // input0
                                          const TensorInfo&, // input1
                                          const TensorInfo&, // output
                                          Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsMeanSupported(const TensorInfo&, // input
                                       const TensorInfo&, // output
                                       const MeanDescriptor&, // descriptor
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

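// MemCopy and MemImport are the exceptions to the default behaviour: the base class reports them
// as supported unconditionally, so the reason parameter is ignored.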
bool LayerSupportBase::IsMemCopySupported(const armnn::TensorInfo&, // input
                                          const armnn::TensorInfo&, // output
                                          armnn::Optional<std::string&>) const // reasonIfUnsupported
{
    return true;
}

bool LayerSupportBase::IsMemImportSupported(const armnn::TensorInfo&, // input
                                            const armnn::TensorInfo&, // output
                                            armnn::Optional<std::string&>) const // reasonIfUnsupported
{
    return true;
}

bool LayerSupportBase::IsMergeSupported(const TensorInfo&, // input0
                                        const TensorInfo&, // input1
                                        const TensorInfo&, // output
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

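// Merger is the older name for the Concat layer; the deprecated query simply forwards to
// IsConcatSupported so both spellings give the same answer.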
bool LayerSupportBase::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
                                         const OriginsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}

bool LayerSupportBase::IsMinimumSupported(const TensorInfo&, // input0
                                          const TensorInfo&, // input1
                                          const TensorInfo&, // output
                                          Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsMultiplicationSupported(const TensorInfo&, // input0
                                                 const TensorInfo&, // input1
                                                 const TensorInfo&, // output
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsNormalizationSupported(const TensorInfo&, // input
                                                const TensorInfo&, // output
                                                const NormalizationDescriptor&, // descriptor
                                                Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsOutputSupported(const TensorInfo&, // output
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsPadSupported(const TensorInfo&, // input
                                      const TensorInfo&, // output
                                      const PadDescriptor&, // descriptor
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsPermuteSupported(const TensorInfo&, // input
                                          const TensorInfo&, // output
                                          const PermuteDescriptor&, // descriptor
                                          Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsPooling2dSupported(const TensorInfo&, // input
                                            const TensorInfo&, // output
                                            const Pooling2dDescriptor&, // descriptor
                                            Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo&, // input
                                              const PreCompiledDescriptor&, // descriptor
                                              Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsPreluSupported(const TensorInfo&, // input
                                        const TensorInfo&, // alpha
                                        const TensorInfo&, // output
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo&, // input
                                           const armnn::TensorInfo&, // output
                                           armnn::Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsQLstmSupported(const TensorInfo&, // input
                                        const TensorInfo&, // previousOutputIn
                                        const TensorInfo&, // previousCellStateIn
                                        const TensorInfo&, // outputStateOut
                                        const TensorInfo&, // cellStateOut
                                        const TensorInfo&, // output
                                        const QLstmDescriptor&, // descriptor
                                        const LstmInputParamsInfo&, // paramsInfo
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo&, // input
                                                const TensorInfo&, // previousCellStateIn
                                                const TensorInfo&, // previousOutputIn
                                                const TensorInfo&, // cellStateOut
                                                const TensorInfo&, // output
                                                const QuantizedLstmInputParamsInfo&, // paramsInfo
                                                Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsRankSupported(const TensorInfo&, // input
                                       const TensorInfo&, // output
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsReshapeSupported(const TensorInfo&, // input
                                          const TensorInfo&, // output
                                          const ReshapeDescriptor&, // descriptor
                                          Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo&, // input
                                                 const TensorInfo&, // output
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsResizeSupported(const TensorInfo&, // input
                                         const TensorInfo&, // output
                                         const ResizeDescriptor&, // descriptor
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsRsqrtSupported(const TensorInfo&, // input
                                        const TensorInfo&, // output
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSliceSupported(const TensorInfo&, // input
                                        const TensorInfo&, // output
                                        const SliceDescriptor&, // descriptor
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSoftmaxSupported(const TensorInfo&, // input
                                          const TensorInfo&, // output
                                          const SoftmaxDescriptor&, // descriptor
                                          Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSpaceToBatchNdSupported(const TensorInfo&, // input
                                                 const TensorInfo&, // output
                                                 const SpaceToBatchNdDescriptor&, // descriptor
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo&, // input
                                               const TensorInfo&, // output
                                               const SpaceToDepthDescriptor&, // descriptor
                                               Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSplitterSupported(const TensorInfo&, // input
                                           const ViewsDescriptor&, // descriptor
                                           Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSplitterSupported(const TensorInfo&, // input
                                           const std::vector<std::reference_wrapper<TensorInfo>>&, // outputs
                                           const ViewsDescriptor&, // descriptor
                                           Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*>&, // inputs
                                        const TensorInfo&, // output
                                        const StackDescriptor&, // descriptor
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

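// StandIn layers are placeholders that no backend can execute, so this query always reports a
// fixed reason rather than the generic "not implemented" message.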
bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>&, // inputs
                                          const std::vector<const TensorInfo*>&, // outputs
                                          const StandInDescriptor&, // descriptor
                                          Optional<std::string&> reasonIfUnsupported) const
{
    if (reasonIfUnsupported)
    {
        std::stringstream message;
        message << "StandIn layer is not executable via backends";

        reasonIfUnsupported.value() = message.str();
    }

    return false;
}

bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo&, // input
                                               const TensorInfo&, // output
                                               const StridedSliceDescriptor&, // descriptor
                                               Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSubtractionSupported(const TensorInfo&, // input0
                                              const TensorInfo&, // input1
                                              const TensorInfo&, // output
                                              Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsSwitchSupported(const TensorInfo&, // input0
                                         const TensorInfo&, // input1
                                         const TensorInfo&, // output0
                                         const TensorInfo&, // output1
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo&, // input
                                                         const TensorInfo&, // output
                                                         const TransposeConvolution2dDescriptor&, // descriptor
                                                         const TensorInfo&, // weights
                                                         const Optional<TensorInfo>&, // biases
                                                         Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsTransposeSupported(const TensorInfo&, // input
                                            const TensorInfo&, // output
                                            const TransposeDescriptor&, // descriptor
                                            Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

} // namespace armnn