1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include "ClLayerSupport.hpp"
7 #include "ClBackendId.hpp"
8 #include "ClBackendModelContext.hpp"
9
10 #include <armnn/Descriptors.hpp>
11 #include <armnn/BackendRegistry.hpp>
12
13 #include <InternalTypes.hpp>
14 #include <LayerSupportCommon.hpp>
15
16 #include <armnn/utility/IgnoreUnused.hpp>
17 #include <armnn/utility/PolymorphicDowncast.hpp>
18
19 #if defined(ARMCOMPUTECL_ENABLED)
20 #include <aclCommon/ArmComputeUtils.hpp>
21 #include <aclCommon/ArmComputeTensorUtils.hpp>
22 #include "workloads/ClAbsWorkload.hpp"
23 #include "workloads/ClAdditionWorkload.hpp"
24 #include "workloads/ClActivationWorkload.hpp"
25 #include "workloads/ClArgMinMaxWorkload.hpp"
26 #include "workloads/ClBatchNormalizationFloatWorkload.hpp"
27 #include "workloads/ClBatchToSpaceNdWorkload.hpp"
28 #include "workloads/ClComparisonWorkload.hpp"
29 #include "workloads/ClConstantWorkload.hpp"
30 #include "workloads/ClConvertFp16ToFp32Workload.hpp"
31 #include "workloads/ClConvertFp32ToFp16Workload.hpp"
32 #include "workloads/ClConvolution2dWorkload.hpp"
33 #include "workloads/ClDepthToSpaceWorkload.hpp"
34 #include "workloads/ClDepthwiseConvolutionWorkload.hpp"
35 #include "workloads/ClDequantizeWorkload.hpp"
36 #include "workloads/ClDivisionFloatWorkload.hpp"
37 #include "workloads/ClExpWorkload.hpp"
38 #include "workloads/ClFillWorkload.hpp"
39 #include "workloads/ClFloorFloatWorkload.hpp"
40 #include "workloads/ClFullyConnectedWorkload.hpp"
41 #include "workloads/ClGatherWorkload.hpp"
42 #include "workloads/ClInstanceNormalizationWorkload.hpp"
43 #include "workloads/ClL2NormalizationFloatWorkload.hpp"
44 #include "workloads/ClLogSoftmaxWorkload.hpp"
45 #include "workloads/ClLogicalAndWorkload.hpp"
46 #include "workloads/ClLogicalNotWorkload.hpp"
47 #include "workloads/ClLogicalOrWorkload.hpp"
48 #include "workloads/ClLstmFloatWorkload.hpp"
49 #include "workloads/ClMaximumWorkload.hpp"
50 #include "workloads/ClMeanWorkload.hpp"
51 #include "workloads/ClConcatWorkload.hpp"
52 #include "workloads/ClMinimumWorkload.hpp"
53 #include "workloads/ClMultiplicationWorkload.hpp"
54 #include "workloads/ClNegWorkload.hpp"
55 #include "workloads/ClNormalizationFloatWorkload.hpp"
56 #include "workloads/ClPadWorkload.hpp"
57 #include "workloads/ClPermuteWorkload.hpp"
58 #include "workloads/ClPooling2dWorkload.hpp"
59 #include "workloads/ClPreluWorkload.hpp"
60 #include "workloads/ClQLstmWorkload.hpp"
61 #include "workloads/ClQuantizedLstmWorkload.hpp"
62 #include "workloads/ClQuantizeWorkload.hpp"
63 #include "workloads/ClReshapeWorkload.hpp"
64 #include "workloads/ClResizeWorkload.hpp"
65 #include "workloads/ClRsqrtWorkload.hpp"
66 #include "workloads/ClSliceWorkload.hpp"
67 #include "workloads/ClSoftmaxWorkload.hpp"
68 #include "workloads/ClSpaceToBatchNdWorkload.hpp"
69 #include "workloads/ClSpaceToDepthWorkload.hpp"
70 #include "workloads/ClSplitterWorkload.hpp"
71 #include "workloads/ClStackWorkload.hpp"
72 #include "workloads/ClStridedSliceWorkload.hpp"
73 #include "workloads/ClSubtractionWorkload.hpp"
74 #include "workloads/ClTransposeConvolution2dWorkload.hpp"
75 #include "workloads/ClTransposeWorkload.hpp"
76 #endif
77
78
79 namespace armnn
80 {
81
82 namespace
83 {
84
85 template<unsigned int FilterSize>
IsMatchingSize2d(const TensorInfo & weightInfo)86 bool IsMatchingSize2d(const TensorInfo& weightInfo)
87 {
88 // Width & Height must match.
89 return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
90 }
91
// True when actualStride equals the single permitted stride value.
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return actualStride == ValidStride;
}

// True when actualStride matches any of the listed permitted stride values.
// Checks the first candidate, then recurses over the remaining pack.
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    if (actualStride == FirstStride)
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
103
// Reports whether the CL backend is available at all. With ARMCOMPUTECL_ENABLED
// it is unconditionally usable; otherwise every query fails and
// reasonIfUnsupported (when provided) is filled with an explanation.
// The trailing args are accepted only so callers can forward their parameters.
template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}
118
// FORWARD_CL_LAYER_SUPPORT_FUNC(expr): evaluates expr when CL is compiled in;
// otherwise reports "built without CL support". The fallback expansion expects
// a variable named reasonIfUnsupported in the calling scope.
#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif

#if defined(ARMCOMPUTECL_ENABLED)
// Runs an Arm Compute Library validate function and converts its
// arm_compute::Status into a bool, copying the ACL error description into
// reasonIfUnsupported when validation fails.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// NOTE: both expansions contain a return statement, so this macro must be the
// tail of the calling function.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
144
// Data-type dispatch helper: floatFuncPtr is supplied for both float slots
// (presumably FP16 and FP32 — see IsSupportedForDataTypeGeneric in
// LayerSupportCommon.hpp), uint8FuncPtr for the quantized slot, and the
// remaining data types are rejected via FalseFunc. Fails outright when the
// library is built without CL.
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}
162 } // anonymous namespace
163
// Constructs layer support with backend-specific model options (e.g. the
// FastMath flag consulted by IsConvolution2dSupported).
ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_ModelContextPtr(modelContextPtr)
{
}
168
// Default construction: no model context, so option-dependent checks fall back
// to their defaults.
ClLayerSupport::ClLayerSupport()
    : m_ModelContextPtr(nullptr)
{
}
173
IsAbsSupported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported) const174 bool ClLayerSupport::IsAbsSupported(const TensorInfo& input,
175 const TensorInfo& output,
176 Optional<std::string&> reasonIfUnsupported) const
177 {
178 ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
179 return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
180 }
181
// Activation support: forwards to the ACL query in ClActivationWorkload.
bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
193
// Addition support: forwards to ClAdditionValidate. The trailing nullptr is the
// fused-activation argument (none requested) — confirm against the validate
// signature in ClAdditionWorkload.hpp.
bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
206
// ArgMin/ArgMax support: forwards to the ACL query in ClArgMinMaxWorkload.
bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const ArgMinMaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{

    FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
219
// Batch normalization support: forwards all statistics tensors to
// ClBatchNormalizationValidate; trailing nullptr = no fused activation.
bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor,
                                   nullptr);
}
240
// BatchToSpaceNd support: forwards to the ACL query in ClBatchToSpaceNdWorkload.
bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
252
// Comparison (Equal/Greater/etc.) support: forwards to ClComparisonWorkload.
bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           const ComparisonDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClComparisonWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}
266
// Concat support. The concat axis must be valid for the given rank, and at most
// 4 dimensions are supported. Concat along width/height/channels is validated
// by ACL; concat along the outermost (batch) axis of 4D tensors relies on the
// sub-tensor optimization, which needs every input to share type/quantization
// space with the output.
bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const ConcatDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

    // Distance of the concat axis from the innermost dimension:
    // 0 = width, 1 = height, 2 = channels, 3 = batch (for 4D tensors).
    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if(concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}
307
// Constant layer support: only the output tensor is validated.
bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
                                   reasonIfUnsupported,
                                   output);
}
315
// FP16 -> FP32 conversion support: forwards to ClConvertFp16ToFp32Workload.
bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
325
// FP32 -> FP16 conversion support: forwards to ClConvertFp32ToFp16Workload.
bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
335
IsConvolution2dSupported(const TensorInfo & input,const TensorInfo & output,const Convolution2dDescriptor & descriptor,const TensorInfo & weights,const Optional<TensorInfo> & biases,Optional<std::string &> reasonIfUnsupported) const336 bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
337 const TensorInfo& output,
338 const Convolution2dDescriptor& descriptor,
339 const TensorInfo& weights,
340 const Optional<TensorInfo>& biases,
341 Optional<std::string&> reasonIfUnsupported) const
342 {
343 bool isFastMathEnabled = false;
344 #if defined(ARMCOMPUTECL_ENABLED)
345 if (m_ModelContextPtr)
346 {
347 if (m_ModelContextPtr.get() != nullptr)
348 {
349 auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
350 if (modelOptions)
351 {
352 isFastMathEnabled = modelOptions->IsFastMathEnabled();
353 }
354 }
355 }
356 #endif
357
358 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
359 reasonIfUnsupported,
360 input,
361 output,
362 descriptor,
363 weights,
364 biases,
365 isFastMathEnabled,
366 nullptr);
367 }
368
// Dequantize support: forwards to the ACL query in ClDequantizeWorkload.
bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
378
// DepthToSpace support: forwards to the ACL query in ClDepthToSpaceWorkload.
bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const DepthToSpaceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthToSpaceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
390
// Depthwise convolution support: forwards to ClDepthwiseConvolutionWorkload;
// trailing nullptr = no fused activation.
bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}
407
// Dilated depthwise convolution uses the same ACL validate as the non-dilated
// case (dilation is carried inside the descriptor); trailing nullptr = no
// fused activation.
bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
                                                            const Optional<TensorInfo>& biases,
                                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}
424
425
// Division support: forwards to ClDivisionWorkloadValidate; trailing nullptr =
// no fused activation.
bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
438
// Elementwise unary support: dispatches on the operation to the matching
// per-operation ACL validate. Each case returns via the FORWARD macro, so
// there is no fallthrough; unhandled operations are unsupported.
bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    switch(descriptor.m_Operation)
    {
        case UnaryOperation::Abs:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Exp:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClExpWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Neg:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Rsqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::LogicalNot:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        default:
            return false;
    }
}
475
IsFillSupported(const TensorInfo & input,const TensorInfo & output,const FillDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported) const476 bool ClLayerSupport::IsFillSupported(const TensorInfo& input,
477 const TensorInfo& output,
478 const FillDescriptor& descriptor,
479 Optional<std::string&> reasonIfUnsupported) const
480 {
481 armnn::IgnoreUnused(input);
482 armnn::IgnoreUnused(output);
483 armnn::IgnoreUnused(descriptor);
484
485 return IsClBackendSupported(reasonIfUnsupported);
486 }
487
// Floor support: forwards to the ACL query in ClFloorFloatWorkload.
bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
497
// Fully connected support: forwards to ClFullyConnectedWorkloadValidate;
// trailing nullptr = no fused activation.
bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor,
                                   nullptr);
}
514
// Gather support: input0 is the data tensor, input1 the indices.
bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
                                       const TensorInfo& input1,
                                       const TensorInfo& output,
                                       const GatherDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}
528
IsGreaterSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported) const529 bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
530 const TensorInfo& input1,
531 const TensorInfo& output,
532 Optional<std::string&> reasonIfUnsupported) const
533 {
534 ComparisonDescriptor descriptor(ComparisonOperation::Greater);
535 return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
536 }
537
// Input layers need no per-tensor validation; only CL availability matters.
bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, input);
}
543
// Instance normalization support: forwards to ClInstanceNormalizationWorkload.
bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const InstanceNormalizationDescriptor& descriptor,
                                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
555
// L2 normalization support: forwards to ClL2NormalizationFloatWorkload.
bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
567
// Logical binary (And/Or) support: dispatches on the operation to the matching
// ACL validate. Each case returns via the FORWARD macro (no fallthrough);
// other operations are unsupported.
bool ClLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              const LogicalBinaryDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(output);

    switch(descriptor.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalAndWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        case LogicalBinaryOperation::LogicalOr:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalOrWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        default:
            return false;
    }
}
594
595
// LogSoftmax support: forwards to the ACL query in ClLogSoftmaxWorkload.
bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const LogSoftmaxDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogSoftmaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
607
// LSTM support (float path): forwards all state/scratch tensors and the
// packed weights (paramsInfo) to ClLstmFloatWorkloadValidate.
bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const LstmInputParamsInfo& paramsInfo,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}
631
// Elementwise maximum support: forwards to ClMaximumWorkloadValidate.
bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
643
// Mean reduction support: forwards to ClMeanValidate.
bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
655
// Merger is the deprecated name for Concat; delegate directly.
bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const MergerDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
663
// Elementwise minimum support: forwards to ClMinimumWorkloadValidate.
bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
675
// Multiplication support: forwards to ClMultiplicationWorkloadValidate;
// trailing nullptr = no fused activation.
bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
688
// Normalization (LRN) support: forwards to ClNormalizationWorkloadValidate.
bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
696
// Output layers need no per-tensor validation; only CL availability matters.
bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, output);
}
702
// Pad support: forwards to ClPadValidate.
bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
714
// Permute support: forwards to ClPermuteWorkloadValidate.
bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PermuteDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
722
// Pooling2d support: forwards to ClPooling2dWorkloadValidate.
bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
730
// PReLU support: alpha is the per-element slope tensor.
bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
                                      const armnn::TensorInfo &alpha,
                                      const armnn::TensorInfo &output,
                                      armnn::Optional<std::string &> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}
738
// QLSTM support: only the QAsymmS8 activation / QSymmS16 cell-state data-type
// combination is accepted before consulting ACL. NOTE: the validate call takes
// previousCellStateIn before previousOutputIn (and cellStateOut before
// outputStateOut) — the order differs from this function's parameter order.
bool ClLayerSupport::IsQLstmSupported(const TensorInfo& input,
                                      const TensorInfo& previousOutputIn,
                                      const TensorInfo& previousCellStateIn,
                                      const TensorInfo& outputStateOut,
                                      const TensorInfo& cellStateOut,
                                      const TensorInfo& output,
                                      const QLstmDescriptor& descriptor,
                                      const LstmInputParamsInfo& paramsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
        output.GetDataType() == armnn::DataType::QAsymmS8)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClQLstmWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       previousCellStateIn,
                                       previousOutputIn,
                                       cellStateOut,
                                       outputStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
    else
    {
        return false;
    }
}
772
// Quantized LSTM (16-bit cell state variant) support: forwards to
// ClQuantizedLstmWorkloadValidate.
bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
                                              const TensorInfo& previousCellStateIn,
                                              const TensorInfo& previousOutputIn,
                                              const TensorInfo& cellStateOut,
                                              const TensorInfo& output,
                                              const QuantizedLstmInputParamsInfo& paramsInfo,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   previousCellStateIn,
                                   previousOutputIn,
                                   cellStateOut,
                                   output,
                                   paramsInfo);
}
790
IsQuantizeSupported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported) const791 bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
792 const TensorInfo& output,
793 Optional<std::string&> reasonIfUnsupported) const
794 {
795 FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
796 reasonIfUnsupported,
797 input,
798 output);
799 }
800
IsReshapeSupported(const TensorInfo & input,const TensorInfo & output,const ReshapeDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported) const801 bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
802 const TensorInfo& output,
803 const ReshapeDescriptor& descriptor,
804 Optional<std::string&> reasonIfUnsupported) const
805 {
806 IgnoreUnused(descriptor);
807 FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
808 }
809
IsResizeSupported(const TensorInfo & input,const TensorInfo & output,const ResizeDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported) const810 bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
811 const TensorInfo& output,
812 const ResizeDescriptor& descriptor,
813 Optional<std::string&> reasonIfUnsupported) const
814 {
815 FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
816 }
817
IsResizeBilinearSupported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported) const818 bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
819 const TensorInfo& output,
820 Optional<std::string&> reasonIfUnsupported) const
821 {
822 ResizeDescriptor descriptor;
823 descriptor.m_Method = ResizeMethod::Bilinear;
824 descriptor.m_DataLayout = DataLayout::NCHW;
825
826 const TensorShape& outputShape = output.GetShape();
827 descriptor.m_TargetHeight = outputShape[2];
828 descriptor.m_TargetWidth = outputShape[3];
829
830 return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
831 }
832
IsRsqrtSupported(const TensorInfo & input,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported) const833 bool ClLayerSupport::IsRsqrtSupported(const TensorInfo& input,
834 const TensorInfo& output,
835 Optional<std::string&> reasonIfUnsupported) const
836 {
837 ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
838 return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
839 }
840
IsSliceSupported(const TensorInfo & input,const TensorInfo & output,const SliceDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported) const841 bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
842 const TensorInfo& output,
843 const SliceDescriptor& descriptor,
844 Optional<std::string&> reasonIfUnsupported) const
845 {
846 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
847 }
848
IsSoftmaxSupported(const TensorInfo & input,const TensorInfo & output,const SoftmaxDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported) const849 bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
850 const TensorInfo& output,
851 const SoftmaxDescriptor& descriptor,
852 Optional<std::string&> reasonIfUnsupported) const
853 {
854 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
855 }
856
IsSpaceToBatchNdSupported(const TensorInfo & input,const TensorInfo & output,const SpaceToBatchNdDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported) const857 bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
858 const TensorInfo& output,
859 const SpaceToBatchNdDescriptor& descriptor,
860 Optional<std::string&> reasonIfUnsupported) const
861 {
862 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
863 reasonIfUnsupported,
864 input,
865 output,
866 descriptor);
867 }
868
IsSpaceToDepthSupported(const TensorInfo & input,const TensorInfo & output,const SpaceToDepthDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported) const869 bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
870 const TensorInfo& output,
871 const SpaceToDepthDescriptor& descriptor,
872 Optional<std::string&> reasonIfUnsupported) const
873 {
874 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
875 reasonIfUnsupported,
876 input,
877 output,
878 descriptor);
879 }
880
IsSplitterSupported(const TensorInfo & input,const ViewsDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported) const881 bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
882 const ViewsDescriptor& descriptor,
883 Optional<std::string&> reasonIfUnsupported) const
884 {
885 IgnoreUnused(descriptor);
886 return IsSupportedForDataTypeCl(reasonIfUnsupported,
887 input.GetDataType(),
888 &TrueFunc<>,
889 &TrueFunc<>);
890 }
891
IsSplitterSupported(const TensorInfo & input,const std::vector<std::reference_wrapper<TensorInfo>> & outputs,const ViewsDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported) const892 bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
893 const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
894 const ViewsDescriptor& descriptor,
895 Optional<std::string&> reasonIfUnsupported) const
896 {
897 #if defined(ARMCOMPUTECL_ENABLED)
898 // Split along the last dimension, cannot use sub-tensors
899 // as width and height of the sub-tensors do not match
900 // the width and height of the parent tensor
901 // in case of input with more than 2D.
902 std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
903 if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
904 *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
905 {
906 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
907 reasonIfUnsupported,
908 input,
909 outputs,
910 *splitAxis.begin());
911 }
912 #endif
913 IgnoreUnused(descriptor);
914 for (auto output : outputs)
915 {
916 if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
917 {
918 SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
919 return false;
920 }
921 }
922 return true;
923 }
924
IsStackSupported(const std::vector<const TensorInfo * > & inputs,const TensorInfo & output,const StackDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported) const925 bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
926 const TensorInfo& output,
927 const StackDescriptor& descriptor,
928 Optional<std::string&> reasonIfUnsupported) const
929 {
930 FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
931 reasonIfUnsupported,
932 inputs,
933 output,
934 descriptor);
935 }
936
IsStridedSliceSupported(const TensorInfo & input,const TensorInfo & output,const StridedSliceDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported) const937 bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
938 const TensorInfo& output,
939 const StridedSliceDescriptor& descriptor,
940 Optional<std::string&> reasonIfUnsupported) const
941 {
942 FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
943 reasonIfUnsupported,
944 input,
945 output,
946 descriptor);
947 }
948
IsSubtractionSupported(const TensorInfo & input0,const TensorInfo & input1,const TensorInfo & output,Optional<std::string &> reasonIfUnsupported) const949 bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
950 const TensorInfo& input1,
951 const TensorInfo& output,
952 Optional<std::string&> reasonIfUnsupported) const
953 {
954 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
955 reasonIfUnsupported,
956 input0,
957 input1,
958 output,
959 nullptr);
960 }
961
IsTransposeConvolution2dSupported(const TensorInfo & input,const TensorInfo & output,const TransposeConvolution2dDescriptor & descriptor,const TensorInfo & weights,const Optional<TensorInfo> & biases,Optional<std::string &> reasonIfUnsupported) const962 bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
963 const TensorInfo& output,
964 const TransposeConvolution2dDescriptor& descriptor,
965 const TensorInfo& weights,
966 const Optional<TensorInfo>& biases,
967 Optional<std::string&> reasonIfUnsupported) const
968 {
969 FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
970 reasonIfUnsupported,
971 input,
972 output,
973 descriptor,
974 weights,
975 biases);
976 }
977
IsTransposeSupported(const TensorInfo & input,const TensorInfo & output,const TransposeDescriptor & descriptor,Optional<std::string &> reasonIfUnsupported) const978 bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
979 const TensorInfo& output,
980 const TransposeDescriptor& descriptor,
981 Optional<std::string&> reasonIfUnsupported) const
982 {
983 FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
984 }
985
986 } // namespace armnn
987