//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <ClassicDelegateUtils.hpp>
#include <SharedFunctions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/kernels/internal/tensor.h>

namespace armnnDelegate
{

TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Expected at least %d inputs (%d provided) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution2dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (tfLiteNodeParameters)
    {
        activationType = tfLiteNodeParameters->activation;
        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                        outputTensorInfo, activationType);
        if (activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(&tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
            return kTfLiteError;
        }
        if (IsDynamicTensor(tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
                nodeIndex);
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

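    // When no bias is supplied, a placeholder single-element TensorInfo is still built so that the
    // support check below always receives a well-formed armnn::Optional; with m_BiasEnabled set to
    // false it should be ignored by the backend.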
    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Calculate padding
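    // CalcPadding applies the TfLite padding rules: for kTfLitePaddingValid no padding is added,
    // while for kTfLitePaddingSame the total padding is (roughly)
    // max((ceil(in / stride) - 1) * stride + (filter - 1) * dilation + 1 - in, 0),
    // split with any odd element on the bottom/right. For example, in = 5, filter = 3, stride = 2
    // and dilation = 1 gives a padding of 1 on each side.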
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

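    // If m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to
    // clarify the support for the operator. If supported, this visitor is called again to add the
    // layer to the network, as seen below.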
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("CONV2D",
                                   tfLiteContext,
                                   IsConvolution2dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Set up filter and biases
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    if (filterTensorInfo.IsConstant())
    {
        auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);

        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (biasTensorInfo.IsConstant())
        {
            auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
            ARMNN_ASSERT(biasLayer != nullptr);
            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    // The data input can itself be constant, so it must also be hooked up to an input slot
    if (inputTensorInfo.IsConstant())
    {
        auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
                                       inputTensorInfo);

        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    if (Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }
    // Check and create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}

// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext, "TfLiteArmnnDelegate: Expected at least %d inputs (%d provided) in node #%d",
                2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution3dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_StrideZ = NonNegative(params->stride_depth, nodeIndex);
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
    descriptor.m_DilationZ = NonNegative(params->dilation_depth_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (tfLiteNodeParameters)
    {
        activationType = tfLiteNodeParameters->activation;
        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                        outputTensorInfo, activationType);
        if (activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NDHWC tensors
    const unsigned int inputDepth  = inputTensorInfo.GetShape()[1];
    const unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, InputChannels, OutputChannels
    const unsigned int filterDepth  = filterTensorInfo.GetShape()[0];
    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

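    // For instance, a DHWIO filter of shape [2, 3, 3, 8, 16] gives filterDepth = 2,
    // filterHeight = 3 and filterWidth = 3; the channel dimensions play no part in the padding
    // calculation below.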
    // Calculate padding
    CalcPadding(inputDepth, filterDepth, descriptor.m_StrideZ, descriptor.m_DilationZ,
                descriptor.m_PadFront, descriptor.m_PadBack, params->padding);
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    // If m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to
    // clarify the support for the operator.
    // If supported, VisitConvolutionOperator will be called again to add the layer to the network as seen below.
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("CONV3D",
                                   tfLiteContext,
                                   IsConvolution3dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    // Add constant layers for the weights and biases if those inputs are constant,
    // and connect them to the Convolution3d layer.
    if (filterTensorInfo.IsConstant())
    {
        auto filter = CreateConstTensor(&tfLiteFilterTensor,
                                        filterTensorInfo);

        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
        ARMNN_ASSERT(weightsLayer != nullptr);

        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (biasTensorInfo.IsConstant())
        {
            auto biases = CreateConstTensor(&tfLiteBiasTensor,
                                            biasTensorInfo);

            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
            ARMNN_ASSERT(biasLayer != nullptr);

            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    // The data input can itself be constant, so it must also be hooked up to an input slot
    if (inputTensorInfo.IsConstant())
    {
        auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
                                       inputTensorInfo);

        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    if (Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }

    // Check and create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}
#endif

TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          int nodeIndex,
                                          int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Expected at least %d inputs (%d provided) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);

    armnn::DepthwiseConvolution2dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);

    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (tfLiteNodeParameters)
    {
        activationType = tfLiteNodeParameters->activation;
        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                        outputTensorInfo, activationType);
        if (activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

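    // For example, 8 input channels (I) with a depth multiplier (M) of 2 give a filter shape of
    // [1, filterHeight, filterWidth, 16]; only the H and W dimensions feed the padding calculation.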
    // Calculate padding
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    armnn::TensorInfo biasTensorInfo;
    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(&tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
            return kTfLiteError;
        }
        if (IsDynamicTensor(tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
                nodeIndex);
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("DEPTHWISE_CONV2D",
                                   tfLiteContext,
                                   IsDepthwiseConvolutionSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    if (filterTensorInfo.IsConstant())
    {
        // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
        auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);

        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (biasTensorInfo.IsConstant())
        {
            auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
            ARMNN_ASSERT(biasLayer != nullptr);
            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    // The data input can itself be constant, so it must also be hooked up to an input slot
    if (inputTensorInfo.IsConstant())
    {
        auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
                                       inputTensorInfo);

        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    if (Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }
    // Check and create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}

TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          int nodeIndex,
                                          int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::TransposeConvolution2dDescriptor descriptor;
    auto* parameters = reinterpret_cast<TfLiteTransposeConvParams*>(tfLiteNode->builtin_data);
    descriptor.m_BiasEnabled = false;
    descriptor.m_StrideX = NonNegative(parameters->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(parameters->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
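    // For TRANSPOSE_CONV the inputs are ordered: 0 = requested output shape, 1 = filter, 2 = data.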
    const TfLiteTensor& tfLiteOutputShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteOutputShapeTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output shape tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputShapeTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output shape tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo outputShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
    std::vector<int32_t> outputShape(outputShapeTensorInfo.GetNumElements());
    if (outputShapeTensorInfo.GetDataType() == armnn::DataType::Signed32)
    {
        for (unsigned int i = 0; i < outputShapeTensorInfo.GetNumElements(); i++)
        {
            outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
        }
    }
    else if (outputShapeTensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
    {
        for (unsigned int i = 0; i < outputShapeTensorInfo.GetNumElements(); i++)
        {
            outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
        }
    }
    // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
    for (int dimension : outputShape)
    {
        descriptor.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
    }
    descriptor.m_OutputShapeEnabled = true;

    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Calculate padding
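    // TfLiteTransposeConvParams carries no dilation factors, hence the fixed dilation of 1 below.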
    CalcPadding(inputHeight,
                filterHeight,
                descriptor.m_StrideY,
                1, // dilation y
                descriptor.m_PadTop,
                descriptor.m_PadBottom,
                parameters->padding);
    CalcPadding(inputWidth,
                filterWidth,
                descriptor.m_StrideX,
                1, // dilation x
                descriptor.m_PadLeft,
                descriptor.m_PadRight,
                parameters->padding);

    // Set up filter
    auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
                                          filterTensorInfo);
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE_CONV2D",
                                   tfLiteContext,
                                   IsTransposeConvolution2dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   armnn::EmptyOptional());
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
                                                                                             filterTensor,
                                                                                             armnn::EmptyOptional());
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    // The data input can be constant, so we must check that this is allocated to an input slot
    if (inputTensorInfo.IsConstant())
    {
        auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]],
                                       inputTensorInfo);

        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

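    // Connection is done by hand here rather than through the Connect() helper, presumably because
    // the data tensor sits at input index 2 (input 0 holds the output shape) while the filter has
    // already been baked into the layer above.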
    // Connect
    if (delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])] != nullptr)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])]->
                                                                   Connect(layer->GetInputSlot(0));
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& slot = layer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->outputs->data[outputIndex])] =
                                                                   &slot;
    }
    return kTfLiteOk;
}

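// Dispatches the convolution-family builtin operators to the dedicated visitor functions above.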
TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
                                      TfLiteContext* tfLiteContext,
                                      TfLiteNode* tfLiteNode,
                                      int nodeIndex,
                                      int32_t operatorCode)
{
    switch (operatorCode)
    {
        case kTfLiteBuiltinConv2d:
            return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
        case kTfLiteBuiltinConv3d:
            return VisitConv3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
#endif
        case kTfLiteBuiltinDepthwiseConv2d:
            return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinTransposeConv:
            return VisitTransposeConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        default:
            return kTfLiteError;
    }
}

} // namespace armnnDelegate