//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <DelegateUtils.hpp>

#include <armnn/utility/Assert.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

namespace armnnDelegate
{

TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
                                           TfLiteContext* tfLiteContext,
                                           TfLiteNode* tfLiteNode,
                                           int nodeIndex,
                                           armnn::UnaryOperation unaryOperation)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

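    // The delegate cannot handle dynamic tensors, so reject them on both the input and the output.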
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }
    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

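    // Describe the requested unary operation and set up a lambda that asks the configured
    // backends whether they support it.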
    armnn::ElementwiseUnaryDescriptor descriptor(unaryOperation);
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("ELEMENTWISE_UNARY",
                                   tfLiteContext,
                                   IsElementwiseUnarySupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor);
    };

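    // If there is no network to build, this call is only a support check: run the validation
    // and report the result.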
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

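    // Add the layer to the network, record the backend selected during validation and
    // set the output tensor info on its single output slot.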
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the constant inputs, if there are any.
    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect the layer's input and output slots to the rest of the graph.
    return Connect(layer, tfLiteNode, delegateData);
}

} // namespace armnnDelegate