//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DelegateUtils.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include "tensorflow/lite/delegates/utils.h"

namespace armnnDelegate
{

ValidateAddOperator(DelegateData & delegateData,TfLiteContext * tfLiteContext,const armnn::TensorInfo & inputInfo1,const armnn::TensorInfo & inputInfo2,const armnn::TensorInfo & outputInfo)19 TfLiteStatus ValidateAddOperator(DelegateData& delegateData,
20                                  TfLiteContext* tfLiteContext,
21                                  const armnn::TensorInfo& inputInfo1,
22                                  const armnn::TensorInfo& inputInfo2,
23                                  const armnn::TensorInfo& outputInfo)
24 {
25     bool isSupported = false;
26     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
27     {
28         FORWARD_LAYER_SUPPORT_FUNC(__func__,
29                                    tfLiteContext,
30                                    IsAdditionSupported,
31                                    delegateData.m_Backends,
32                                    isSupported,
33                                    inputInfo1,
34                                    inputInfo2,
35                                    outputTensorInfo);
36     };
37 
38     validateFunc(outputInfo, isSupported);
39     return isSupported ? kTfLiteOk : kTfLiteError;
40 }
41 
ValidateDivOperator(DelegateData & delegateData,TfLiteContext * tfLiteContext,const armnn::TensorInfo & inputInfo1,const armnn::TensorInfo & inputInfo2,const armnn::TensorInfo & outputInfo)42 TfLiteStatus ValidateDivOperator(DelegateData& delegateData,
43                                  TfLiteContext* tfLiteContext,
44                                  const armnn::TensorInfo& inputInfo1,
45                                  const armnn::TensorInfo& inputInfo2,
46                                  const armnn::TensorInfo& outputInfo)
47 {
48     bool isSupported = false;
49     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
50     {
51         FORWARD_LAYER_SUPPORT_FUNC(__func__,
52                                    tfLiteContext,
53                                    IsDivisionSupported,
54                                    delegateData.m_Backends,
55                                    isSupported,
56                                    inputInfo1,
57                                    inputInfo2,
58                                    outputTensorInfo);
59     };
60 
61     validateFunc(outputInfo, isSupported);
62     return isSupported ? kTfLiteOk : kTfLiteError;
63 }
64 
ValidateMaximumOperator(DelegateData & delegateData,TfLiteContext * tfLiteContext,const armnn::TensorInfo & inputInfo1,const armnn::TensorInfo & inputInfo2,const armnn::TensorInfo & outputInfo)65 TfLiteStatus ValidateMaximumOperator(DelegateData& delegateData,
66                                      TfLiteContext* tfLiteContext,
67                                      const armnn::TensorInfo& inputInfo1,
68                                      const armnn::TensorInfo& inputInfo2,
69                                      const armnn::TensorInfo& outputInfo)
70 {
71     bool isSupported = false;
72     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
73     {
74         FORWARD_LAYER_SUPPORT_FUNC(__func__,
75                                    tfLiteContext,
76                                    IsMaximumSupported,
77                                    delegateData.m_Backends,
78                                    isSupported,
79                                    inputInfo1,
80                                    inputInfo2,
81                                    outputTensorInfo);
82     };
83 
84     validateFunc(outputInfo, isSupported);
85     return isSupported ? kTfLiteOk : kTfLiteError;
86 }
87 
ValidateMinimumOperator(DelegateData & delegateData,TfLiteContext * tfLiteContext,const armnn::TensorInfo & inputInfo1,const armnn::TensorInfo & inputInfo2,const armnn::TensorInfo & outputInfo)88 TfLiteStatus ValidateMinimumOperator(DelegateData& delegateData,
89                                      TfLiteContext* tfLiteContext,
90                                      const armnn::TensorInfo& inputInfo1,
91                                      const armnn::TensorInfo& inputInfo2,
92                                      const armnn::TensorInfo& outputInfo)
93 {
94     bool isSupported = false;
95     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
96     {
97         FORWARD_LAYER_SUPPORT_FUNC(__func__,
98                                    tfLiteContext,
99                                    IsMinimumSupported,
100                                    delegateData.m_Backends,
101                                    isSupported,
102                                    inputInfo1,
103                                    inputInfo2,
104                                    outputTensorInfo);
105     };
106 
107     validateFunc(outputInfo, isSupported);
108     return isSupported ? kTfLiteOk : kTfLiteError;
109 }
110 
ValidateMulOperator(DelegateData & delegateData,TfLiteContext * tfLiteContext,const armnn::TensorInfo & inputInfo1,const armnn::TensorInfo & inputInfo2,const armnn::TensorInfo & outputInfo)111 TfLiteStatus ValidateMulOperator(DelegateData& delegateData,
112                                  TfLiteContext* tfLiteContext,
113                                  const armnn::TensorInfo& inputInfo1,
114                                  const armnn::TensorInfo& inputInfo2,
115                                  const armnn::TensorInfo& outputInfo)
116 {
117     bool isSupported = false;
118     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
119     {
120         FORWARD_LAYER_SUPPORT_FUNC(__func__,
121                                    tfLiteContext,
122                                    IsMultiplicationSupported,
123                                    delegateData.m_Backends,
124                                    isSupported,
125                                    inputInfo1,
126                                    inputInfo2,
127                                    outputTensorInfo);
128     };
129 
130     validateFunc(outputInfo, isSupported);
131     return isSupported ? kTfLiteOk : kTfLiteError;
132 }
133 
ValidateSubOperator(DelegateData & delegateData,TfLiteContext * tfLiteContext,const armnn::TensorInfo & inputInfo1,const armnn::TensorInfo & inputInfo2,const armnn::TensorInfo & outputInfo)134 TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
135                                  TfLiteContext* tfLiteContext,
136                                  const armnn::TensorInfo& inputInfo1,
137                                  const armnn::TensorInfo& inputInfo2,
138                                  const armnn::TensorInfo& outputInfo)
139 {
140     bool isSupported = false;
141     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
142     {
143         FORWARD_LAYER_SUPPORT_FUNC(__func__,
144                                    tfLiteContext,
145                                    IsSubtractionSupported,
146                                    delegateData.m_Backends,
147                                    isSupported,
148                                    inputInfo1,
149                                    inputInfo2,
150                                    outputTensorInfo);
151     };
152 
153     validateFunc(outputInfo, isSupported);
154     return isSupported ? kTfLiteOk : kTfLiteError;
155 }
156 
VisitElementwiseBinaryOperator(DelegateData & delegateData,TfLiteContext * tfLiteContext,TfLiteNode * tfLiteNode,int nodeIndex,int32_t elementwiseBinaryOperatorCode)157 TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
158                                             TfLiteContext* tfLiteContext,
159                                             TfLiteNode* tfLiteNode,
160                                             int nodeIndex,
161                                             int32_t elementwiseBinaryOperatorCode)
162 {
163     TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
164     TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
165 
166     const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
167     const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
168     if (IsDynamicTensor(tfLiteInputTensor0))
169     {
170         TF_LITE_MAYBE_KERNEL_LOG(
171             tfLiteContext,
172             "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
173             elementwiseBinaryOperatorCode, nodeIndex);
174         return kTfLiteError;
175     }
176 
177     const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
178     if (IsDynamicTensor(tfLiteInputTensor1))
179     {
180         TF_LITE_MAYBE_KERNEL_LOG(
181             tfLiteContext,
182             "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
183             elementwiseBinaryOperatorCode, nodeIndex);
184         return kTfLiteError;
185     }
186 
187     const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
188     if (IsDynamicTensor(tfLiteOutputTensor))
189     {
190         TF_LITE_MAYBE_KERNEL_LOG(
191             tfLiteContext,
192             "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
193             elementwiseBinaryOperatorCode, nodeIndex);
194         return kTfLiteError;
195     }
196 
197     armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
198     armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
199 
200     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
201 
202     if (!delegateData.m_Network)
203     {
204         switch(elementwiseBinaryOperatorCode)
205         {
206             case kTfLiteBuiltinAdd:
207                 return ValidateAddOperator(delegateData,
208                                            tfLiteContext,
209                                            inputTensorInfo0,
210                                            inputTensorInfo1,
211                                            outputTensorInfo);
212             case kTfLiteBuiltinDiv:
213                 return ValidateDivOperator(delegateData,
214                                            tfLiteContext,
215                                            inputTensorInfo0,
216                                            inputTensorInfo1,
217                                            outputTensorInfo);
218             case kTfLiteBuiltinMaximum:
219                 return ValidateMaximumOperator(delegateData,
220                                                tfLiteContext,
221                                                inputTensorInfo0,
222                                                inputTensorInfo1,
223                                                outputTensorInfo);
224             case kTfLiteBuiltinMinimum:
225                 return ValidateMinimumOperator(delegateData,
226                                                tfLiteContext,
227                                                inputTensorInfo0,
228                                                inputTensorInfo1,
229                                                outputTensorInfo);
230             case kTfLiteBuiltinMul:
231                 return ValidateMulOperator(delegateData,
232                                            tfLiteContext,
233                                            inputTensorInfo0,
234                                            inputTensorInfo1,
235                                            outputTensorInfo);
236             case kTfLiteBuiltinSub:
237                 return ValidateSubOperator(delegateData,
238                                            tfLiteContext,
239                                            inputTensorInfo0,
240                                            inputTensorInfo1,
241                                            outputTensorInfo);
242             default:
243                 return kTfLiteError;
244         }
245     }
246 
247     armnn::IConnectableLayer* elementwiseBinaryLayer = nullptr;
248 
249     switch(elementwiseBinaryOperatorCode)
250     {
251         case kTfLiteBuiltinAdd:
252             elementwiseBinaryLayer = delegateData.m_Network->AddAdditionLayer();
253             break;
254         case kTfLiteBuiltinDiv:
255             elementwiseBinaryLayer = delegateData.m_Network->AddDivisionLayer();
256             break;
257         case kTfLiteBuiltinMaximum:
258             elementwiseBinaryLayer = delegateData.m_Network->AddMaximumLayer();
259             break;
260         case kTfLiteBuiltinMinimum:
261             elementwiseBinaryLayer = delegateData.m_Network->AddMinimumLayer();
262             break;
263         case kTfLiteBuiltinMul:
264             elementwiseBinaryLayer = delegateData.m_Network->AddMultiplicationLayer();
265             break;
266         case kTfLiteBuiltinSub:
267             elementwiseBinaryLayer = delegateData.m_Network->AddSubtractionLayer();
268             break;
269         default:
270             return kTfLiteError;
271     }
272     ARMNN_ASSERT(elementwiseBinaryLayer != nullptr);
273     armnn::IOutputSlot& outputSlot = elementwiseBinaryLayer->GetOutputSlot(0);
274     outputSlot.SetTensorInfo(outputTensorInfo);
275 
276     if(tflite::IsConstantTensor(&tfLiteInputTensor0))
277     {
278         auto status = ConnectConstant(elementwiseBinaryLayer,
279                                       inputTensorInfo0,
280                                       tfLiteContext,
281                                       tfLiteInputTensor0,
282                                       delegateData,
283                                       tfLiteNode->inputs->data[0]);
284         if (status == kTfLiteError)
285         {
286             return status;
287         }
288     }
289 
290     if(tflite::IsConstantTensor(&tfLiteInputTensor1))
291     {
292         auto status = ConnectConstant(elementwiseBinaryLayer,
293                                       inputTensorInfo1,
294                                       tfLiteContext,
295                                       tfLiteInputTensor1,
296                                       delegateData,
297                                       tfLiteNode->inputs->data[1]);
298         if (status == kTfLiteError)
299         {
300             return status;
301         }
302     }
303 
304     auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
305                                         inputTensorInfo1,
306                                         elementwiseBinaryLayer,
307                                         tfLiteContext,
308                                         tfLiteNode,
309                                         delegateData);
310     if (!reshapeLayer)
311     {
312         return kTfLiteError;
313     }
314 
315     auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
316     if (!tfLiteNodeParameters)
317     {
318         // No Activation
319         return kTfLiteOk;
320     }
321     // Check activation
322     TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
323     return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
324 }

} // namespace armnnDelegate