//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/utility/IgnoreUnused.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

namespace armnnDelegate
{

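// Validates a TfLite DEQUANTIZE node and, when a network is being built,
// adds the equivalent Arm NN Dequantize layer to it.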
TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     TfLiteNode* tfLiteNode,
                                     int nodeIndex,
                                     int32_t tfLiteDequantizeOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

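    // Look up this node's tensors in the context's tensor table and reject
    // dynamic shapes, which the delegate does not support.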
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            tfLiteDequantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            tfLiteDequantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

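    // Derive Arm NN tensor infos (shape, data type, quantization parameters)
    // from the TfLite tensors.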
    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsDequantizeSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo);
    };

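    // No network to build means this call is only checking support: ask the
    // backends whether they can execute Dequantize with these tensor infos.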
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

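    // Build phase: add the Dequantize layer and attach the output tensor info.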
    armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
    ARMNN_ASSERT(dequantizeLayer != nullptr);

    armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    return Connect(dequantizeLayer, tfLiteNode, delegateData);
}

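// Validates a TfLite QUANTIZE node and, when a network is being built,
// adds the equivalent Arm NN Quantize layer to it.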
TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
                                   TfLiteContext* tfLiteContext,
                                   TfLiteNode* tfLiteNode,
                                   int nodeIndex,
                                   int32_t tfLiteQuantizeOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            tfLiteQuantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            tfLiteQuantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    // Only affine per-layer quantization is supported.
    if (!IsAffineQuantization(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Only affine per-layer quantization is supported in operator #%d node #%d: ",
            tfLiteQuantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsQuantizeSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo);
    };

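    // Support-check phase only; nothing to add to a network.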
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

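    // Build phase: add the Quantize layer and attach the output tensor info.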
    armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
    ARMNN_ASSERT(quantizeLayer != nullptr);

    armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    return Connect(quantizeLayer, tfLiteNode, delegateData);
}

} // namespace armnnDelegate