/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_
#define TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_

#include <algorithm>
#include <limits>

#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"

namespace tflite {

inline int NumDimensions(const TfLiteTensor* t) { return t->dims->size; }
inline int SizeOfDimension(const TfLiteTensor* t, int dim) {
  return t->dims->data[dim];
}
inline const TfLiteTensor* GetInput(TfLiteContext* context,
                                    const TfLiteNode* node, int index) {
  return &context
              ->tensors[flatbuffers::EndianScalar(node->inputs->data[index])];
}
// Note: the returned pointer may be null; you must check it:
// TfLiteTensor* my_tensor = GetVariableInput(context, node, kMyTensorIdx);
// TF_LITE_ENSURE(context, my_tensor != nullptr);
inline TfLiteTensor* GetVariableInput(TfLiteContext* context,
                                      const TfLiteNode* node, int index) {
  TfLiteTensor* tensor =
      &context->tensors[flatbuffers::EndianScalar(node->inputs->data[index])];
  return (tensor->is_variable) ? tensor : nullptr;
}
inline TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
                               int index) {
  return &context
              ->tensors[flatbuffers::EndianScalar(node->outputs->data[index])];
}
inline TfLiteTensor* GetTemporary(TfLiteContext* context,
                                  const TfLiteNode* node, int index) {
  return &context->tensors[flatbuffers::EndianScalar(
      node->temporaries->data[index])];
}
inline const TfLiteTensor* GetIntermediates(TfLiteContext* context,
                                            const TfLiteNode* node, int index) {
  return &context->tensors[node->intermediates->data[index]];
}
inline int NumInputs(const TfLiteNode* node) { return node->inputs->size; }
inline int NumOutputs(const TfLiteNode* node) { return node->outputs->size; }
inline int NumIntermediates(const TfLiteNode* node) {
  return node->intermediates->size;
}
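
// Illustrative usage in a kernel's Prepare(); the tensor index constants
// (kInputTensor, kOutputTensor) are hypothetical, kernel-specific values:
//   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
//   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
//   const TfLiteTensor* input = GetInput(context, node, kInputTensor);
//   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);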

inline int64_t NumElements(const TfLiteIntArray* dims) {
  int64_t count = 1;
  for (int i = 0; i < dims->size; ++i) {
    count *= dims->data[i];
  }
  return count;
}

inline int64_t NumElements(const TfLiteTensor* t) {
  return NumElements(t->dims);
}
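
// For example, a tensor t with dims {2, 3, 4} has NumElements(t) == 24.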

inline const TfLiteTensor* GetOptionalInputTensor(TfLiteContext* context,
                                                  const TfLiteNode* node,
                                                  int index) {
  const bool use_tensor = index < node->inputs->size &&
                          node->inputs->data[index] != kTfLiteOptionalTensor;
  if (use_tensor) {
    return &context
                ->tensors[flatbuffers::EndianScalar(node->inputs->data[index])];
  }
  return nullptr;
}
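
// Illustrative usage; kBiasTensor is a hypothetical kernel-specific index:
//   const TfLiteTensor* bias =
//       GetOptionalInputTensor(context, node, kBiasTensor);
//   if (bias != nullptr) {
//     // The optional bias is present.
//   }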

// Determines whether tensor is constant.
inline bool IsConstantTensor(const TfLiteTensor* tensor) {
  return tensor->allocation_type == kTfLiteMmapRo;
}

// Determines whether tensor is dynamic. Note that a tensor can be non-const
// and not dynamic. This function specifically checks for a dynamic tensor.
inline bool IsDynamicTensor(const TfLiteTensor* tensor) {
  return tensor->allocation_type == kTfLiteDynamic;
}

// Sets tensor to dynamic.
inline void SetTensorToDynamic(TfLiteTensor* tensor) {
  if (tensor->allocation_type != kTfLiteDynamic) {
    tensor->allocation_type = kTfLiteDynamic;
    tensor->data.raw = nullptr;
  }
}
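
// Illustrative pattern: when an output's shape depends on runtime input
// values, mark it dynamic in Prepare() and resize it in Eval(). Here
// new_shape is a hypothetical TfLiteIntArray* computed by the kernel:
//   SetTensorToDynamic(output);  // in Prepare()
//   TF_LITE_ENSURE_OK(context,
//                     context->ResizeTensor(context, output, new_shape));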

// Determines whether it is a hybrid op - one that has float inputs and
// quantized weights.
inline bool IsHybridOp(const TfLiteTensor* input, const TfLiteTensor* weight) {
  return ((weight->type == kTfLiteUInt8 || weight->type == kTfLiteInt8) &&
          input->type == kTfLiteFloat32);
}
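
// Illustrative usage, where input and filter were obtained via GetInput():
//   if (IsHybridOp(input, filter)) {
//     // Take the hybrid code path: quantized weights, float activations.
//   }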

// Checks dimensionality match and populates OpData for Conv and
// DepthwiseConv.
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int* per_channel_shift);

TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int* per_channel_shift, int num_channels);
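
// Illustrative call from a Conv kernel's Prepare(); data is a hypothetical
// kernel-specific OpData struct with vector-backed per-channel fields:
//   TF_LITE_ENSURE_STATUS(PopulateConvolutionQuantizationParams(
//       context, input, filter, bias, output, params->activation,
//       &data->output_multiplier, &data->output_shift,
//       &data->output_activation_min, &data->output_activation_max,
//       data->per_channel_output_multiplier.data(),
//       data->per_channel_output_shift.data()));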

// Calculates the multiplication factor for a quantized convolution (or
// quantized depthwise convolution) involving the given tensors. Returns an
// error if the scales of the tensors are not compatible.
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              const TfLiteTensor* bias,
                                              TfLiteTensor* output,
                                              double* multiplier);

TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              TfLiteTensor* output,
                                              double* multiplier);
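
// Illustrative usage:
//   double real_multiplier = 0.0;
//   TF_LITE_ENSURE_OK(context,
//                     GetQuantizedConvolutionMultipler(
//                         context, input, filter, bias, output,
//                         &real_multiplier));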

// Calculates the useful quantized range of an activation layer given its
// activation tensor.
TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
                                               TfLiteFusedActivation activation,
                                               TfLiteTensor* output,
                                               int32_t* act_min,
                                               int32_t* act_max);
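
// Illustrative usage; params is a hypothetical pointer to the node's
// builtin params (obtained by casting node->builtin_data):
//   int32_t act_min, act_max;
//   TF_LITE_ENSURE_OK(context,
//                     CalculateActivationRangeQuantized(
//                         context, params->activation, output,
//                         &act_min, &act_max));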

// Calculates the useful range of an activation layer given its activation
// tensor.
template <typename T>
void CalculateActivationRange(TfLiteFusedActivation activation,
                              T* activation_min, T* activation_max) {
  if (activation == kTfLiteActRelu) {
    *activation_min = 0;
    *activation_max = std::numeric_limits<T>::max();
  } else if (activation == kTfLiteActRelu6) {
    *activation_min = 0;
    *activation_max = 6;
  } else if (activation == kTfLiteActRelu1) {
    *activation_min = -1;
    *activation_max = 1;
  } else {
    *activation_min = std::numeric_limits<T>::lowest();
    *activation_max = std::numeric_limits<T>::max();
  }
}
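
// Illustrative usage for a float kernel; params is a hypothetical pointer
// to the node's builtin params (e.g. a TfLiteAddParams*):
//   float output_activation_min, output_activation_max;
//   CalculateActivationRange(params->activation, &output_activation_min,
//                            &output_activation_max);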

// Returns true if the given tensors have the same shape.
bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2);

// Calculates the output_shape that is necessary for element-wise operations
// with broadcasting involving the two input tensors.
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        TfLiteIntArray** output_shape);

// Calculates the output_shape that is necessary for element-wise operations
// with broadcasting involving the three input tensors.
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        const TfLiteTensor* input3,
                                        TfLiteIntArray** output_shape);
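
// Illustrative usage in a broadcasting binary op's Prepare(); ResizeTensor
// takes ownership of the TfLiteIntArray produced here:
//   TfLiteIntArray* output_shape = nullptr;
//   TF_LITE_ENSURE_OK(context,
//                     CalculateShapeForBroadcast(context, input1, input2,
//                                                &output_shape));
//   TF_LITE_ENSURE_OK(context,
//                     context->ResizeTensor(context, output, output_shape));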

}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_