• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 #ifndef TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_
16 #define TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_
17 
18 #include <algorithm>
19 #include <limits>
20 
21 #include "tensorflow/lite/c/builtin_op_data.h"
22 #include "tensorflow/lite/c/c_api_internal.h"
23 
24 namespace tflite {
25 
NumDimensions(const TfLiteTensor * t)26 inline int NumDimensions(const TfLiteTensor* t) { return t->dims->size; }
SizeOfDimension(const TfLiteTensor * t,int dim)27 inline int SizeOfDimension(const TfLiteTensor* t, int dim) {
28   return t->dims->data[dim];
29 }
GetInput(TfLiteContext * context,TfLiteNode * node,int index)30 inline const TfLiteTensor* GetInput(TfLiteContext* context, TfLiteNode* node,
31                                     int index) {
32   return &context->tensors[node->inputs->data[index]];
33 }
GetVariableInput(TfLiteContext * context,TfLiteNode * node,int index)34 inline TfLiteTensor* GetVariableInput(TfLiteContext* context, TfLiteNode* node,
35                                       int index) {
36   TfLiteTensor* tensor = &context->tensors[node->inputs->data[index]];
37   return (tensor->is_variable) ? tensor : nullptr;
38 }
GetOutput(TfLiteContext * context,TfLiteNode * node,int index)39 inline TfLiteTensor* GetOutput(TfLiteContext* context, TfLiteNode* node,
40                                int index) {
41   return &context->tensors[node->outputs->data[index]];
42 }
GetTemporary(TfLiteContext * context,TfLiteNode * node,int index)43 inline TfLiteTensor* GetTemporary(TfLiteContext* context, TfLiteNode* node,
44                                   int index) {
45   return &context->tensors[node->temporaries->data[index]];
46 }
NumInputs(const TfLiteNode * node)47 inline int NumInputs(const TfLiteNode* node) { return node->inputs->size; }
NumOutputs(const TfLiteNode * node)48 inline int NumOutputs(const TfLiteNode* node) { return node->outputs->size; }
49 
NumElements(const TfLiteTensor * t)50 inline int64_t NumElements(const TfLiteTensor* t) {
51   int64_t count = 1;
52   for (int i = 0; i < NumDimensions(t); ++i) {
53     count *= SizeOfDimension(t, i);
54   }
55   return count;
56 }
57 
GetOptionalInputTensor(TfLiteContext * context,const TfLiteNode * node,int index)58 inline const TfLiteTensor* GetOptionalInputTensor(TfLiteContext* context,
59                                                   const TfLiteNode* node,
60                                                   int index) {
61   const bool use_tensor = node->inputs->data[index] != kOptionalTensor;
62   if (use_tensor) {
63     return &context->tensors[node->inputs->data[index]];
64   }
65   return nullptr;
66 }
67 
68 // Determines whether tensor is constant.
IsConstantTensor(const TfLiteTensor * tensor)69 inline bool IsConstantTensor(const TfLiteTensor* tensor) {
70   return tensor->allocation_type == kTfLiteMmapRo;
71 }
72 
73 // Determines whether tensor is dynamic. Note that a tensor can be non-const and
74 // not dynamic. This function specifically checks for a dynamic tensor.
IsDynamicTensor(const TfLiteTensor * tensor)75 inline bool IsDynamicTensor(const TfLiteTensor* tensor) {
76   return tensor->allocation_type == kTfLiteDynamic;
77 }
78 
79 // Sets tensor to dynamic.
SetTensorToDynamic(TfLiteTensor * tensor)80 inline void SetTensorToDynamic(TfLiteTensor* tensor) {
81   if (tensor->allocation_type != kTfLiteDynamic) {
82     tensor->allocation_type = kTfLiteDynamic;
83     tensor->data.raw = nullptr;
84   }
85 }
86 
// Check dimensionality match and populate OpData for Conv and DepthwiseConv.
// Outputs (all written through the pointer parameters):
//   - multiplier/shift: whole-tensor fixed-point requantization factors.
//   - output_activation_min/max: clamping bounds derived from `activation`.
//   - per_channel_multiplier/per_channel_shift: per-channel factors
//     (NOTE(review): presumably one entry per output channel of `filter` —
//     confirm required array length against the implementation).
// Returns a non-OK status when the tensors are incompatible.
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int* per_channel_shift);
94 
// QuantizedMultiplier with the guard that shift will not be smaller than -31.
// Decomposes `effective_output_scale` into a fixed-point `significand` and a
// power-of-two `shift`, written through the output pointers.
void GuardedQuantizeMultiplier(double effective_output_scale,
                               int32_t* significand, int* shift);
98 
// Calculates the multiplication factor for a quantized convolution (or
// quantized depthwise convolution) involving the given tensors. Returns an
// error if the scales of the tensors are not compatible. On success the
// factor is written to `*multiplier`.
// NOTE(review): "Multipler" (sic) is the established public spelling of this
// symbol; renaming it would break existing callers.
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              const TfLiteTensor* bias,
                                              TfLiteTensor* output,
                                              double* multiplier);
108 
// Calculates the useful quantized range of an activation layer given its
// activation tensor. The clamped bounds are written to `*act_min`/`*act_max`;
// returns a non-OK status on failure.
TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
                                               TfLiteFusedActivation activation,
                                               TfLiteTensor* output,
                                               int32_t* act_min,
                                               int32_t* act_max);
// Uint8 variant of the above; has no failure path, so returns void.
void CalculateActivationRangeUint8(TfLiteFusedActivation activation,
                                   TfLiteTensor* output, int32_t* act_min,
                                   int32_t* act_max);
// Int8 variant of the above; has no failure path, so returns void.
void CalculateActivationRangeInt8(TfLiteFusedActivation activation,
                                  TfLiteTensor* output, int32_t* act_min,
                                  int32_t* act_max);
122 // Calculates the useful range of an activation layer given its activation
123 // tensor.a
124 template <typename T>
CalculateActivationRange(TfLiteFusedActivation activation,T * activation_min,T * activation_max)125 void CalculateActivationRange(TfLiteFusedActivation activation,
126                               T* activation_min, T* activation_max) {
127   if (activation == kTfLiteActRelu) {
128     *activation_min = 0;
129     *activation_max = std::numeric_limits<T>::max();
130   } else if (activation == kTfLiteActRelu6) {
131     *activation_min = 0;
132     *activation_max = 6;
133   } else if (activation == kTfLiteActRelu1) {
134     *activation_min = -1;
135     *activation_max = 1;
136   } else {
137     *activation_min = std::numeric_limits<T>::lowest();
138     *activation_max = std::numeric_limits<T>::max();
139   }
140 }
141 
// Return true if the given tensors have the same shape (same rank and equal
// extent along every dimension).
bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2);
144 
// Calculate the output_shape that is necessary for element-wise operations
// with broadcasting involving the two input tensors. Returns a non-OK status
// when the shapes cannot be broadcast together.
// NOTE(review): `*output_shape` appears to be an out-param allocated by this
// function — confirm in the implementation that the caller takes ownership.
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        TfLiteIntArray** output_shape);
151 }  // namespace tflite
152 
153 #endif  // TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_
154