/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_
#define TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_

#include <cstdint>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {

struct OpDataFullyConnected {
  // The scaling factor from input to output (aka the 'real multiplier') can
  // be represented as a fixed point multiplier plus a left shift.
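  // As a sketch (assuming the usual TFLite QuantizeMultiplier convention,
  // where output_multiplier holds a Q31 mantissa):
  //   real_multiplier ~= (output_multiplier / 2^31) * 2^output_shift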
  int32_t output_multiplier;
  int output_shift;
  // The range of the fused activation layer. For example, for kNone and
  // uint8_t these would be 0 and 255.
  int32_t output_activation_min;
  int32_t output_activation_max;
  // The index of the temporary tensor where the quantized inputs are cached.
  int input_quantized_index;
  // Cached zero point values of tensors.
  int32_t input_zero_point;
  int32_t filter_zero_point;
  int32_t output_zero_point;
};

extern const int kFullyConnectedInputTensor;
extern const int kFullyConnectedWeightsTensor;
extern const int kFullyConnectedBiasTensor;
extern const int kFullyConnectedOutputTensor;

// Returns a FullyConnectedParams struct with all the parameters needed for a
// float computation.
FullyConnectedParams FullyConnectedParamsFloat(
    TfLiteFusedActivation activation);

// Returns a FullyConnectedParams struct with all the parameters needed for a
// quantized computation.
FullyConnectedParams FullyConnectedParamsQuantized(
    const OpDataFullyConnected& op_data);

// Computes the parameters cached in OpDataFullyConnected (output
// multiplier/shift, activation range and zero points) for the given tensors
// and fused activation; typically called from the kernel's Prepare step.
TfLiteStatus CalculateOpDataFullyConnected(
    TfLiteContext* context, TfLiteFusedActivation activation,
    TfLiteType data_type, const TfLiteTensor* input, const TfLiteTensor* filter,
    const TfLiteTensor* bias, TfLiteTensor* output, OpDataFullyConnected* data);

// This is the most generic TfLiteRegistration. The actual supported types may
// still be target dependent. The only requirement is that every implementation
// (reference or optimized) must define this function.
TfLiteRegistration Register_FULLY_CONNECTED();
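
// A minimal usage sketch (an illustration only; it assumes the
// MicroMutableOpResolver API and that this kernel is linked into the binary):
//
//   tflite::MicroMutableOpResolver<1> op_resolver;
//   op_resolver.AddFullyConnected(tflite::Register_FULLY_CONNECTED());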

#if defined(CMSIS_NN) || defined(ARDUINO)
// The Arduino is a special case where we use the CMSIS kernels, but because of
// the current approach to building for Arduino, we do not support -DCMSIS_NN as
// part of the build. As a result, we use defined(ARDUINO) as a proxy for the
// CMSIS kernels for this one special case.

// Returns a TfLiteRegistration struct for the cmsis_nn kernel variant that only
// supports int8.
TfLiteRegistration Register_FULLY_CONNECTED_INT8();

#else
// Note that while this block gets used for both reference and optimized kernels
// that do not have any specialized implementations, the only goal here is to
// define a fallback implementation that allows reference kernels to still be
// used from applications that call a more specific kernel variant.

inline TfLiteRegistration Register_FULLY_CONNECTED_INT8() {
  return Register_FULLY_CONNECTED();
}

#endif
}  // namespace tflite

#endif  // TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_