/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_

#include <algorithm>  // for std::max / std::min below

#include "tensorflow/lite/kernels/internal/common.h"

namespace tflite {
namespace reference_integer_ops {

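// Reference (unoptimized) int8 fully-connected kernel. For each batch, it
// computes the dot product of the input row against every filter row in a
// 32-bit accumulator, adds the optional bias, rescales the result to the
// output scale, and clamps it to the quantized activation range.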
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const int8_t* input_data, const RuntimeShape& filter_shape,
    const int8_t* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    int8_t* output_data, void* gemm_context) {
  (void)gemm_context;  // only used in optimized code.
  const int32 input_offset = params.input_offset;
  const int32 filter_offset = params.weights_offset;
  const int32 output_offset = params.output_offset;
  const int32 output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
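  // The filter is expected to be at least 2-D ([..., output_rows,
  // accum_depth]) and the output exactly 2-D ([batches, output_depth]), with
  // output_depth no larger than the filter's row count.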
  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2);

  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  const int filter_dim_count = filter_shape.DimensionsCount();
  const int batches = output_shape.Dims(0);
  const int output_depth = output_shape.Dims(1);
  TFLITE_DCHECK_LE(output_depth, filter_shape.Dims(filter_dim_count - 2));
  const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
  for (int b = 0; b < batches; ++b) {
    for (int out_c = 0; out_c < output_depth; ++out_c) {
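      // Accumulate in 32 bits. The offsets (each the negated zero point of
      // its tensor) recenter the int8 values before the multiply.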
      int32 acc = 0;
      for (int d = 0; d < accum_depth; ++d) {
        int32 input_val = input_data[b * accum_depth + d];
        int32 filter_val = filter_data[out_c * accum_depth + d];
        acc += (filter_val + filter_offset) * (input_val + input_offset);
      }
      if (bias_data) {
        acc += bias_data[out_c];
      }
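      // Rescale the 32-bit accumulator down to the output scale using a
      // fixed-point multiplier and shift, the integer equivalent of
      // multiplying by input_scale * filter_scale / output_scale.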
      acc = MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift);
      acc += output_offset;
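      // Clamp to the fused activation range ([-128, 127] for int8 when no
      // narrower activation is fused).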
      acc = std::max(acc, output_activation_min);
      acc = std::min(acc, output_activation_max);
      output_data[out_c + output_depth * b] = static_cast<int8_t>(acc);
    }
  }
}
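
// A minimal usage sketch (illustrative only; the shapes and quantization
// parameters below are assumed example values, not anything defined in this
// file):
//
//   const RuntimeShape input_shape({2, 20});    // 2 batches, depth 20
//   const RuntimeShape filter_shape({10, 20});  // 10 outputs, depth 20
//   const RuntimeShape bias_shape({10});
//   const RuntimeShape output_shape({2, 10});
//   FullyConnectedParams params;
//   params.input_offset = 128;               // negated input zero point
//   params.weights_offset = 0;               // symmetric int8 weights
//   params.output_offset = -128;
//   params.output_multiplier = 1358954496;   // fixed-point output rescale
//   params.output_shift = -7;
//   params.quantized_activation_min = -128;  // no fused activation
//   params.quantized_activation_max = 127;
//   FullyConnected(params, input_shape, input_data, filter_shape, filter_data,
//                  bias_shape, bias_data, output_shape, output_data,
//                  /*gemm_context=*/nullptr);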

}  // namespace reference_integer_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_