/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_FULLY_CONNECTED_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_FULLY_CONNECTED_H_

#include "ruy/profiler/instrumentation.h"  // from @ruy
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {
namespace optimized_integer_ops {
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const int8* input_data, const RuntimeShape& filter_shape,
    const int8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape, int8* output_data,
    CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("FullyConnectedInt8/8bit");

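  // Per-tensor quantization parameters. By TFLite convention the input and
  // filter offsets arrive pre-negated (offset == -zero_point), while the
  // output offset is the output zero point itself.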
  const int32 input_offset = params.input_offset;
  const int32 filter_offset = params.weights_offset;
  const int32 output_offset = params.output_offset;
  const int32 output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
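  // Interpret the two trailing filter dimensions as a
  // (output_rows x accum_depth) matrix, and collapse all leading output
  // dimensions into a single batch count.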
  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
  // TODO(b/62193649): This really should be:
  //     const int batches = ArraySize(output_dims, 1);
  // but the current --variable_batch hack consists of overwriting the 3rd
  // dimension with the runtime batch size, as we don't keep track, for each
  // array, of which dimension is its batch dimension.
  const int output_dim_count = output_shape.DimensionsCount();
  const int filter_dim_count = filter_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
  const int filter_rows = filter_shape.Dims(filter_dim_count - 2);
  const int filter_cols = filter_shape.Dims(filter_dim_count - 1);
  TFLITE_DCHECK_EQ(filter_shape.FlatSize(), filter_rows * filter_cols);
  const int output_rows = output_shape.Dims(output_dim_count - 1);
  TFLITE_DCHECK_EQ(output_rows, filter_rows);
  if (bias_data) {
    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_rows);
  }

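  // Map the op onto a GEMM: the filter is the LHS (row-major,
  // filter_rows x filter_cols) and the input activations are the RHS
  // (column-major, filter_cols x batches). cpu_backend_gemm expects true
  // zero points, so the pre-negated offsets are negated back here.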
  cpu_backend_gemm::MatrixParams<int8> lhs_params;
  lhs_params.rows = filter_rows;
  lhs_params.cols = filter_cols;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
  lhs_params.zero_point = -filter_offset;
  cpu_backend_gemm::MatrixParams<int8> rhs_params;
  rhs_params.rows = filter_cols;
  rhs_params.cols = batches;
  rhs_params.order = cpu_backend_gemm::Order::kColMajor;
  rhs_params.zero_point = -input_offset;
  cpu_backend_gemm::MatrixParams<int8> dst_params;
  dst_params.rows = filter_rows;
  dst_params.cols = batches;
  dst_params.order = cpu_backend_gemm::Order::kColMajor;
  dst_params.zero_point = output_offset;
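  // The int32 accumulators are requantized to int8 inside the GEMM: bias
  // addition, the fixed-point multiplier/exponent rescale, and clamping to
  // the activation range are all applied in the GEMM epilogue.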
  cpu_backend_gemm::GemmParams<int32, int8> gemm_params;
  gemm_params.bias = bias_data;
  gemm_params.clamp_min = output_activation_min;
  gemm_params.clamp_max = output_activation_max;
  gemm_params.multiplier_fixedpoint = output_multiplier;
  gemm_params.multiplier_exponent = output_shift;
  cpu_backend_gemm::Gemm(lhs_params, filter_data, rhs_params, input_data,
                         dst_params, output_data, gemm_params,
                         cpu_backend_context);
}

}  // namespace optimized_integer_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_FULLY_CONNECTED_H_