/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_DELEGATES_HEXAGON_BUILDERS_CONV_2D_BUILDER_H_
#define TENSORFLOW_LITE_DELEGATES_HEXAGON_BUILDERS_CONV_2D_BUILDER_H_

#include <vector>

#include "tensorflow/lite/delegates/hexagon/builders/op_builder.h"

namespace tflite {
namespace delegates {
namespace hexagon {

// Stores quantization data for Conv/TransposeConv nodes.
// This information is used to handle the per-channel quantized weights &
// biases correctly in the Hexagon delegate.
struct PerChannelQuantData {
  // This is initialized while processing quantized weights, and acts as an
  // input to Hexagon Conv nodes.
  OpBuilder* channel_scales_node = nullptr;
  // Scale information is obtained from TfLiteAffineQuantization in the weights
  // tensor.
  float* scales_data = nullptr;
  int num_scale_values = 1;
};
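
// Illustrative sketch (an assumption, not part of the original header): the
// fields above typically mirror the weights tensor's TfLiteAffineQuantization
// params, e.g. for a hypothetical per-channel quantized `weights_tensor`:
//
//   auto* quant_params = reinterpret_cast<TfLiteAffineQuantization*>(
//       weights_tensor.quantization.params);
//   PerChannelQuantData per_channel_quant;
//   per_channel_quant.scales_data = quant_params->scale->data;
//   per_channel_quant.num_scale_values = quant_params->scale->size;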

class Conv2dOpBuilder : public OpBuilder {
 public:
  explicit Conv2dOpBuilder(GraphBuilder* graph_builder, int op_type)
      : OpBuilder(graph_builder, op_type) {}
  TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
                                const TfLiteIntArray* outputs,
                                TfLiteContext* context) override;

  TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
                               TfLiteContext* context) override;

  ~Conv2dOpBuilder() override;

 private:
  TfLiteStatus InitializeWeightsNodes(const TfLiteIntArray* inputs,
                                      const TfLiteIntArray* outputs,
                                      TfLiteContext* context,
                                      const int input_depth);

  TfLiteStatus InitializeBiasNodes(const TfLiteIntArray* inputs,
                                   const TfLiteIntArray* outputs,
                                   TfLiteContext* context);

  TensorID node_output_;
  std::vector<float> transposed_weights_;
  std::vector<int> stride_shape_;
  std::vector<int> weight_shape_;
  OpBuilder* weights_min_node_ = nullptr;
  OpBuilder* weights_max_node_ = nullptr;
  OpBuilder* bias_min_node_ = nullptr;
  OpBuilder* bias_max_node_ = nullptr;

  // Modified only if node has per-channel quantized weights/biases.
  PerChannelQuantData per_channel_quant_;

  // Only used for dilated Depthwise Conv.
  std::vector<int> dilation_factors_h_w_;
  std::vector<int> space_to_batch_paddings_;
  std::vector<int> batch_to_space_crops_;
};

// ProcessPerChannelQuantizedWeights & ProcessPerChannelQuantizedBias can be
// used to pre-process per-channel quantized weights & biases for Hexagon.
// NOTE: ProcessPerChannelQuantizedWeights should be run before
// ProcessPerChannelQuantizedBias. This is because we set PerChannelQuantData
// based on the weights tensor, which is utilized while preprocessing bias.
// An example call sequence is sketched after the declarations below.

TfLiteStatus ProcessPerChannelQuantizedWeights(
    const TfLiteIntArray* inputs, const TfLiteIntArray* outputs,
    TfLiteContext* context, float* weights_min, float* weights_max,
    GraphBuilder* graph_builder, PerChannelQuantData* per_channel_quant);

TfLiteStatus ProcessPerChannelQuantizedBias(
    const TfLiteIntArray* inputs, const TfLiteIntArray* outputs,
    TfLiteContext* context, float* bias_min, float* bias_max,
    GraphBuilder* graph_builder, PerChannelQuantData* per_channel_quant,
    OpBuilder** bias_const_node = nullptr);
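
// Illustrative call sequence (a sketch, not part of the original header):
// `inputs`, `outputs`, `context`, and `graph_builder` are assumed to come
// from a caller such as Conv2dOpBuilder::PopulateSubGraph.
//
//   float weights_min = 0.0f, weights_max = 0.0f;
//   float bias_min = 0.0f, bias_max = 0.0f;
//   PerChannelQuantData per_channel_quant;
//   // Weights first: this fills per_channel_quant (channel scales), which
//   // the bias pre-processing below depends on.
//   TF_LITE_ENSURE_STATUS(ProcessPerChannelQuantizedWeights(
//       inputs, outputs, context, &weights_min, &weights_max, graph_builder,
//       &per_channel_quant));
//   TF_LITE_ENSURE_STATUS(ProcessPerChannelQuantizedBias(
//       inputs, outputs, context, &bias_min, &bias_max, graph_builder,
//       &per_channel_quant));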

}  // namespace hexagon
}  // namespace delegates
}  // namespace tflite

#endif  // TENSORFLOW_LITE_DELEGATES_HEXAGON_BUILDERS_CONV_2D_BUILDER_H_