/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/hexagon/builders/pool_2d_builder.h"

#include <stdint.h>

#include <limits>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace tflite {
namespace delegates {
namespace hexagon {
PopulateSubGraph(const TfLiteIntArray * inputs,const TfLiteIntArray * outputs,TfLiteContext * context)29 TfLiteStatus Pool2dOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
30 const TfLiteIntArray* outputs,
31 TfLiteContext* context) {
32 // Input data tensor.
33 int tensor_id = inputs->data[0];
34 const auto& data_tensor = context->tensors[tensor_id];
35 AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
36 TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, data_tensor));
37
38 const TfLitePoolParams* pool_params =
39 reinterpret_cast<const TfLitePoolParams*>(builtin_data_);
40
41 // Padding type.
42 if (pool_params->padding == kTfLitePaddingSame) {
43 SetPaddingType(NN_PAD_SAME);
44 } else if (pool_params->padding == kTfLitePaddingValid) {
45 SetPaddingType(NN_PAD_VALID);
46 }
47
48 // Pooling window (filter) width/height as inputs.
49 static int dummy = 0;
50 filter_shape_ = {1, pool_params->filter_height, pool_params->filter_width, 1};
51 auto* filter_node = graph_builder_->AddConstNodeWithData(
52 filter_shape_.data(), (char*)&dummy, sizeof(dummy));
53 AddInput(TensorID(filter_node->GetID(), 0));
54 // Stride width/height as inputs.
55 stride_shape_ = {1, pool_params->stride_height, pool_params->stride_width, 1};
56 auto* stride_node = graph_builder_->AddConstNodeWithData(
57 stride_shape_.data(), (char*)&dummy, sizeof(dummy));
58 AddInput(TensorID(stride_node->GetID(), 0));
59
60 // Hexagon outputs for this node.
61 int output_batch_size, output_height_size, output_width_size,
62 output_depth_size;
63 GetDims(&output_batch_size, &output_height_size, &output_width_size,
64 &output_depth_size, context->tensors[outputs->data[0]].dims);
65
66 if (op_node_.op_type == OP_QuantizedMaxPool_8) {
67 node_output_ = AddOutput(sizeof(uint8_t), 4,
68 {output_batch_size, output_height_size,
69 output_width_size, output_depth_size});
70 AddOutput(sizeof(float), 4, kScalarShape);
71 AddOutput(sizeof(float), 4, kScalarShape);
72 } else {
73 // Hexagon's AvgPool output has different min/max bounds than what TFLite
74 // expects. Therefore, we add a Requantize op to correct the ranges.
75 TensorID pool_out = AddOutput(sizeof(uint8_t), 4,
76 {output_batch_size, output_height_size,
77 output_width_size, output_depth_size});
78 const auto& pool_out_min = AddOutput(sizeof(float), 4, kScalarShape);
79 const auto& pool_out_max = AddOutput(sizeof(float), 4, kScalarShape);
80
81 // Output min/max for requantization.
82 TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
83 context->tensors[outputs->data[0]], &output_min_, &output_max_));
84 auto* output_min_const = graph_builder_->AddConstNodeWithData(
85 kScalarShape, (char*)&output_min_, sizeof(output_min_));
86 auto* output_max_const = graph_builder_->AddConstNodeWithData(
87 kScalarShape, (char*)&output_max_, sizeof(output_max_));
88
89 auto* requantize_op = graph_builder_->AddNode(GetTFLiteNodeID());
90 requantize_op->SetOpType(OP_Requantize_8to8);
91 requantize_op->AddInput(pool_out);
92 requantize_op->AddInput(pool_out_min);
93 requantize_op->AddInput(pool_out_max);
94 requantize_op->AddInput(TensorID(output_min_const->GetID(), 0));
95 requantize_op->AddInput(TensorID(output_max_const->GetID(), 0));
96 node_output_ =
97 requantize_op->AddOutput(sizeof(uint8_t), 4,
98 {output_batch_size, output_height_size,
99 output_width_size, output_depth_size});
100 requantize_op->AddOutput(sizeof(float), 4, kScalarShape);
101 requantize_op->AddOutput(sizeof(float), 4, kScalarShape);
102 }
103
104 return kTfLiteOk;
105 }
106
RegisterOutputs(const TfLiteIntArray * outputs,TfLiteContext * context)107 TfLiteStatus Pool2dOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
108 TfLiteContext* context) {
109 // Should be only 1 output.
110 graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
111 node_output_.second);
112
113 return kTfLiteOk;
114 }
115
~Pool2dOpBuilder()116 Pool2dOpBuilder::~Pool2dOpBuilder() {}
117
CreatePool2DBuilder(GraphBuilder * graph_builder,int op_type)118 OpBuilder* CreatePool2DBuilder(GraphBuilder* graph_builder, int op_type) {
119 return new Pool2dOpBuilder(graph_builder, op_type);
120 }

}  // namespace hexagon
}  // namespace delegates
}  // namespace tflite