/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/hexagon/builders/reduce_builder.h"

#include <stdint.h>

#include <limits>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/util.h"

namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus ReduceOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
                                               const TfLiteIntArray* outputs,
                                               TfLiteContext* context) {
  // Input data tensor.
  int tensor_id = inputs->data[0];
  const auto& input_tensor = context->tensors[tensor_id];
  AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
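  // Attach the input's float range, derived from its quantization parameters,
  // as min/max const inputs; Hexagon NN's quantized ops consume these
  // alongside the data tensor.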
  TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, input_tensor));

  // Axes tensor should be constant.
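  // For example, a MEAN over the spatial dimensions of an NHWC input carries
  // a constant axes tensor holding {1, 2}. Non-constant axes would require
  // rebuilding the Hexagon graph at run time, which the delegate doesn't
  // support.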
  tensor_id = inputs->data[1];
  const auto& axes_tensor = context->tensors[tensor_id];
  if (axes_tensor.allocation_type == kTfLiteMmapRo) {
    // If the axes input is a constant, bake it into the Hexagon graph as a
    // Const node.
    auto* const_axes_node =
        graph_builder_->AddConstNodeWithData(tensor_id, axes_tensor);
    AddInput(TensorID(const_axes_node->GetID(), 0));
  } else {
    context->ReportError(context,
                         "Reduction op doesn't have a constant axes input");
    return kTfLiteError;
  }

  auto& output_tensor = context->tensors[outputs->data[0]];
  int output_batch_size, output_height_size, output_width_size,
      output_depth_size;
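  // GetDims maps the (up to 4-D) TFLite shape onto Hexagon's
  // batch/height/width/depth layout, filling missing leading dimensions
  // with 1.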
  GetDims(&output_batch_size, &output_height_size, &output_width_size,
          &output_depth_size, output_tensor.dims);

  float output_min = -1, output_max = -1;
  ComputeMinAndMaxQuantValues(output_tensor, &output_min, &output_max);
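  // For a uint8 tensor these bounds follow from the quantization parameters:
  // min = scale * (0 - zero_point) and max = scale * (255 - zero_point).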
  auto* output_min_const = graph_builder_->AddConstNodeWithData(
      kScalarShape, reinterpret_cast<char*>(&output_min), sizeof(output_min));
  auto* output_max_const = graph_builder_->AddConstNodeWithData(
      kScalarShape, reinterpret_cast<char*>(&output_max), sizeof(output_max));
  // Min/max values for output tensor.
  AddInput(TensorID(output_min_const->GetID(), 0));
  AddInput(TensorID(output_max_const->GetID(), 0));

  // Add outputs.
  size_t output_element_size = 0;
  TF_LITE_ENSURE_STATUS(
      GetSizeOfType(context, output_tensor.type, &output_element_size));
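  // Like other Hexagon NN quantized ops, Mean produces a (data, min, max)
  // triple; the two scalar outputs report the actual float range of the data.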
  auto mean_output = AddOutput(output_element_size, 4,
                               {output_batch_size, output_height_size,
                                output_width_size, output_depth_size});
  auto mean_out_min = AddOutput(output_element_size, 4, kScalarShape);
  auto mean_out_max = AddOutput(output_element_size, 4, kScalarShape);
  // The Mean op doesn't honor the min/max requested for its output, so
  // requantize its result into the expected range.
  auto* requantize_op = graph_builder_->AddNode(GetTFLiteNodeID());
  requantize_op->SetOpType(OP_Requantize_8to8);
  requantize_op->AddInput(mean_output);
  requantize_op->AddInput(mean_out_min);
  requantize_op->AddInput(mean_out_max);
  requantize_op->AddInput(TensorID(output_min_const->GetID(), 0));
  requantize_op->AddInput(TensorID(output_max_const->GetID(), 0));
  node_output_ =
      requantize_op->AddOutput(sizeof(uint8_t), 4,
                               {output_batch_size, output_height_size,
                                output_width_size, output_depth_size});
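  // Requantize likewise reports its own output min/max scalars. They are
  // declared but not consumed here, since the output range is already fixed
  // by the const inputs above.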
  requantize_op->AddOutput(sizeof(float), 4, kScalarShape);
  requantize_op->AddOutput(sizeof(float), 4, kScalarShape);

  return kTfLiteOk;
}

TfLiteStatus ReduceOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
                                              TfLiteContext* context) {
  // Should be only 1 output.
  graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
                                  node_output_.second);

  return kTfLiteOk;
}

ReduceOpBuilder::~ReduceOpBuilder() {}

OpBuilder* CreateReduceBuilder(GraphBuilder* graph_builder, int op_type) {
  return new ReduceOpBuilder(graph_builder, op_type);
}

}  // namespace hexagon
}  // namespace delegates
}  // namespace tflite