/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/hexagon/builders/op_builder.h"

namespace tflite {
namespace delegates {
namespace hexagon {
// Builder for SquaredDifference op by computing Mul(Sub(A,B), Sub(A,B))
class SquaredDifferenceOpBuilder : public OpBuilder {
 public:
  explicit SquaredDifferenceOpBuilder(GraphBuilder* graph_builder, int op_type)
      : OpBuilder(graph_builder, op_type) {}
  TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
                                const TfLiteIntArray* outputs,
                                TfLiteContext* context) override;

  TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
                               TfLiteContext* context) override;

 private:
  TensorID node_output_;
};

TfLiteStatus SquaredDifferenceOpBuilder::PopulateSubGraph(
    const TfLiteIntArray* inputs, const TfLiteIntArray* outputs,
    TfLiteContext* context) {
  // We model Squared Diff as Mul(Sub(a,b), Sub(a,b))

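  // Two Hexagon nodes are emitted: this builder's own node carries the Sub
  // (its op type is the value passed to the constructor), and a second
  // OP_QuantizedMul_8x8to8 node added below squares the difference.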
  // Add first Sub op.
  const int tensor_a_index = inputs->data[0];
  const int tensor_b_index = inputs->data[1];
  const auto& tensor_a = context->tensors[tensor_a_index];
  const auto& tensor_b = context->tensors[tensor_b_index];
  AddInput(graph_builder_->GetHexagonTensorId(tensor_a_index));
  AddInput(graph_builder_->GetHexagonTensorId(tensor_b_index));
  // Inputs min/max
  TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, tensor_a));
  TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, tensor_b));
  // Output details.
  float output_min = -1, output_max = -1;
  TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
      context->tensors[outputs->data[0]], &output_min, &output_max));
  auto* output_min_const = graph_builder_->AddConstNodeWithData(
      kScalarShape, reinterpret_cast<char*>(&output_min), sizeof(output_min));
  auto* output_max_const = graph_builder_->AddConstNodeWithData(
      kScalarShape, reinterpret_cast<char*>(&output_max), sizeof(output_max));
  int output_batch_size, output_height_size, output_width_size,
      output_depth_size;
  GetDims(&output_batch_size, &output_height_size, &output_width_size,
          &output_depth_size, context->tensors[outputs->data[0]].dims);

  auto sub_out = AddOutput(sizeof(uint8_t), 4,
                           {output_batch_size, output_height_size,
                            output_width_size, output_depth_size});
  auto sub_min = AddOutput(sizeof(float), 4, kScalarShape);
  auto sub_max = AddOutput(sizeof(float), 4, kScalarShape);

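  // The Mul squares the difference: the Sub output (and its range) is fed in
  // as both operands, and the const min/max derived from the TFLite output
  // tensor are passed as the Mul's output range.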
  // Add Mul
  auto* mul_op = graph_builder_->AddNode(GetTFLiteNodeID());
  mul_op->SetOpType(OP_QuantizedMul_8x8to8);
  mul_op->AddInput(sub_out);
  mul_op->AddInput(sub_out);
  mul_op->AddInput(sub_min);
  mul_op->AddInput(sub_max);
  mul_op->AddInput(sub_min);
  mul_op->AddInput(sub_max);
  mul_op->AddInput(TensorID(output_min_const->GetID(), 0));
  mul_op->AddInput(TensorID(output_max_const->GetID(), 0));
  node_output_ = mul_op->AddOutput(sizeof(uint8_t), 4,
                                   {output_batch_size, output_height_size,
                                    output_width_size, output_depth_size});
  mul_op->AddOutput(sizeof(float), 4, kScalarShape);
  mul_op->AddOutput(sizeof(float), 4, kScalarShape);

  return kTfLiteOk;
}

TfLiteStatus SquaredDifferenceOpBuilder::RegisterOutputs(
    const TfLiteIntArray* outputs, TfLiteContext* context) {
  graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
                                  node_output_.second);
  return kTfLiteOk;
}

OpBuilder* CreateSquaredDifferenceOpBuilder(GraphBuilder* graph_builder,
                                            int op_type) {
  return new SquaredDifferenceOpBuilder(graph_builder, op_type);
}

}  // namespace hexagon
}  // namespace delegates
}  // namespace tflite