/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/hexagon/builders/pad_builder.h"

#include <stdint.h>

#include <limits>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus PadOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
                                            const TfLiteIntArray* outputs,
                                            TfLiteContext* context) {
  // Input data tensor.
  int tensor_id = inputs->data[0];
  const auto& input_tensor = context->tensors[tensor_id];
  AddInput(graph_builder_->GetHexagonTensorId(tensor_id));

  // Min/max values for the input tensor: Hexagon's quantized ops take the
  // input's quantization range as additional scalar float inputs.
  TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, input_tensor));

  // Padding tensor.
  tensor_id = inputs->data[1];
  const auto& padding_tensor = context->tensors[tensor_id];
  if (padding_tensor.allocation_type == kTfLiteMmapRo) {
    // If the padding input is a constant, bake it into the Hexagon graph as a
    // Const node.
    auto* const_padding_node =
        graph_builder_->AddConstNodeWithData(tensor_id, padding_tensor);
    AddInput(TensorID(const_padding_node->GetID(), 0));
  } else {
    AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
  }

  // Hexagon outputs for this node.
  int output_batch_size, output_height_size, output_width_size,
      output_depth_size;
  GetDims(&output_batch_size, &output_height_size, &output_width_size,
          &output_depth_size, context->tensors[outputs->data[0]].dims);
  node_output_ = AddOutput(sizeof(uint8_t), 4,
                           {output_batch_size, output_height_size,
                            output_width_size, output_depth_size});
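  // The remaining two outputs hold the quantization range (min/max) of the
  // padded output as scalar floats.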
  AddOutput(sizeof(float), 4, kScalarShape);
  AddOutput(sizeof(float), 4, kScalarShape);

  return kTfLiteOk;
}

TfLiteStatus PadOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
                                           TfLiteContext* context) {
  // There should be only one TFLite output: the padded data tensor.
  graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
                                  node_output_.second);
  return kTfLiteOk;
}

PadOpBuilder::~PadOpBuilder() {}

OpBuilder* CreatePadBuilder(GraphBuilder* graph_builder, int op_type) {
  return new PadOpBuilder(graph_builder, op_type);
}

}  // namespace hexagon
}  // namespace delegates
}  // namespace tflite