/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/coreml/builders/op_builder.h"

#include <string>

#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/delegates/coreml/builders/op_factory.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace tflite {
namespace delegates {
namespace coreml {

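// A Core ML tensor is identified by the ID of the node that produces it and
// the index of that output within the node; its string name is
// "<node id>_<output index>".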
std::string TensorID::ToString() const {
  return std::to_string(node_) + "_" + std::to_string(output_id_);
}

int TensorID::NodeID() const { return node_; }

int TensorID::OutputID() const { return output_id_; }

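// Maps a TFLite builtin operator code to the factory that creates the
// corresponding Core ML op builder. Returns nullptr for unsupported ops.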
OpBuilder* GraphBuilder::AddBuilder(int builtin_code, const TfLiteNode* node) {
  switch (builtin_code) {
    case kTfLiteBuiltinAdd:
      return AddBuilder(CreateAddOpBuilder, node);
    case kTfLiteBuiltinAveragePool2d:
      return AddBuilder(CreateAveragePool2dOpBuilder, node);
    case kTfLiteBuiltinConcatenation:
      return AddBuilder(CreateConcatenationOpBuilder, node);
    case kTfLiteBuiltinConv2d:
      return AddBuilder(CreateConvolutionOpBuilder, node);
    case kTfLiteBuiltinDepthwiseConv2d:
      return AddBuilder(CreateDepthwiseConvolutionOpBuilder, node);
    // TODO(b/141490853): Add a proper dequantize OpBuilder for int8/uint8
    // inputs.
    case kTfLiteBuiltinDequantize:
      // FP16 dequantize ops are claimed by the delegate to keep them from
      // running on the CPU, but they don't need to be executed by the Core ML
      // delegate either, so a dummy (no-op) builder is used.
      return AddBuilder(CreateDummyOpBuilder, node);
    case kTfLiteBuiltinFullyConnected:
      return AddBuilder(CreateFullyConnectedOpBuilder, node);
    case kTfLiteBuiltinLogistic:
      return AddBuilder(CreateLogisticOpBuilder, node);
    case kTfLiteBuiltinMaxPool2d:
      return AddBuilder(CreateMaxPool2dOpBuilder, node);
    case kTfLiteBuiltinMean:
      return AddBuilder(CreateMeanOpBuilder, node);
    case kTfLiteBuiltinMirrorPad:
      return AddBuilder(CreateMirrorPadOpBuilder, node);
    case kTfLiteBuiltinMul:
      return AddBuilder(CreateMulOpBuilder, node);
    case kTfLiteBuiltinPad:
    case kTfLiteBuiltinPadv2:
      return AddBuilder(CreatePadOpBuilder, node);
    case kTfLiteBuiltinRelu:
      return AddBuilder(CreateReluOpBuilder, node);
    case kTfLiteBuiltinReluN1To1:
      return AddBuilder(CreateReluN1To1OpBuilder, node);
    case kTfLiteBuiltinRelu6:
      return AddBuilder(CreateRelu6OpBuilder, node);
    case kTfLiteBuiltinReshape:
      return AddBuilder(CreateReshapeOpBuilder, node);
    case kTfLiteBuiltinResizeBilinear:
      return AddBuilder(CreateResizeBilinearOpBuilder, node);
    case kTfLiteBuiltinSoftmax:
      return AddBuilder(CreateSoftmaxOpBuilder, node);
    case kTfLiteBuiltinTanh:
      return AddBuilder(CreateTanhOpBuilder, node);
    case kTfLiteBuiltinTransposeConv:
      return AddBuilder(CreateTransposeConvolutionOpBuilder, node);
    case kTfLiteBuiltinHardSwish:
      return AddBuilder(CreateHardSwishOpBuilder, node);
    default:
      return nullptr;
  }
}

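// Instantiates an op builder via `builder`, stores it in `builders_`, and
// assigns it a 1-based node ID (the size of `builders_` after insertion).
// Returns nullptr if `builder` is null.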
OpBuilder* GraphBuilder::AddBuilder(
    const std::function<OpBuilder*(GraphBuilder*)>& builder,
    const TfLiteNode* node) {
  if (builder == nullptr) {
    fprintf(stderr, "builder should be set.\n");
    return nullptr;
  }
  OpBuilder* op = builder(this);

  builders_.emplace_back(op);
  op->SetNodeID(builders_.size());
  if (node != nullptr) {
    op->SetBuiltinData(node->builtin_data);
    op->SetTfLiteNode(node);
  }
  return builders_.back().get();
}

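// Builds the Core ML model proto from the registered op builders. The Core ML
// version determines the specification version: Core ML 2 maps to spec
// version 3, Core ML 3 to spec version 4. The caller takes ownership of the
// returned model; nullptr is returned for unsupported Core ML versions.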
CoreML::Specification::Model* GraphBuilder::BuildModel() {
  CoreML::Specification::Model* model = new CoreML::Specification::Model();
  if (coreml_version_ == 2) {  // Core ML 2, iOS >= 12.0
    model->set_specificationversion(3);
  } else if (coreml_version_ == 3) {  // Core ML 3, iOS >= 13.0
    model->set_specificationversion(4);
    model->mutable_neuralnetwork()->set_arrayinputshapemapping(
        CoreML::Specification::EXACT_ARRAY_MAPPING);
  } else {
    fprintf(stderr, "Unsupported Core ML version: %d\n", coreml_version_);
    delete model;
    return nullptr;
  }
  auto* neural_network = model->mutable_neuralnetwork();
  for (auto& builder : builders_) {
    CoreML::Specification::NeuralNetworkLayer* layer = builder->Build();
    if (layer == nullptr) {
      fprintf(stderr, "Null layer returned from builder: %s\n",
              builder->DebugName().c_str());
      continue;
    }
    neural_network->mutable_layers()->AddAllocated(layer);
  }
  return model;
}

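// Associates a TFLite tensor index with its Core ML tensor ID, growing the
// lookup tables as needed.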
void GraphBuilder::AddTensorWithID(int tf_tensor_id,
                                   const TensorID& tensor_id) {
  if (tensors_.size() <= tf_tensor_id) {
    tensors_.resize(tf_tensor_id + 1);
    used_tensor_.resize(tf_tensor_id + 1);
  }
  tensors_[tf_tensor_id] = tensor_id;
}

std::string GraphBuilder::GetTensorName(int tensor_id) {
  return GetTensorID(tensor_id).ToString();
}

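// Returns the Core ML tensor ID registered for `tensor_id` and marks the
// tensor as used. Returns the invalid ID (-1, -1) if the tensor was never
// registered.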
const TensorID GraphBuilder::GetTensorID(int tensor_id) {
  if (!HasTensor(tensor_id)) {
    // TODO(karimnosseir): Double check if this happens; if we are adding
    // tensors in execution order it shouldn't happen.
    fprintf(stderr, "Index out of range! Requested index %d, size %d\n",
            tensor_id, static_cast<int>(tensors_.size()));
    // Return invalid ID.
    return TensorID(-1, -1);
  }
  used_tensor_[tensor_id] = true;
  return tensors_[tensor_id];
}

bool GraphBuilder::HasTensor(int tflite_tensor_index) {
  if (tensors_.size() <= tflite_tensor_index) {
    return false;
  }
  return tensors_[tflite_tensor_index].NodeID() != -1;
}

bool GraphBuilder::IsTensorUsed(int tflite_tensor_index) {
  if (!HasTensor(tflite_tensor_index)) return false;
  return used_tensor_[tflite_tensor_index];
}

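// Finalizes the layer: sets its name to the op's debug name and releases
// ownership of the layer proto to the caller.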
CoreML::Specification::NeuralNetworkLayer* OpBuilder::Build() {
  layer_->set_name(DebugName());
  return layer_.release();
}

TfLiteStatus OpBuilder::PopulateSubgraph(TfLiteContext* context) {
  builder_output_ = AddOutput();
  return kTfLiteOk;
}

void OpBuilder::SetBuiltinData(void* builtin_data) {
  builtin_data_ = builtin_data;
}

void OpBuilder::SetNodeID(int id) { node_id_ = id; }

void OpBuilder::SetTfLiteNode(const TfLiteNode* node) { tflite_node_ = node; }

int OpBuilder::GetID() const { return node_id_; }

TensorID OpBuilder::GetOutput(TfLiteContext* context) {
  if (builder_output_.NodeID() != -1) {
    return builder_output_;
  }
  // builder_output_ is not set when PopulateSubgraph is not called.
  builder_output_ = AddOutput();
  return builder_output_;
}

void OpBuilder::AddInput(const std::string& input_name) {
  if (layer_ == nullptr) {
    layer_.reset(new CoreML::Specification::NeuralNetworkLayer);
  }
  *layer_->mutable_input()->Add() = input_name;
}

void OpBuilder::AddInput(const TensorID& input_id) {
  AddInput(input_id.ToString());
}

void OpBuilder::AddInput(int tf_input_id) {
  AddInput(graph_builder_->GetTensorName(tf_input_id));
}

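// Allocates the next output slot for this op and names it after the node ID
// and output index, e.g. "3_0" for the first output of node 3.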
TensorID OpBuilder::AddOutput() {
  auto tensor_id = TensorID(GetID(), num_outputs_++);
  *layer_->mutable_output()->Add() = tensor_id.ToString();
  return tensor_id;
}

void OpBuilder::SetDebugName(const char* name, int id) {
  debug_name_ = std::string(name) + "_" + std::to_string(id);
}

}  // namespace coreml
}  // namespace delegates
}  // namespace tflite