/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/contrib/lite/toco/tflite/export.h"

#include "flatbuffers/flexbuffers.h"
#include "absl/strings/str_join.h"
#include "tensorflow/contrib/lite/schema/schema_generated.h"
#include "tensorflow/contrib/lite/toco/tflite/operator.h"
#include "tensorflow/contrib/lite/toco/tflite/types.h"
#include "tensorflow/contrib/lite/toco/tooling_util.h"
#include "tensorflow/contrib/lite/version.h"

namespace toco {

namespace tflite {

using flatbuffers::FlatBufferBuilder;
using flatbuffers::Offset;
using flatbuffers::Vector;
using ::tflite::Buffer;
using ::tflite::BuiltinOperator;
using ::tflite::BuiltinOperator_CUSTOM;
using ::tflite::BuiltinOperator_MAX;
using ::tflite::BuiltinOperator_MIN;
using ::tflite::CreateBuffer;
using ::tflite::CreateModel;
using ::tflite::CreateOperator;
using ::tflite::CreateTensor;
using ::tflite::Operator;
using ::tflite::OperatorCode;
using ::tflite::SubGraph;
using ::tflite::Tensor;

namespace {

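// Returns the lookup key for an operator: its TOCO type plus, for wrapped
// unsupported TensorFlow ops, the original TensorFlow op name as custom code,
// so that distinct unsupported ops get distinct keys.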
details::OperatorKey GetOperatorKey(const ::toco::Operator& op) {
  string custom_code;
  if (op.type == OperatorType::kTensorFlowUnsupported) {
    const TensorFlowUnsupportedOperator& unsupported_op =
        static_cast<const TensorFlowUnsupportedOperator&>(op);
    custom_code = unsupported_op.tensorflow_op;
  }
  return details::OperatorKey(op.type, custom_code);
}

}  // Anonymous namespace.

namespace details {

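// Assigns each array in the model a unique tensor index, in lexicographic
// order of the array names.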
void LoadTensorsMap(const Model& model, TensorsMap* tensors_map) {
  // First find a list of unique array names.
  std::set<string> names;
  for (const auto& array_pair : model.GetArrayMap()) {
    names.insert(array_pair.first);
  }

  // Now assign indices to them and fill in the map.
  int index = 0;
  for (const auto& name : names) {
    (*tensors_map)[name] = index;
    ++index;
  }
}

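// Assigns each distinct operator key in the model a unique opcode index.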
void LoadOperatorsMap(const Model& model, OperatorsMap* operators_map) {
  // First find a list of unique operator keys.
  std::set<OperatorKey> keys;
  for (const auto& op : model.operators) {
    keys.insert(GetOperatorKey(*op));
  }
  // Now assign indices to them and fill in the map.
  int index = 0;
  for (const auto& key : keys) {
    (*operators_map)[key] = index;
    ++index;
  }
}
}  // namespace details

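// Serializes every model array as a TF Lite tensor, recording min/max and
// quantization parameters when present. Each array is also appended to
// buffers_to_write so that ExportBuffers can emit its data later; the
// returned vector is ordered by the indices in tensors_map.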
Offset<Vector<Offset<Tensor>>> ExportTensors(
    const Model& model, const details::TensorsMap& tensors_map,
    FlatBufferBuilder* builder, std::vector<const Array*>* buffers_to_write) {
  // In the end we will need to produce a vector sorted by the indices of the
  // tensors in the tensors_map.
  std::map<int, Offset<Tensor>> ordered_tensors;

  for (const auto& array_pair : model.GetArrayMap()) {
    const string& tensor_name = array_pair.first;
    const toco::Array& array = *array_pair.second;

    int buffer_index = buffers_to_write->size();
    auto type = DataType::Serialize(array.data_type);
    buffers_to_write->push_back(&array);

    std::vector<int> shape;
    if (array.has_shape()) {
      for (int d : array.shape().dims()) {
        shape.push_back(d);
      }
    }

    Offset<Vector<float>> min;
    Offset<Vector<float>> max;
    Offset<Vector<float>> scale;
    Offset<Vector<int64_t>> zero_point;
    if (array.minmax) {
      min = builder->CreateVector(
          std::vector<float>{static_cast<float>(array.minmax->min)});
      max = builder->CreateVector(
          std::vector<float>{static_cast<float>(array.minmax->max)});
    }
    if (array.quantization_params) {
      scale = builder->CreateVector(std::vector<float>{
          static_cast<float>(array.quantization_params->scale)});
      zero_point = builder->CreateVector(
          std::vector<int64_t>{array.quantization_params->zero_point});
    }
    auto q_param = ::tflite::CreateQuantizationParameters(*builder, min, max,
                                                          scale, zero_point);

    int index = tensors_map.at(tensor_name);
    ordered_tensors[index] =
        CreateTensor(*builder, builder->CreateVector(shape), type, buffer_index,
                     builder->CreateString(tensor_name), q_param);
  }

  std::vector<Offset<Tensor>> tensor_vector;
  tensor_vector.reserve(ordered_tensors.size());
  for (const auto& tensor : ordered_tensors) {
    tensor_vector.push_back(tensor.second);
  }

  return builder->CreateVector(tensor_vector);
}

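// Returns the tensor indices of the model's input arrays.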
Offset<Vector<int32_t>> ExportInputTensors(
    const Model& model, const details::TensorsMap& tensors_map,
    FlatBufferBuilder* builder) {
  std::vector<int32_t> inputs;
  for (const auto& input : model.flags.input_arrays()) {
    inputs.push_back(tensors_map.at(input.name()));
  }
  return builder->CreateVector<int32_t>(inputs);
}

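// Returns the tensor indices of the model's output arrays.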
Offset<Vector<int32_t>> ExportOutputTensors(
    const Model& model, const details::TensorsMap& tensors_map,
    FlatBufferBuilder* builder) {
  std::vector<int32_t> outputs;
  for (const string& output : model.flags.output_arrays()) {
    outputs.push_back(tensors_map.at(output));
  }
  return builder->CreateVector<int32_t>(outputs);
}

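// Serializes one OperatorCode per distinct operator in the model. Builtin
// operators are referenced by enum value; anything else is emitted as a
// custom op, and its name is added to error_summary when that is non-null.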
Offset<Vector<Offset<OperatorCode>>> ExportOperatorCodes(
    const Model& model,
    const std::map<OperatorType, std::unique_ptr<BaseOperator>>& ops_by_type,
    const details::OperatorsMap& operators_map, FlatBufferBuilder* builder,
    std::set<string>* error_summary) {
  // Map from operator name to TF Lite enum value, for all builtins.
  std::map<string, BuiltinOperator> builtin_ops;
  for (int i = BuiltinOperator_MIN; i <= BuiltinOperator_MAX; ++i) {
    BuiltinOperator op = static_cast<BuiltinOperator>(i);
    string name = EnumNameBuiltinOperator(op);
    if (op != BuiltinOperator_CUSTOM && !name.empty()) {
      builtin_ops[name] = op;
    }
  }

  // We will need to produce a vector of codes in the same order as they
  // appear in the operators_map.
  std::map<int, Offset<OperatorCode>> ordered_opcodes;

  for (const auto& op : model.operators) {
    const details::OperatorKey operator_key = GetOperatorKey(*op);
    int op_index = operators_map.at(operator_key);

    string name = HelpfulOperatorTypeName(*op);
    bool is_builtin = false;
    if (ops_by_type.count(op->type) != 0) {
      name = ops_by_type.at(op->type)->name();
      is_builtin = (builtin_ops.count(name) > 0);
    }

    if (is_builtin) {
      ordered_opcodes[op_index] =
          CreateOperatorCode(*builder, builtin_ops[name], 0);
    } else {
      // This could be a kTensorFlowUnsupported, in which case we should be
      // able to retrieve the original TensorFlow name from the OperatorKey, or
      // this could be a proper TOCO operator that is completely unknown to TF
      // Lite.
      if (!operator_key.custom_code.empty()) {
        name = operator_key.custom_code;
      }
      // Either way, this is an operator that is not supported by TF Lite,
      // so we output it as a custom op and add it to the error summary.
      if (error_summary) {
        error_summary->insert(name);
      }
      ordered_opcodes[op_index] = CreateOperatorCode(
          *builder, BuiltinOperator_CUSTOM, builder->CreateString(name));
    }
  }

  std::vector<Offset<OperatorCode>> opcode_vector;
  opcode_vector.reserve(ordered_opcodes.size());
  for (const auto& opcode : ordered_opcodes) {
    opcode_vector.push_back(opcode.second);
  }

  return builder->CreateVector(opcode_vector);
}

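// Serializes the model's operators, wiring up their input/output tensor
// indices, opcode indices, and per-operator options.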
Offset<Vector<Offset<Operator>>> ExportOperators(
    const Model& model,
    const std::map<OperatorType, std::unique_ptr<BaseOperator>>& ops_by_type,
    const details::OperatorsMap& operators_map,
    const details::TensorsMap& tensors_map, FlatBufferBuilder* builder) {
  // The operators are in execution order, so we just follow tf.mini order.
  std::vector<Offset<Operator>> op_vector;
  for (const auto& op : model.operators) {
    std::vector<int32_t> inputs;
    for (const string& input : op->inputs) {
      // -1 is the ID for an optional tensor in the TF Lite output.
      int id = model.IsOptionalArray(input) ? -1 : tensors_map.at(input);
      inputs.push_back(id);
    }
    std::vector<int32_t> outputs;
    for (const string& output : op->outputs) {
      outputs.push_back(tensors_map.at(output));
    }

    int op_index = operators_map.at(GetOperatorKey(*op));

    // This is a custom op unless we can find it in ops_by_type, and even then
    // it could be a custom op (such as kTensorFlowUnsupported).
    auto options = Options::Custom(0);
    if (ops_by_type.count(op->type) != 0) {
      options = ops_by_type.at(op->type)->Serialize(*op, builder);
    }
    // Currently, the only supported CustomOptionFormat is FLEXBUFFERS.
    op_vector.push_back(CreateOperator(
        *builder, op_index, builder->CreateVector(inputs),
        builder->CreateVector(outputs), options.type, options.builtin,
        options.custom, ::tflite::CustomOptionsFormat_FLEXBUFFERS));
  }

  return builder->CreateVector(op_vector);
}

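// Serializes the data of every array collected by ExportTensors as a
// TF Lite buffer.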
Offset<Vector<Offset<Buffer>>> ExportBuffers(
    const Model& model, const std::vector<const Array*>& buffers_to_write,
    FlatBufferBuilder* builder) {
  std::vector<Offset<Buffer>> buffer_vector;
  for (const Array* array_ptr : buffers_to_write) {
    const Array& array = *array_ptr;
    Offset<Vector<uint8_t>> data_buffer = DataBuffer::Serialize(array, builder);
    buffer_vector.push_back(CreateBuffer(*builder, data_buffer));
  }
  return builder->CreateVector(buffer_vector);
}

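// Writes the model as a TF Lite flatbuffer into output_file_contents. Unless
// allow_custom_ops is set, exporting aborts with a fatal error if the model
// contains any operator that the standard TF Lite runtime does not support.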
void Export(const Model& model, bool allow_custom_ops,
            string* output_file_contents) {
  flatbuffers::FlatBufferBuilder builder(/*initial_size=*/10240);

  const auto ops_by_type = BuildOperatorByTypeMap();

  details::TensorsMap tensors_map;
  details::LoadTensorsMap(model, &tensors_map);

  details::OperatorsMap operators_map;
  details::LoadOperatorsMap(model, &operators_map);

  std::vector<const Array*> buffers_to_write;
  Array empty_array;
  buffers_to_write.push_back(&empty_array);

  auto tensors = ExportTensors(model, tensors_map, &builder, &buffers_to_write);
  auto inputs = ExportInputTensors(model, tensors_map, &builder);
  auto outputs = ExportOutputTensors(model, tensors_map, &builder);

  std::set<string> error_summary;
  auto op_codes = ExportOperatorCodes(model, ops_by_type, operators_map,
                                      &builder, &error_summary);
  if (!allow_custom_ops && !error_summary.empty()) {
    LOG(QFATAL) << "Some of the operators in the model are not supported by "
                   "the standard TensorFlow Lite runtime. If you have a custom "
                   "implementation for them you can disable this error with "
                   "--allow_custom_ops. Here is a list of operators for which "
                   "you will need custom implementations: "
                << absl::StrJoin(error_summary, ", ") << ".";
  }

  auto ops =
      ExportOperators(model, ops_by_type, operators_map, tensors_map, &builder);

  // TODO(aselle): add support to toco for multiple subgraphs.
  auto subgraph = CreateSubGraph(builder, tensors, inputs, outputs, ops);
  std::vector<flatbuffers::Offset<SubGraph>> subgraphs = {subgraph};

  auto buffers = ExportBuffers(model, buffers_to_write, &builder);
  auto description = builder.CreateString("TOCO Converted.");
  auto new_model_location =
      CreateModel(builder, TFLITE_SCHEMA_VERSION, op_codes,
                  builder.CreateVector(subgraphs), description, buffers);
  ::tflite::FinishModelBuffer(builder, new_model_location);
  const uint8_t* buffer = builder.GetBufferPointer();
  int size = builder.GetSize();
  *output_file_contents = string(reinterpret_cast<const char*>(buffer), size);
}

}  // namespace tflite

}  // namespace toco