/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/mlir/lite/python/graphdef_to_tfl_flatbuffer.h"

#include <ostream>
#include <utility>

#include "llvm/ADT/None.h"
#include "llvm/Support/ToolOutputFile.h"
#include "mlir/IR/BuiltinOps.h"  // from @llvm-project
#include "mlir/IR/MLIRContext.h"  // from @llvm-project
#include "mlir/Pass/Pass.h"  // from @llvm-project
#include "mlir/Support/FileUtilities.h"  // from @llvm-project
#include "mlir/Transforms/ViewOpGraph.h"  // from @llvm-project
#include "tensorflow/compiler/mlir/lite/common/tfl_pass_config.h"
#include "tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.h"
#include "tensorflow/compiler/mlir/lite/tf_tfl_passes.h"
#include "tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.h"
#include "tensorflow/compiler/mlir/lite/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/graph_debug_info.pb.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_flags.pb.h"
#include "tensorflow/lite/toco/types.pb.h"
#include "tensorflow/stream_executor/lib/statusor.h"

namespace tensorflow {
Status ConvertGraphDefToTFLiteFlatBuffer(const toco::ModelFlags& model_flags,
                                         const toco::TocoFlags& toco_flags,
                                         const GraphDebugInfo& debug_info,
                                         const GraphDef& input,
                                         string* result) {
  mlir::MLIRContext context;
  GraphImportConfig specs;
  mlir::TFL::QuantizationSpecs quant_specs;

  // Parse input arrays.
  std::vector<string> node_names;
  std::vector<string> node_dtypes;
  std::vector<llvm::Optional<std::vector<int>>> node_shapes;
  std::vector<llvm::Optional<double>> node_mins;
  std::vector<llvm::Optional<double>> node_maxs;

  // Populate quantization specs.
  TF_RETURN_IF_ERROR(internal::PopulateQuantizationSpecs(
      model_flags, toco_flags, &quant_specs, &node_names, &node_dtypes,
      &node_shapes, &node_mins, &node_maxs));

  TF_RETURN_IF_ERROR(tensorflow::ParseInputArrayInfo(
      node_names, node_dtypes, node_shapes, &specs.inputs));

  // Parse output arrays.
  std::vector<string> output_arrays(model_flags.output_arrays().begin(),
                                    model_flags.output_arrays().end());
  TF_RETURN_IF_ERROR(
      tensorflow::ParseOutputArrayInfo(output_arrays, &specs.outputs));

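  // Configure the GraphDef importer: prune nodes unreachable from the
  // outputs, convert legacy fed inputs, import the GraphDef as a plain
  // graph rather than a function body, and upgrade legacy (TF v1) control
  // flow.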
  specs.prune_unused_nodes = true;
  specs.convert_legacy_fed_inputs = true;
  specs.graph_as_function = false;
  specs.upgrade_legacy = true;
  internal::WarningUnusedFlags(model_flags, toco_flags);

  // Register all custom ops, including user-specified custom ops.
  TF_RETURN_IF_ERROR(internal::RegisterAllCustomOps(toco_flags));

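  // Import the GraphDef, together with its debug info, into an MLIR module
  // in the TensorFlow dialect.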
  TF_ASSIGN_OR_RETURN(
      auto module, ConvertGraphdefToMlir(input, debug_info, specs, &context));

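  // Set up the TF-to-TFLite pass pipeline: emit builtin TFLite ops unless
  // select-TF-ops-only mode is forced, and lower tensor list ops.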
  mlir::TFL::PassConfig pass_config(quant_specs);
  bool emit_builtin_tflite_ops = !toco_flags.force_select_tf_ops();
  pass_config.emit_builtin_tflite_ops = emit_builtin_tflite_ops;
  pass_config.lower_tensor_list_ops = true;
  // Disable the unfolding of the 16x16 TF::BatchMatMulOp to avoid the
  // conversion to an unsupported 16x16 TFL::FullyConnectedOp.
  if (toco_flags.inference_type() == toco::IODataType::QUANTIZED_INT16) {
    pass_config.unfold_batch_matmul = false;
  }

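  // Run the conversion passes and serialize the lowered module into a TFLite
  // flatbuffer written to `result`. Converting from a GraphDef involves no
  // SavedModel tags and no session.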
  return internal::ConvertMLIRToTFLiteFlatBuffer(
      toco_flags, std::move(module), pass_config, /*saved_model_tags=*/{},
      result,
      /*session=*/llvm::None);
}

}  // namespace tensorflow