/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_MLIR_LITE_TF_TO_TFL_FLATBUFFER_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_TF_TO_TFL_FLATBUFFER_H_

#include <string>
#include <unordered_set>
#include <vector>

#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/Support/SourceMgr.h"
#include "mlir/IR/BuiltinOps.h"  // from @llvm-project
#include "mlir/IR/MLIRContext.h"  // from @llvm-project
#include "mlir/Pass/PassManager.h"  // from @llvm-project
#include "tensorflow/compiler/mlir/lite/common/tfl_pass_config.h"
#include "tensorflow/compiler/mlir/lite/quantization/quantization_config.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/stream_executor/lib/statusor.h"

namespace tensorflow {

// Loads a TF model from a GraphDef definition or a TF control flow dialect
// MLIR source into an MLIR module. If `input_mlir` is true, loads from an
// MLIR source file; otherwise, loads from a GraphDef.
// If `prune_unused_nodes` is true, unreachable nodes are pruned, provided
// `output_arrays` is specified.
stream_executor::port::StatusOr<mlir::OwningModuleRef>
LoadFromGraphdefOrMlirSource(
    const std::string& input_filename, bool input_mlir,
    bool use_splatted_constant, const std::vector<std::string>& extra_tf_opdefs,
    const GraphImportConfig& specs, absl::string_view debug_info_file,
    absl::string_view input_arrays, absl::string_view input_dtypes,
    absl::string_view input_shapes, absl::string_view output_arrays,
    llvm::SourceMgr* source_mgr, mlir::MLIRContext* context);

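// Example usage (a minimal sketch; the file path, array names, dtypes, and
// shapes below are hypothetical placeholders, not values defined in this
// header):
//
//   mlir::MLIRContext context;
//   llvm::SourceMgr source_mgr;
//   GraphImportConfig specs;
//   auto module_or = LoadFromGraphdefOrMlirSource(
//       "/path/to/model.pb", /*input_mlir=*/false,
//       /*use_splatted_constant=*/false, /*extra_tf_opdefs=*/{}, specs,
//       /*debug_info_file=*/"", /*input_arrays=*/"input",
//       /*input_dtypes=*/"DT_FLOAT", /*input_shapes=*/"1,224,224,3",
//       /*output_arrays=*/"output", &source_mgr, &context);
//   if (!module_or.ok()) { /* handle the import error */ }
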
// Loads a SavedModel (either v1 or v2) into MLIR.
stream_executor::port::StatusOr<mlir::OwningModuleRef> ImportSavedModel(
    const std::string& input_filename, const int saved_model_version,
    const std::unordered_set<std::string>& tags,
    absl::Span<const std::string> extra_tf_opdefs,
    absl::Span<std::string> exported_names, const GraphImportConfig& specs,
    mlir::MLIRContext* context);

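// Example usage (a minimal sketch; the directory path, tag set, and exported
// names are hypothetical placeholders):
//
//   mlir::MLIRContext context;
//   std::unordered_set<std::string> tags = {"serve"};
//   std::vector<std::string> exported_names = {"serving_default"};
//   GraphImportConfig specs;
//   auto module_or = ImportSavedModel(
//       "/path/to/saved_model", /*saved_model_version=*/2, tags,
//       /*extra_tf_opdefs=*/{}, absl::MakeSpan(exported_names), specs,
//       &context);
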
// Takes an MLIR module in the TF executor dialect and a set of parameters,
// applies a set of passes to convert the module to the TF Lite dialect, and
// serializes the result to a string. Depending on an attribute in the
// module's main function, full integer quantization is applied.
// If `quantized_buffer_type` is provided as INT8 or FLOAT16, the
// corresponding weight quantization will take place.
// If `export_to_mlir` is true, the result is exported in MLIR text format;
// otherwise it is exported as a flatbuffer.
Status ConvertTFExecutorToTFLOrFlatbuffer(
    mlir::ModuleOp module, bool export_to_mlir, bool emit_builtin_tflite_ops,
    bool emit_select_tf_ops, bool emit_custom_ops,
    const std::unordered_set<std::string>& select_user_tf_ops,
    const mlir::TFL::QuantizationSpecs& quant_specs,
    const std::unordered_set<std::string>& saved_model_tags,
    std::string* result, mlir::PassManager* pass_manager);
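
// Example usage (a minimal sketch; assumes `module` is an mlir::ModuleOp
// produced by one of the importers above, `context` is its MLIRContext, and
// the caller has already added the desired conversion passes to the pass
// manager):
//
//   std::string result;
//   mlir::PassManager pass_manager(&context);  // populate with passes first
//   mlir::TFL::QuantizationSpecs quant_specs;
//   Status status = ConvertTFExecutorToTFLOrFlatbuffer(
//       module, /*export_to_mlir=*/false, /*emit_builtin_tflite_ops=*/true,
//       /*emit_select_tf_ops=*/false, /*emit_custom_ops=*/false,
//       /*select_user_tf_ops=*/{}, quant_specs, /*saved_model_tags=*/{},
//       &result, &pass_manager);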
}  // namespace tensorflow

#endif  // TENSORFLOW_COMPILER_MLIR_LITE_TF_TO_TFL_FLATBUFFER_H_