/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_MLIR_LITE_TF_TO_TFL_FLATBUFFER_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_TF_TO_TFL_FLATBUFFER_H_

#include <memory>
#include <string>
#include <unordered_set>
#include <vector>

#include "absl/types/span.h"
#include "llvm/Support/SourceMgr.h"
#include "mlir/IR/BuiltinOps.h"  // from @llvm-project
#include "mlir/IR/MLIRContext.h"  // from @llvm-project
#include "mlir/Pass/PassManager.h"  // from @llvm-project
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/compiler/mlir/lite/common/tfl_pass_config.h"
#include "tensorflow/compiler/mlir/lite/quantization/quantization_config.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/stream_executor/lib/statusor.h"

namespace tensorflow {

// Loads a TF model from a GraphDef definition or a TF control flow dialect
// MLIR source into an MLIR module. If `input_mlir` is true, loads from an
// MLIR source file; otherwise, loads from a GraphDef. Setting
// `prune_unused_nodes` to true prunes unreachable nodes, provided
// `output_arrays` is specified.
stream_executor::port::StatusOr<mlir::OwningModuleRef>
LoadFromGraphdefOrMlirSource(
    const std::string& input_filename, bool input_mlir,
    bool use_splatted_constant,
    const std::vector<std::string>& extra_tf_opdefs,
    const GraphImportConfig& specs, absl::string_view debug_info_file,
    absl::string_view input_arrays, absl::string_view input_dtypes,
    absl::string_view input_shapes, absl::string_view output_arrays,
    absl::string_view control_output_arrays, llvm::SourceMgr* source_mgr,
    mlir::MLIRContext* context);

// Loads a SavedModel (either v1 or v2) into MLIR. `saved_model_bundle` will
// be initialized if a v1 model was loaded.
stream_executor::port::StatusOr<mlir::OwningModuleRef> ImportSavedModel(
    const std::string& input_filename, const int saved_model_version,
    const std::unordered_set<std::string>& tags,
    absl::Span<const std::string> extra_tf_opdefs,
    absl::Span<std::string> exported_names, const GraphImportConfig& specs,
    bool enable_variable_lifting, mlir::MLIRContext* context,
    std::unique_ptr<tensorflow::SavedModelBundle>* saved_model_bundle);

// Takes an MLIR module in the TF executor dialect and a set of parameters,
// applies a set of passes to convert the module to the TF Lite dialect, and
// serializes the result to a string. Depending on an attribute on the
// module's main function, full integer quantization is applied. If
// `quantized_buffer_type` is provided as INT8 or FLOAT16, the corresponding
// weight quantization takes place. If `export_to_mlir` is true, the result is
// exported in MLIR text format; otherwise it is exported as a flatbuffer.
Status ConvertTFExecutorToTFLOrFlatbuffer(
    mlir::ModuleOp module, bool export_to_mlir, bool emit_builtin_tflite_ops,
    bool emit_select_tf_ops, bool emit_custom_ops,
    bool allow_all_select_tf_ops,
    const std::unordered_set<std::string>& select_user_tf_ops,
    const mlir::TFL::QuantizationSpecs& quant_specs,
    const std::unordered_set<std::string>& saved_model_tags,
    std::string* result, mlir::PassManager* pass_manager);
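
// Illustrative end-to-end sketch of the import/convert flow above (not part
// of the API): import a v2 SavedModel with `ImportSavedModel`, then serialize
// it to a TFLite flatbuffer with `ConvertTFExecutorToTFLOrFlatbuffer`. The
// SavedModel path, tag, and signature name are hypothetical, error handling
// is elided, and `pass_manager` is assumed to have been populated with the
// TF-to-TFLite conversion passes beforehand. `LoadFromGraphdefOrMlirSource`
// is the analogous entry point for GraphDef or MLIR text inputs.
//
//   mlir::MLIRContext context;
//   GraphImportConfig specs;
//   std::unique_ptr<tensorflow::SavedModelBundle> bundle;
//   std::vector<std::string> exported_names = {"serving_default"};
//   auto module_or = ImportSavedModel(
//       "/tmp/my_saved_model", /*saved_model_version=*/2, /*tags=*/{"serve"},
//       /*extra_tf_opdefs=*/{}, absl::MakeSpan(exported_names), specs,
//       /*enable_variable_lifting=*/true, &context, &bundle);
//   mlir::OwningModuleRef module = std::move(module_or.ValueOrDie());
//
//   mlir::PassManager pass_manager(&context);
//   // ... add the TF-to-TFLite conversion passes to `pass_manager` here ...
//   mlir::TFL::QuantizationSpecs quant_specs;
//   std::string flatbuffer;
//   Status status = ConvertTFExecutorToTFLOrFlatbuffer(
//       module.get(), /*export_to_mlir=*/false,
//       /*emit_builtin_tflite_ops=*/true, /*emit_select_tf_ops=*/false,
//       /*emit_custom_ops=*/false, /*allow_all_select_tf_ops=*/false,
//       /*select_user_tf_ops=*/{}, quant_specs,
//       /*saved_model_tags=*/{"serve"}, &flatbuffer, &pass_manager);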

}  // namespace tensorflow

#endif  // TENSORFLOW_COMPILER_MLIR_LITE_TF_TO_TFL_FLATBUFFER_H_