/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_TPU_PASSES_H_
#define TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_TPU_PASSES_H_

// This file contains stub implementations for Google internal TPU APIs.

#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassOptions.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"

namespace tensorflow {

class CoreRTConverter;

namespace tfrt_compiler {

class FallbackConverter;

}  // namespace tfrt_compiler

struct TfrtTpuCompileOptions
    : mlir::PassPipelineOptions<TfrtTpuCompileOptions> {
  Option<bool> move_resource_gather_to_host{
      *this, "move-resource-gather-to-host",
      llvm::cl::desc("Move resource gather ops to host"),
      llvm::cl::init(false)};
  Option<int64_t> gather_table_width_threshold_bytes{
      *this, "gather-table-width-threshold-bytes",
      llvm::cl::desc(
          "The threshold to control whether a TPU resource gather op should "
          "be moved to host. A negative value means all are moved."),
      llvm::cl::init(-1)};
};
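
// Illustrative sketch (not the registered pipeline): because this struct
// derives from mlir::PassPipelineOptions, its options can be parsed from a
// textual pipeline spec. The pipeline name "tfrt-tpu-compile" below is an
// assumption made only for this example.
//
//   --pass-pipeline='builtin.module(tfrt-tpu-compile{
//       move-resource-gather-to-host=true
//       gather-table-width-threshold-bytes=1024})'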

struct TfrtTpuExecuteOpConversionOptions {
  bool use_core_selector = false;
  bool use_bundled_transfer = false;
  bool transfer_result_to_host = false;
  bool use_tpu_host_allocator_for_inputs = false;
};

// Registers a set of dialects used in TFRT TPU lowering.
inline void RegisterTPUDialects(mlir::DialectRegistry *registry) {}

// Adds a target dialect and a set of rewrite patterns for TFRT TPU lowering.
inline void AddTPUTargetDialectAndPatterns(
    mlir::ConversionTarget *target, mlir::RewritePatternSet *patterns,
    mlir::MLIRContext *context, CoreRTConverter *corert_converter,
    tfrt_compiler::FallbackConverter *fallback_converter,
    const TfrtTpuExecuteOpConversionOptions &tpu_exec_conv_opts,
    bool tpu_lower_to_fallback) {}
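
// Illustrative sketch of how a lowering pass might wire this hook into a
// dialect conversion; the surrounding setup, the converter objects, and the
// `module` value are assumptions for the example, not part of this header.
//
//   mlir::ConversionTarget target(*context);
//   mlir::RewritePatternSet patterns(context);
//   TfrtTpuExecuteOpConversionOptions tpu_opts;
//   tpu_opts.use_tpu_host_allocator_for_inputs = true;
//   AddTPUTargetDialectAndPatterns(&target, &patterns, context,
//                                  &corert_converter, &fallback_converter,
//                                  tpu_opts, /*tpu_lower_to_fallback=*/true);
//   if (mlir::failed(mlir::applyPartialConversion(module, target,
//                                                 std::move(patterns))))
//     signalPassFailure();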

// Rewrites specific TF TPU ops to equivalent TF ops in a module.
inline mlir::LogicalResult RunTPUBackwardCompatConversion(
    mlir::ModuleOp module, const TfrtTpuCompileOptions &options) {
  return mlir::failure();
}
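
// Illustrative sketch of checking the result; treating failure as "the
// backward-compat conversion is unavailable" (which this stub always reports)
// is an assumption about the caller, not something defined in this header.
//
//   if (mlir::failed(RunTPUBackwardCompatConversion(module, options))) {
//     // Fall back to the unconverted module.
//   }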

}  // namespace tensorflow

#endif  // TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_TPU_PASSES_H_