/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "mlir/Dialect/Func/IR/FuncOps.h"  // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"  // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/passes.h"
#include "tensorflow/core/util/device_name_utils.h"

namespace tensorflow {
namespace tfrt_compiler {
namespace {

// Folds tf.DeviceIndex to tf.Const if it has a device assigned.
class FoldDeviceIndex : public mlir::OpRewritePattern<mlir::TF::DeviceIndexOp> {
 public:
  using mlir::OpRewritePattern<mlir::TF::DeviceIndexOp>::OpRewritePattern;

  mlir::LogicalResult matchAndRewrite(
      mlir::TF::DeviceIndexOp op,
      mlir::PatternRewriter& rewriter) const override {
    auto device = op->getAttrOfType<mlir::StringAttr>("device");
    if (!device) return mlir::failure();

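    // Parse the assigned device name; give up if it does not specify a
    // device type.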
    DeviceNameUtils::ParsedName parsed_name;
    if (!DeviceNameUtils::ParseFullName(device.getValue().str(),
                                        &parsed_name) ||
        !parsed_name.has_type)
      return mlir::failure();

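    // Find the index of the assigned device type in the op's device_names
    // attribute; if no entry matches, `i` ends up equal to
    // device_names.size().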
    int32_t i = 0;
    mlir::ArrayAttr device_names = op.device_names();
    for (; i < device_names.size(); ++i) {
      auto device_name = device_names[i].cast<mlir::StringAttr>().getValue();
      if (device_name == parsed_name.type) break;
    }

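    // Replace tf.DeviceIndex with a scalar i32 constant holding that index.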
    rewriter.replaceOpWithNewOp<mlir::TF::ConstOp>(
        op,
        mlir::DenseIntElementsAttr::get(
            mlir::RankedTensorType::get(/*shape=*/{}, rewriter.getI32Type()),
            i));

    return mlir::success();
  }
};

// Optimization pass for TFRT-specific rewrite patterns.
class OptimizeTfForTfrt
    : public mlir::PassWrapper<OptimizeTfForTfrt,
                               mlir::OperationPass<mlir::func::FuncOp>> {
 public:
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(OptimizeTfForTfrt)

  llvm::StringRef getArgument() const final { return "optimize-tf-for-tfrt"; }

  llvm::StringRef getDescription() const final {
    return "Optimize TF MLIR for the TFRT workflow.";
  }

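  // Build the rewrite pattern set once and freeze it so it can be reused
  // across pass runs without being reconstructed.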
  mlir::LogicalResult initialize(mlir::MLIRContext* context) override {
    mlir::RewritePatternSet pattern_list(context);
    pattern_list.add<FoldDeviceIndex>(context);
    patterns_ = std::move(pattern_list);
    return mlir::success();
  }

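  // Greedily apply the frozen patterns to the function; signal pass failure
  // if the rewrite driver does not converge.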
  void runOnOperation() override {
    auto func = getOperation();
    if (mlir::failed(mlir::applyPatternsAndFoldGreedily(func, patterns_)))
      signalPassFailure();
  }

 private:
  mlir::FrozenRewritePatternSet patterns_;
};

}  // namespace

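// A minimal usage sketch (assuming a ModuleOp-level pass manager; `pm`,
// `context`, and `module` are illustrative names, not part of this file):
//
//   mlir::PassManager pm(&context);
//   pm.addNestedPass<mlir::func::FuncOp>(CreateOptimizeTfForTfrtPass());
//   if (mlir::failed(pm.run(module))) { /* handle error */ }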
std::unique_ptr<mlir::OperationPass<mlir::func::FuncOp>>
CreateOptimizeTfForTfrtPass() {
  return std::make_unique<OptimizeTfForTfrt>();
}

static mlir::PassRegistration<OptimizeTfForTfrt> register_pass;

}  // namespace tfrt_compiler
}  // namespace tensorflow