/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_MLIR_MLIR_GRAPH_OPTIMIZATION_PASS_H_
#define TENSORFLOW_COMPILER_MLIR_MLIR_GRAPH_OPTIMIZATION_PASS_H_

#include <functional>
#include <memory>
#include <set>
#include <string>
#include <vector>

#include "tensorflow/compiler/mlir/mlir_bridge_rollout_policy.h"
#include "mlir/IR/BuiltinOps.h"  // from @llvm-project
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"

namespace tensorflow {

// -------------------------------------------------------------------------- //
// MLIR passes running on Tensorflow function graphs (Tensorflow V2).
// -------------------------------------------------------------------------- //

// Disabled - skip execution of the pass.
// Enabled - execute the pass and propagate any errors to the caller.
// FallbackEnabled - execute the pass and commit all the changes to the MLIR
//   module in case of success. Do not commit any changes in case of failure,
//   and let the rest of the pipeline run.
enum class MlirOptimizationPassState { Disabled, Enabled, FallbackEnabled };

// An API for registering an MLIR ModulePass with the Tensorflow runtime.
// These passes run only on function graphs built by Tensorflow V2 and
// instantiated by the process_function_library_runtime (see
// FunctionOptimizationPass for details).
class MlirOptimizationPass {
 public:
  virtual ~MlirOptimizationPass() = default;
  virtual llvm::StringRef name() const = 0;

  // Returns an enum value:
  //   Enabled if the pass is enabled for the given graph with specified config.
  //   Disabled if the pass is disabled.
  //   FallbackEnabled if the pass needs to be executed in fallback mode.
  //
  // When the pass is FallbackEnabled, the pass is executed and the changes it
  // makes to the MLIR module are committed only if the pass succeeds;
  // otherwise no changes are committed and the rest of the pipeline still
  // runs.
  //
  // `device_set` can be nullptr if the device information is not available
  // or no device-specific filtering is required.
  // `function_library` contains function definitions for function calls in
  // `graph` that are not included in the `graph` FunctionLibraryDefinition.
  virtual MlirOptimizationPassState GetPassState(
      const DeviceSet* device_set, const ConfigProto& config_proto,
      const Graph& graph,
      const FunctionLibraryDefinition& function_library) const = 0;

  virtual Status Run(const ConfigProto& config_proto, mlir::ModuleOp module,
                     const Graph& graph,
                     const FunctionLibraryDefinition& function_library) = 0;
};
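
// Example (illustrative sketch only, not part of this header's API): a
// hypothetical pass implementing the interface above. The class name and the
// returned pass state are made up for demonstration.
//
//   class ExampleMlirPass : public MlirOptimizationPass {
//    public:
//     llvm::StringRef name() const override { return "example-mlir-pass"; }
//
//     MlirOptimizationPassState GetPassState(
//         const DeviceSet* device_set, const ConfigProto& config_proto,
//         const Graph& graph,
//         const FunctionLibraryDefinition& function_library) const override {
//       // Run in fallback mode so a failure does not abort the pipeline.
//       return MlirOptimizationPassState::FallbackEnabled;
//     }
//
//     Status Run(const ConfigProto& config_proto, mlir::ModuleOp module,
//                const Graph& graph,
//                const FunctionLibraryDefinition& function_library) override {
//       // Transform `module` in place here.
//       return Status::OK();
//     }
//   };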

class MlirOptimizationPassRegistry {
 public:
  struct PassRegistration {
    int priority;
    std::unique_ptr<MlirOptimizationPass> pass;
  };

  struct PriorityComparator {
    bool operator()(const PassRegistration& x,
                    const PassRegistration& y) const {
      return x.priority < y.priority;
    }
  };

  using Passes = std::set<PassRegistration, PriorityComparator>;

  // Returns the global registry of MLIR optimization passes.
  static MlirOptimizationPassRegistry& Global();

  // Register optimization `pass` with the given `priority`.
  void Add(int priority, std::unique_ptr<MlirOptimizationPass> pass) {
    auto inserted = passes_.insert({priority, std::move(pass)});
    CHECK(inserted.second)
        << "Pass priority must be unique. "
        << "Previously registered pass with the same priority: "
        << inserted.first->pass->name().str();
  }

  // Free the memory allocated for all passes.
  void ClearPasses() { passes_.clear(); }

  const Passes& passes() const { return passes_; }

 private:
  Passes passes_;
};
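
// Example (illustrative; ExampleMlirPass and the priority value are
// hypothetical): registering a pass directly with the global registry.
//
//   MlirOptimizationPassRegistry::Global().Add(
//       /*priority=*/10, std::make_unique<ExampleMlirPass>());
//
// Passes are kept ordered by `priority` (see `Passes` above), and priorities
// must be unique: registering a second pass with an already used priority
// triggers the CHECK failure in `Add`.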

// Function optimization pass that runs all MLIR passes registered in
// MlirOptimizationPassRegistry.
class MlirFunctionOptimizationPass : public FunctionOptimizationPass {
 public:
  explicit MlirFunctionOptimizationPass(
      const MlirOptimizationPassRegistry* registry =
          &MlirOptimizationPassRegistry::Global())
      : registry_(registry) {}

  // Executes all of the underlying registered MlirOptimizationPasses.
  Status Run(const DeviceSet& device_set, const ConfigProto& config_proto,
             std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
             std::vector<std::string>* control_ret_node_names,
             bool* control_rets_updated) override;

 private:
  const MlirOptimizationPassRegistry* registry_;
};

// -------------------------------------------------------------------------- //
// MLIR passes running on Tensorflow V1 graphs.
// -------------------------------------------------------------------------- //

// An API for registering an MLIR ModulePass with the Tensorflow runtime.
// These passes run only on V1 graphs (legacy graphs) executed via the Session
// runtime. The graph importer converts legacy graph constructs to their V2
// equivalents (e.g. it raises control flow based on Switch/Merge nodes to
// functional control flow with If/While operations).
class MlirV1CompatOptimizationPass {
 public:
  virtual ~MlirV1CompatOptimizationPass() = default;
  virtual llvm::StringRef name() const = 0;

  // Returns a MlirOptimizationPassState based on the given graph and
  // config. See comments on the `MlirOptimizationPassState` enum for more
  // info on the exact values.
  virtual MlirOptimizationPassState GetPassState(
      const DeviceSet* device_set, const ConfigProto& config_proto,
      const Graph& graph,
      const FunctionLibraryDefinition& function_library) const = 0;

  virtual Status Run(const GraphOptimizationPassOptions& options,
                     mlir::ModuleOp module) = 0;
};
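
// Example (illustrative sketch only; the class name is hypothetical). Unlike
// MlirOptimizationPass::Run, the V1 compat Run receives the usual
// GraphOptimizationPassOptions alongside the imported MLIR module:
//
//   class ExampleV1CompatPass : public MlirV1CompatOptimizationPass {
//    public:
//     llvm::StringRef name() const override { return "example-v1-compat"; }
//
//     MlirOptimizationPassState GetPassState(
//         const DeviceSet* device_set, const ConfigProto& config_proto,
//         const Graph& graph,
//         const FunctionLibraryDefinition& function_library) const override {
//       return MlirOptimizationPassState::Enabled;
//     }
//
//     Status Run(const GraphOptimizationPassOptions& options,
//                mlir::ModuleOp module) override {
//       // Transform `module` in place here.
//       return Status::OK();
//     }
//   };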

class MlirV1CompatOptimizationPassRegistry {
 public:
  // Returns the global registry of MLIR V1 compat optimization passes.
  static MlirV1CompatOptimizationPassRegistry& Global();

  void Add(std::unique_ptr<MlirV1CompatOptimizationPass> pass) {
    CHECK(pass_ == nullptr) << "Only a single pass can be registered";
    pass_ = std::move(pass);
  }

  MlirV1CompatOptimizationPass* pass() const {
    return pass_ ? pass_.get() : nullptr;
  }

 private:
  std::unique_ptr<MlirV1CompatOptimizationPass> pass_{};
};

class MlirV1CompatGraphOptimizationPass : public GraphOptimizationPass {
 public:
  explicit MlirV1CompatGraphOptimizationPass(
      const MlirV1CompatOptimizationPassRegistry* registry =
          &MlirV1CompatOptimizationPassRegistry::Global())
      : registry_(registry) {}

  Status Run(const GraphOptimizationPassOptions& options) override;

 private:
  const MlirV1CompatOptimizationPassRegistry* registry_;
};

// -------------------------------------------------------------------------- //
// Helper classes for static registration of MLIR (V1 Compat) passes in the
// corresponding registry.
// -------------------------------------------------------------------------- //

namespace mlir_pass_registration {

class MlirOptimizationPassRegistration {
 public:
  explicit MlirOptimizationPassRegistration(
      int priority, std::unique_ptr<MlirOptimizationPass> pass) {
    MlirOptimizationPassRegistry::Global().Add(priority, std::move(pass));
  }
};
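
// Example (illustrative; the names and priority value are hypothetical): a
// static registration object, typically placed in a .cc file, adds a pass to
// the global registry at program startup.
//
//   static mlir_pass_registration::MlirOptimizationPassRegistration
//       register_example_pass(/*priority=*/10,
//                             std::make_unique<ExampleMlirPass>());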

class MlirV1CompatOptimizationPassRegistration {
 public:
  explicit MlirV1CompatOptimizationPassRegistration(
      std::unique_ptr<MlirV1CompatOptimizationPass> pass) {
    MlirV1CompatOptimizationPassRegistry::Global().Add(std::move(pass));
  }
};

}  // namespace mlir_pass_registration

}  // namespace tensorflow

#endif  // TENSORFLOW_COMPILER_MLIR_MLIR_GRAPH_OPTIMIZATION_PASS_H_