//===- KernelOutlining.cpp - Implementation of GPU kernel outlining -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the GPU dialect kernel outlining pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/GPU/Utils.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"

using namespace mlir;

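/// Creates an op of type `OpTy` for each of the GPU dimensions "x", "y" and
/// "z", and appends the results to `values`.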
template <typename OpTy>
static void createForAllDimensions(OpBuilder &builder, Location loc,
                                   SmallVectorImpl<Value> &values) {
  for (StringRef dim : {"x", "y", "z"}) {
    Value v = builder.create<OpTy>(loc, builder.getIndexType(),
                                   builder.getStringAttr(dim));
    values.push_back(v);
  }
}

/// Adds operations generating block/thread ids and grid/block dimensions at
/// the beginning of the `launchFuncOpBody` region. Also adds a mapping from
/// each argument of the entry block of `launchOpBody` to the corresponding
/// result of the added operations.
static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
                                     Region &launchOpBody,
                                     BlockAndValueMapping &map) {
  OpBuilder builder(loc->getContext());
  Block &firstBlock = launchOpBody.front();
  builder.setInsertionPointToStart(&launchFuncOpBody.front());
  SmallVector<Value, 12> indexOps;
  createForAllDimensions<gpu::BlockIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::ThreadIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::GridDimOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::BlockDimOp>(builder, loc, indexOps);
  // Map the 12 leading block arguments of the launch body (block/thread ids
  // and grid/block dimensions) to the results of the operations just created.
  for (auto indexOp : enumerate(indexOps))
    map.map(firstBlock.getArgument(indexOp.index()), indexOp.value());
}

/// Identifies operations that are beneficial to sink into kernels. These
/// operations must not have side effects, as otherwise sinking (and hence
/// duplicating them) is not legal.
static bool isSinkingBeneficiary(Operation *op) {
  return isa<ConstantOp, DimOp, SelectOp, CmpIOp>(op);
}

/// For a given operation `op`, computes whether it is beneficial to sink the
/// operation into the kernel. An operation can be sunk if doing so does not
/// introduce new kernel arguments. Whether a value is already available in the
/// kernel (and hence does not introduce new arguments) is checked by
/// querying `existingDependencies` and `availableValues`.
/// If an operand is not yet available, we recursively check whether it can be
/// made available by sinking its defining op.
/// Operations that are identified for sinking are added to `beneficiaryOps` in
/// the order they should appear in the kernel. Furthermore, `availableValues`
/// is updated with results that will be available after sinking the identified
/// ops.
static bool
extractBeneficiaryOps(Operation *op,
                      const llvm::SetVector<Value> &existingDependencies,
                      llvm::SetVector<Operation *> &beneficiaryOps,
                      llvm::SmallPtrSetImpl<Value> &availableValues) {
  if (beneficiaryOps.count(op))
    return true;

  if (!isSinkingBeneficiary(op))
    return false;

  for (Value operand : op->getOperands()) {
    // It is already visible in the kernel, keep going.
    if (availableValues.count(operand))
      continue;
    // Else check whether it can be made available via sinking or already is a
    // dependency.
    Operation *definingOp = operand.getDefiningOp();
    if ((!definingOp ||
         !extractBeneficiaryOps(definingOp, existingDependencies,
                                beneficiaryOps, availableValues)) &&
        !existingDependencies.count(operand))
      return false;
  }
  // We will sink the operation, mark its results as now available.
  beneficiaryOps.insert(op);
  for (Value result : op->getResults())
    availableValues.insert(result);
  return true;
}

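/// Sinks beneficial, side-effect-free operations whose results are used inside
/// the body of `launchOp` into that body, so that the values are
/// rematerialized in the kernel rather than passed in as kernel arguments.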
LogicalResult mlir::sinkOperationsIntoLaunchOp(gpu::LaunchOp launchOp) {
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  llvm::SetVector<Value> sinkCandidates;
  getUsedValuesDefinedAbove(launchOpBody, sinkCandidates);

  llvm::SetVector<Operation *> toBeSunk;
  llvm::SmallPtrSet<Value, 4> availableValues;
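  // Try to make each candidate available inside the kernel by sinking its
  // chain of side-effect-free defining operations.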
  for (Value operand : sinkCandidates) {
    Operation *operandOp = operand.getDefiningOp();
    if (!operandOp)
      continue;
    extractBeneficiaryOps(operandOp, sinkCandidates, toBeSunk, availableValues);
  }

  // Insert operations so that the defs get cloned before uses.
  BlockAndValueMapping map;
  OpBuilder builder(launchOpBody);
  for (Operation *op : toBeSunk) {
    Operation *clonedOp = builder.clone(*op, map);
    // Only replace uses within the launch op.
    for (auto pair : llvm::zip(op->getResults(), clonedOp->getResults()))
      replaceAllUsesInRegionWith(std::get<0>(pair), std::get<1>(pair),
                                 launchOp.body());
  }
  return success();
}

/// Outline the `gpu.launch` operation body into a kernel function. Replace
/// `gpu.terminator` operations by `gpu.return` in the generated function.
static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
                                            StringRef kernelFnName,
                                            llvm::SetVector<Value> &operands) {
  Location loc = launchOp.getLoc();
  // Create a builder with no insertion point, insertion will happen separately
  // due to symbol table manipulation.
  OpBuilder builder(launchOp.getContext());
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  getUsedValuesDefinedAbove(launchOpBody, operands);

  // Create the gpu.func operation.
  SmallVector<Type, 4> kernelOperandTypes;
  kernelOperandTypes.reserve(operands.size());
  for (Value operand : operands) {
    kernelOperandTypes.push_back(operand.getType());
  }
  FunctionType type =
      FunctionType::get(kernelOperandTypes, {}, launchOp.getContext());
  auto outlinedFunc = builder.create<gpu::GPUFuncOp>(loc, kernelFnName, type);
  outlinedFunc.setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
                       builder.getUnitAttr());
  BlockAndValueMapping map;

  // Map the arguments corresponding to the launch parameters like blockIdx,
  // threadIdx, etc.
  Region &outlinedFuncBody = outlinedFunc.body();
  injectGpuIndexOperations(loc, outlinedFuncBody, launchOpBody, map);

  // Map arguments from gpu.launch region to the arguments of the gpu.func
  // operation.
  Block &entryBlock = outlinedFuncBody.front();
  for (auto operand : enumerate(operands))
    map.map(operand.value(), entryBlock.getArgument(operand.index()));

  // Clone the region of the gpu.launch operation into the gpu.func operation.
  // TODO: If cloneInto can be modified such that if a mapping for
  // a block exists, that block will be used to clone operations into (at the
  // end of the block), instead of creating a new block, this would be much
  // cleaner.
  launchOpBody.cloneInto(&outlinedFuncBody, map);

  // Branch from entry of the gpu.func operation to the block that is cloned
  // from the entry block of the gpu.launch operation.
  Block &launchOpEntry = launchOpBody.front();
  Block *clonedLaunchOpEntry = map.lookup(&launchOpEntry);
  builder.setInsertionPointToEnd(&entryBlock);
  builder.create<BranchOp>(loc, clonedLaunchOpEntry);

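  // Replace gpu.terminator ops cloned from the launch body with gpu.return.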
  outlinedFunc.walk([](gpu::TerminatorOp op) {
    OpBuilder replacer(op);
    replacer.create<gpu::ReturnOp>(op.getLoc());
    op.erase();
  });
  return outlinedFunc;
}

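/// Outlines `launchOp` into a gpu.func named `kernelFnName` and appends to
/// `operands` any values the outlined kernel uses beyond those already listed.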
gpu::GPUFuncOp mlir::outlineKernelFunc(gpu::LaunchOp launchOp,
                                       StringRef kernelFnName,
                                       llvm::SmallVectorImpl<Value> &operands) {
  DenseSet<Value> inputOperandSet;
  inputOperandSet.insert(operands.begin(), operands.end());
  llvm::SetVector<Value> operandSet(operands.begin(), operands.end());
  auto funcOp = outlineKernelFuncImpl(launchOp, kernelFnName, operandSet);
  for (auto operand : operandSet) {
    if (!inputOperandSet.count(operand))
      operands.push_back(operand);
  }
  return funcOp;
}

/// Replaces the `gpu.launch` operation with a `gpu.launch_func` operation
/// launching `kernelFunc`. The kernel func contains the body of the
/// `gpu.launch` with constant region arguments inlined.
static void convertToLaunchFuncOp(gpu::LaunchOp launchOp,
                                  gpu::GPUFuncOp kernelFunc,
                                  ValueRange operands) {
  OpBuilder builder(launchOp);
  builder.create<gpu::LaunchFuncOp>(
      launchOp.getLoc(), kernelFunc, launchOp.getGridSizeOperandValues(),
      launchOp.getBlockSizeOperandValues(), operands);
  launchOp.erase();
}

namespace {
/// Pass that moves the kernel of each LaunchOp into its separate nested module.
///
/// This pass moves the kernel code of each LaunchOp into a function created
/// inside a nested module. It also creates an external function of the same
/// name in the parent module.
///
/// The gpu.modules are intended to be compiled to a cubin blob independently in
/// a separate pass. The external functions can then be annotated with the
/// symbol of the cubin accessor function.
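///
/// For illustration, outlining roughly rewrites a launch as follows (sketch
/// only: the function name, operands, and exact assembly syntax are
/// illustrative and largely elided):
///
///   func @foo(...) {
///     gpu.launch blocks(...) threads(...) {
///       ...  // original kernel body
///       gpu.terminator
///     }
///   }
///
/// becomes
///
///   func @foo(...) {
///     gpu.launch_func @foo_kernel::@foo_kernel ...  // same grid/block sizes
///   }
///   gpu.module @foo_kernel {
///     gpu.func @foo_kernel(...) kernel {
///       ...  // original kernel body
///       gpu.return
///     }
///   }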
class GpuKernelOutliningPass
    : public GpuKernelOutliningBase<GpuKernelOutliningPass> {
public:
  void runOnOperation() override {
    SymbolTable symbolTable(getOperation());
    bool modified = false;
    for (auto func : getOperation().getOps<FuncOp>()) {
      // Insert just after the function.
      Block::iterator insertPt(func->getNextNode());
      auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
        llvm::SetVector<Value> operands;
        std::string kernelFnName =
            Twine(op->getParentOfType<FuncOp>().getName(), "_kernel").str();

        // Pull in operations that can be sunk into the kernel.
        if (failed(sinkOperationsIntoLaunchOp(op)))
          return WalkResult::interrupt();
        gpu::GPUFuncOp outlinedFunc =
            outlineKernelFuncImpl(op, kernelFnName, operands);

        // Create nested module and insert outlinedFunc. The module will
        // originally get the same name as the function, but may be renamed on
        // insertion into the parent module.
        auto kernelModule = createKernelModule(outlinedFunc, symbolTable);
        symbolTable.insert(kernelModule, insertPt);

        // Potentially changes signature, pulling in constants.
        convertToLaunchFuncOp(op, outlinedFunc, operands.getArrayRef());
        modified = true;
        return WalkResult::advance();
      });
      if (funcWalkResult.wasInterrupted())
        return signalPassFailure();
    }

    // If any new module was inserted in this module, annotate this module as
    // a container module.
    if (modified)
      getOperation().setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
                             UnitAttr::get(&getContext()));
  }

private:
  /// Returns a gpu.module containing kernelFunc and all callees (recursive).
  gpu::GPUModuleOp createKernelModule(gpu::GPUFuncOp kernelFunc,
                                      const SymbolTable &parentSymbolTable) {
    // TODO: This code cannot use an OpBuilder because it must be inserted into
    // a SymbolTable by the caller. SymbolTable needs to be refactored to
    // prevent manual building of Ops with symbols in code using SymbolTables
    // and then this needs to use the OpBuilder.
    auto context = getOperation().getContext();
    OpBuilder builder(context);
    OperationState state(kernelFunc.getLoc(),
                         gpu::GPUModuleOp::getOperationName());
    gpu::GPUModuleOp::build(builder, state, kernelFunc.getName());
    auto kernelModule = cast<gpu::GPUModuleOp>(Operation::create(state));
    SymbolTable symbolTable(kernelModule);
    symbolTable.insert(kernelFunc);

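    // Walk the transitive closure of symbol uses: clone every symbol
    // definition referenced from the kernel (or from an already-cloned
    // definition) into the new module so that it is self-contained.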
    SmallVector<Operation *, 8> symbolDefWorklist = {kernelFunc};
    while (!symbolDefWorklist.empty()) {
      if (Optional<SymbolTable::UseRange> symbolUses =
              SymbolTable::getSymbolUses(symbolDefWorklist.pop_back_val())) {
        for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
          StringRef symbolName =
              symbolUse.getSymbolRef().cast<FlatSymbolRefAttr>().getValue();
          if (symbolTable.lookup(symbolName))
            continue;

          Operation *symbolDefClone =
              parentSymbolTable.lookup(symbolName)->clone();
          symbolDefWorklist.push_back(symbolDefClone);
          symbolTable.insert(symbolDefClone);
        }
      }
    }

    return kernelModule;
  }
};

} // namespace

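/// Creates an instance of the GPU kernel outlining pass.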
std::unique_ptr<OperationPass<ModuleOp>> mlir::createGpuKernelOutliningPass() {
  return std::make_unique<GpuKernelOutliningPass>();
}