//===- Loops.cpp - conversion from Linalg named and generic ops to loops --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/FoldUtils.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

#include "llvm/ADT/TypeSwitch.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;

using edsc::op::operator+;

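/// Emits one affine.apply per result expression of `map`: each result is
/// split off into its own single-result map, canonicalized together with its
/// operands, and applied to `vals`.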
static SmallVector<Value, 8> makeCanonicalAffineApplies(OpBuilder &b,
                                                        Location loc,
                                                        AffineMap map,
                                                        ArrayRef<Value> vals) {
  if (map.isEmpty())
    return {};

  assert(map.getNumInputs() == vals.size());
  SmallVector<Value, 8> res;
  res.reserve(map.getNumResults());
  auto dims = map.getNumDims();
  for (auto e : map.getResults()) {
    auto exprMap = AffineMap::get(dims, map.getNumSymbols(), e);
    SmallVector<Value, 4> operands(vals.begin(), vals.end());
    canonicalizeMapAndOperands(&exprMap, &operands);
    res.push_back(affine_apply(exprMap, operands));
  }
  return res;
}

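/// Applies the optional `permutation` map to the induction variables `ivs`,
/// or returns them unchanged when no permutation is provided.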
static SmallVector<Value, 4> permuteIvs(ArrayRef<Value> ivs,
                                        Optional<AffineMap> permutation) {
  return permutation ? applyMapToValues(ScopedContext::getBuilderRef(),
                                        ScopedContext::getLocation(),
                                        permutation.getValue(), ivs)
                     : SmallVector<Value, 4>(ivs.begin(), ivs.end());
}

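/// Clones the body of the single-block region of `op`, substituting
/// `indexedValues` for the block arguments, and then stores each value yielded
/// by the terminator into the corresponding output buffer at the indices given
/// by `indexing`.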
template <typename IndexedValueType, typename OpType>
static void inlineRegionAndEmitStore(OpType op, ArrayRef<Value> indexedValues,
                                     ArrayRef<SmallVector<Value, 8>> indexing,
                                     ArrayRef<Value> outputBuffers) {
  assert(op->getNumRegions() == 1 && "Expected single region op");
  auto &b = ScopedContext::getBuilderRef();
  auto &block = op->getRegion(0).front();
  BlockAndValueMapping map;
  map.map(block.getArguments(), indexedValues);
  for (auto &op : block.without_terminator()) {
    assert(op.getNumRegions() == 0 && "expected a non-nested region");
    auto *newOp = b.clone(op, map);
    map.map(op.getResults(), newOp->getResults());
  }

  Operation &terminator = block.back();
  assert(isa<linalg::YieldOp>(terminator) &&
         "expected a yield op at the end of the region");
  for (unsigned i = 0, e = terminator.getNumOperands(); i < e; ++i) {
    IndexedValueType O(outputBuffers[i]);
    O(indexing[i]) = map.lookupOrDefault(terminator.getOperand(i));
  }
}

// Holds the input and output indices of a SingleInputPoolingOp `op`, as
// computed by getInputAndOutputIndices below.
struct InputAndOutputIndices {
  SmallVector<Value, 8> inputs;
  SmallVector<Value, 8> outputs;
};
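/// Computes the canonical input and output access indices of a
/// SingleInputPoolingOp `op` by applying its indexing maps to the enclosing
/// induction variables `allIvs`.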
template <typename SingleInputPoolingOp>
static InputAndOutputIndices getInputAndOutputIndices(ArrayRef<Value> allIvs,
                                                      SingleInputPoolingOp op) {
  auto &b = ScopedContext::getBuilderRef();
  auto loc = ScopedContext::getLocation();
  auto mapsRange = op.indexing_maps().template getAsRange<AffineMapAttr>();
  auto maps = llvm::to_vector<8>(
      llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
  return InputAndOutputIndices{
      makeCanonicalAffineApplies(b, loc, maps[0], allIvs),
      makeCanonicalAffineApplies(b, loc, maps[2], allIvs)};
}

/// Emits the MLIR for the scalar part of the generic op by:
///   1. Emitting load ops for each input and output view in order. This is
///      achieved by applying the appropriate input or output map to the
///      enclosing induction variables.
///   2. Emitting a call to `op.fun()` that takes as arguments the scalars
///      from point 1. above.
///   3. Emitting store ops to store the results of 2. to the output
///      views.
///
/// An example output may resemble:
///
/// ```
///    scf.for %i = %c0 to %0 step %c1 {
///      scf.for %j = %c0 to %1 step %c1 {
///        scf.for %k = %c0 to %4 step %c1 {
///          %11 = load %arg0[%i, %j] :
///            memref<?x?xf32, stride_specification>
///          %12 = load %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          %13 = load %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///          %14:2 = call @foo(%11, %12, %13) : (f32, f32, f32) -> (f32, f32)
///          store %14#0, %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          store %14#1, %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///        }
///      }
///    }
/// ```
template <typename IndexedValueType>
static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                     LinalgOp linalgOp) {
  assert(linalgOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto &b = ScopedContext::getBuilderRef();
  auto loc = ScopedContext::getLocation();
  unsigned nInputs = linalgOp.getNumInputs();
  unsigned nOutputs = linalgOp.getNumOutputs();
  SmallVector<Value, 4> indexedValues;
  indexedValues.reserve(nInputs + nOutputs);

  auto allIvsPlusDims = SmallVector<Value, 4>(allIvs.begin(), allIvs.end());

  // TODO: Avoid the loads if the corresponding argument of the
  // region has no uses.
  // 1.a. Emit load from input views.
  for (unsigned i = 0; i < nInputs; ++i) {
    auto indexing = makeCanonicalAffineApplies(
        b, loc, linalgOp.getInputIndexingMap(i), allIvsPlusDims);
    // Passing through IndexedValueType emits the proper load operation.
    indexedValues.push_back(IndexedValueType(linalgOp.getInput(i))(indexing));
  }
  // 1.b. Emit load from output views.
  for (unsigned i = 0; i < nOutputs; ++i) {
    auto indexing = makeCanonicalAffineApplies(
        b, loc, linalgOp.getOutputIndexingMap(i), allIvsPlusDims);
    // Passing through IndexedValueType emits the proper load operation.
    indexedValues.push_back(
        IndexedValueType(linalgOp.getOutputBuffer(i))(indexing));
  }

  // TODO: When a region inliner exists, use it.
  // 2. Inline region, currently only works for a single basic block.
  // 3. Emit store.
  SmallVector<SmallVector<Value, 8>, 8> indexing;
  SmallVector<Value, 8> outputBuffers;
  for (unsigned i = 0; i < nOutputs; ++i) {
    indexing.push_back(makeCanonicalAffineApplies(
        b, loc, linalgOp.getOutputIndexingMap(i), allIvsPlusDims));
    outputBuffers.push_back(linalgOp.getOutputBuffer(i));
  }
  inlineRegionAndEmitStore<IndexedValueType>(linalgOp, indexedValues, indexing,
                                             outputBuffers);
}

template <typename IndexedValueType>
static void emitScalarImplementation(ArrayRef<Value> allIvs, CopyOp copyOp) {
  assert(copyOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto nPar = copyOp.getNumParallelLoops();
  assert(nPar == allIvs.size());
  auto inputIvs =
      permuteIvs(allIvs.take_front(nPar), copyOp.inputPermutation());
  auto outputIvs =
      permuteIvs(allIvs.take_front(nPar), copyOp.outputPermutation());
  SmallVector<Value, 8> iivs(inputIvs.begin(), inputIvs.end());
  SmallVector<Value, 8> oivs(outputIvs.begin(), outputIvs.end());
  IndexedValueType O(copyOp.getOutputBuffer(0)), I(copyOp.getInput(0));
  // Emit the proper scalar assignment, whether we are dealing with a 0-D or
  // an n-D loop nest; with or without permutations.
  // clang-format off
    nPar > 0 ? O(oivs) = I(iivs) :
               O() = I();
  // clang-format on
}

template <typename IndexedValueType>
static void emitScalarImplementation(ArrayRef<Value> allIvs, FillOp fillOp) {
  assert(fillOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto nPar = fillOp.getNumParallelLoops();
  assert(nPar == allIvs.size());
  auto ivs = SmallVector<Value, 4>(allIvs.begin(), allIvs.begin() + nPar);
  IndexedValueType O(fillOp.getOutputBuffer(0));
  // Emit the proper scalar assignment, whether we are dealing with a 0-D or
  // an n-D loop nest; with or without permutations.
  nPar > 0 ? O(ivs) = fillOp.value() : O() = fillOp.value();
}

// Creates a padded read of the given `input` tensor, using `indices` to access
// the tensor: the loaded value is replaced by `padValue` whenever a padded
// index is out of bounds. `skipPadding` lists the dimensions for which no
// padding is needed, e.g. the non-spatial dimensions for convolutions.
template <typename IndexedValueType>
Value getPaddedInput(Value input, ArrayRef<Value> indices,
                     ArrayRef<int> skipPadding, Value padValue) {
  // TODO: add a level of indirection to linalg.generic.

  IndexedValueType indexedInput(input);

  auto *context = ScopedContext::getContext();
  Value zeroIndex = std_constant_index(0);
  SmallVector<Value, 8> conds;
  SmallVector<Value, 8> clampedImIdx;
  for (auto iter : llvm::enumerate(indices)) {
    int idx = iter.index();
    auto dim = iter.value();
    if (is_contained(skipPadding, idx)) {
      clampedImIdx.push_back(dim);
      continue;
    }

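    // Accumulate an out-of-bounds predicate over all padded dimensions: the
    // access is padded when the index is below zero or not less than the
    // dimension size.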
    using edsc::op::sge;
    using edsc::op::slt;
    using edsc::op::operator||;
    Value leftOutOfBound = slt(dim, zeroIndex);
    if (conds.empty())
      conds.push_back(leftOutOfBound);
    else
      conds.push_back(conds.back() || leftOutOfBound);
    Value rightBound = std_dim(input, idx);
    conds.push_back(conds.back() || (sge(dim, rightBound)));

    // When padding is involved, the indices are only ever shifted towards
    // negative values, so clamping with a max against zero is enough.
    auto maxMap = AffineMap::get(/*dimCount=*/1, 0,
                                 {getAffineDimExpr(/*position=*/0, context),
                                  getAffineConstantExpr(0, context)},
                                 context);
    clampedImIdx.push_back(affine_max(dim.getType(), maxMap, ValueRange{dim}));
  }

  Value readInput = indexedInput(clampedImIdx);
  return conds.empty() ? readInput
                       : (Value)std_select(conds.back(), padValue, readInput);
}

namespace {

/// The padding value for a given Op depends on the semantics of the Op.
/// The identity value for ConvOp and PoolingSumOp is 0, for PoolingMaxOp it is
/// -inf or minInt, and for PoolingMinOp it is inf or maxInt.
template <typename OpType>
Attribute getPadValueAttr(Type type) {
  llvm_unreachable("Unexpected op type for getPadValueAttr");
  return {};
}

template <>
Attribute getPadValueAttr<PoolingMaxOp>(Type type) {
  auto &b = ScopedContext::getBuilderRef();
  if (auto floatType = type.dyn_cast<FloatType>()) {
    return b.getFloatAttr(
        floatType,
        APFloat::getInf(floatType.getFloatSemantics(), /*Negative*/ true));
  }
  if (auto intType = type.dyn_cast<IntegerType>()) {
    unsigned width = intType.getWidth();
    // The select instruction used to lower PoolingMax uses a signed
    // comparison, so use a signed constant irrespective of the signedness of
    // the integer type.
    return b.getIntegerAttr(intType, APInt::getSignedMinValue(width));
  }
  llvm_unreachable("Unsupported data type for PoolingMaxOp");
  return {};
}

template <>
Attribute getPadValueAttr<PoolingMinOp>(Type type) {
  auto &b = ScopedContext::getBuilderRef();
  if (auto floatType = type.dyn_cast<FloatType>()) {
    return b.getFloatAttr(floatType,
                          APFloat::getInf(floatType.getFloatSemantics()));
  }
  if (auto intType = type.dyn_cast<IntegerType>()) {
    unsigned width = intType.getWidth();
    // The select instruction used to lower PoolingMin uses a signed
    // comparison, so use a signed constant irrespective of the signedness of
    // the integer type.
    return b.getIntegerAttr(intType, APInt::getSignedMaxValue(width));
  }
  llvm_unreachable("Unsupported data type for PoolingMinOp");
  return {};
}

template <>
Attribute getPadValueAttr<PoolingSumOp>(Type type) {
  auto &b = ScopedContext::getBuilderRef();
  return b.getZeroAttr(type);
}

template <>
Attribute getPadValueAttr<ConvOp>(Type type) {
  auto &b = ScopedContext::getBuilderRef();
  return b.getZeroAttr(type);
}

} // namespace

/// Returns true if `convOp` has non-zero padding.
static bool hasPadding(ConvOp convOp) {
  for (unsigned i = 0, e = convOp.getNumSpatialDimensions(); i < e; ++i) {
    if (convOp.getLowPad(i) > 0 || convOp.getHighPad(i) > 0)
      return true;
  }
  return false;
}

template <typename IndexedValueType>
static void emitScalarImplementation(ArrayRef<Value> allIvs, ConvOp convOp) {
  assert(convOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto &b = ScopedContext::getBuilderRef();
  auto loc = ScopedContext::getLocation();
  auto mapsRange = convOp.indexing_maps().getAsRange<AffineMapAttr>();
  auto maps = llvm::to_vector<8>(
      llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
  SmallVector<Value, 8> fIdx(
      makeCanonicalAffineApplies(b, loc, maps[0], allIvs));
  SmallVector<Value, 8> imIdx(
      makeCanonicalAffineApplies(b, loc, maps[1], allIvs));
  SmallVector<Value, 8> oIdx(
      makeCanonicalAffineApplies(b, loc, maps[2], allIvs));

  IndexedValueType F(convOp.filter()), O(convOp.output());

  // Emit scalar form. Padded conv involves an affine.max in the memory access
  // which is not allowed by affine.load. Override to use a StdIndexedValue
  // when there is non-zero padding.
  if (hasPadding(convOp)) {
    Type type = convOp.input().getType().cast<MemRefType>().getElementType();
    Value padValue = std_constant(type, getPadValueAttr<ConvOp>(type));
    Value paddedInput = getPaddedInput<StdIndexedValue>(
        convOp.input(), imIdx,
        /* Only need to pad the window dimensions */
        {0, static_cast<int>(imIdx.size()) - 1}, padValue);
    O(oIdx) += F(fIdx) * paddedInput;
  } else {
    IndexedValueType I(convOp.input());
    O(oIdx) += F(fIdx) * I(imIdx);
  }
}

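/// Returns true if `poolingOp` has non-zero padding.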
template <typename PoolingOp>
static bool hasPadding(PoolingOp poolingOp) {
  for (unsigned i = 0, e = poolingOp.getNumWindowLoops(); i < e; ++i) {
    if (poolingOp.getLowPad(i) > 0 || poolingOp.getHighPad(i) > 0)
      return true;
  }
  return false;
}

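/// Returns the pooling input value at `inputIndices`, going through a padded
/// load when `op` has non-zero padding and through `IndexedValueType`
/// otherwise.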
template <typename IndexedValueType, typename PoolingOp>
static Value getPoolingInput(PoolingOp op, ArrayRef<Value> inputIndices) {
  if (hasPadding(op)) {
    Type type =
        op.input().getType().template cast<MemRefType>().getElementType();
    Value padValue = std_constant(type, getPadValueAttr<PoolingOp>(type));
    return getPaddedInput<StdIndexedValue>(op.input(), inputIndices,
                                           /*Pad every dimension*/ {},
                                           padValue);
  }
  IndexedValueType input(op.input());
  return input(inputIndices);
}

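/// Emits the scalar body shared by PoolingMinOp and PoolingMaxOp: loads the
/// current output value, compares it with the (possibly padded) input value,
/// and stores back the minimum or maximum of the two.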
template <typename IndexedValueType, typename OpType>
void emitPoolingMinMaxScalarImplementation(ArrayRef<Value> allIvs, OpType op) {
  InputAndOutputIndices indices = getInputAndOutputIndices(allIvs, op);
  // Emit scalar form.
  IndexedValueType output(op.output());
  Value lhs = output(indices.outputs);
  Value rhs = getPoolingInput<IndexedValueType>(op, indices.inputs);
  using edsc::op::sgt;
  using edsc::op::slt;
  Value value = std::is_same<OpType, PoolingMinOp>()
                    ? std_select(slt(lhs, rhs), lhs, rhs)
                    : std_select(sgt(lhs, rhs), lhs, rhs);
  output(indices.outputs) = value;
}

template <typename IndexedValueType>
static void emitScalarImplementation(ArrayRef<Value> allIvs, PoolingMaxOp op) {
  emitPoolingMinMaxScalarImplementation<IndexedValueType, PoolingMaxOp>(allIvs,
                                                                        op);
}

template <typename IndexedValueType>
static void emitScalarImplementation(ArrayRef<Value> allIvs, PoolingMinOp op) {
  emitPoolingMinMaxScalarImplementation<IndexedValueType, PoolingMinOp>(allIvs,
                                                                        op);
}

template <typename IndexedValueType>
static void emitScalarImplementation(ArrayRef<Value> allIvs, PoolingSumOp op) {
  auto indices = getInputAndOutputIndices(allIvs, op);
  IndexedValueType output(op.output());

  // Emit scalar form.
  output(indices.outputs) +=
      getPoolingInput<IndexedValueType>(op, indices.inputs);
}

/// Emits the MLIR for the scalar part of the indexed generic op by:
///   1. Emitting load ops for each input and output view in order. This is
///      achieved by applying the appropriate input or output map to the
///      enclosing induction variables.
///   2. Emitting a call to `op.fun()` that takes as arguments the induction
///      variables and the scalars from point 1. above.
///   3. Emitting store ops to store the results of 2. to the output views.
///
/// An example output may resemble:
///
/// ```
///    scf.for %i = %c0 to %0 step %c1 {
///      scf.for %j = %c0 to %1 step %c1 {
///        scf.for %k = %c0 to %4 step %c1 {
///          %11 = load %arg0[%i, %j] :
///            memref<?x?xf32, stride_specification>
///          %12 = load %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          %13 = load %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///          %14:2 = call @foo(%i, %j, %k, %11, %12, %13) :
///            (index, index, index, f32, f32, f32) -> (f32, f32)
///          store %14#0, %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          store %14#1, %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///        }
///      }
///    }
/// ```
template <typename IndexedValueType>
static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                     IndexedGenericOp indexedGenericOp) {
  assert(indexedGenericOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto &b = ScopedContext::getBuilderRef();
  auto loc = ScopedContext::getLocation();
  unsigned nInputs = indexedGenericOp.getNumInputs();
  unsigned nOutputs = indexedGenericOp.getNumOutputs();
  unsigned nLoops = allIvs.size();
  SmallVector<Value, 4> indexedValues;
  indexedValues.reserve(nLoops + nInputs + nOutputs);
  for (unsigned i = 0; i < nLoops; ++i)
    indexedValues.push_back(allIvs[i]);

  // TODO: Avoid the loads if the corresponding argument of the
  // region has no uses.
  // 1.a. Emit load from input views.
  for (unsigned i = 0; i < nInputs; ++i) {
    auto indexing = makeCanonicalAffineApplies(
        b, loc, indexedGenericOp.getInputIndexingMap(i), allIvs);
    // Passing input i through IndexedValueType emits the proper load
    // operation.
    indexedValues.push_back(
        IndexedValueType(indexedGenericOp.getInput(i))(indexing));
  }
  // 1.b. Emit load from output views.
  for (unsigned i = 0; i < nOutputs; ++i) {
    auto indexing = makeCanonicalAffineApplies(
        b, loc, indexedGenericOp.getOutputIndexingMap(i), allIvs);
    // Passing output i through IndexedValueType emits the proper load
    // operation.
    indexedValues.push_back(
        IndexedValueType(indexedGenericOp.getOutputBuffer(i))(indexing));
  }

  // TODO: When a region inliner exists, use it.
  // 2. Inline region, currently only works for a single basic block.
  // 3. Emit store.
  SmallVector<SmallVector<Value, 8>, 8> indexing;
  SmallVector<Value, 8> outputBuffers;
  for (unsigned i = 0; i < nOutputs; ++i) {
    indexing.push_back(makeCanonicalAffineApplies(
        b, loc, indexedGenericOp.getOutputIndexingMap(i), allIvs));
    outputBuffers.push_back(indexedGenericOp.getOutputBuffer(i));
  }
  inlineRegionAndEmitStore<IndexedValueType>(indexedGenericOp, indexedValues,
                                             indexing, outputBuffers);
}

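/// Generates a loop nest of `LoopTy` implementing `op` and returns the set of
/// generated loop operations, or llvm::None when an induction variable cannot
/// be traced back to its enclosing loop operation.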
template <typename LoopTy>
static Optional<LinalgLoops> linalgOpToLoopsImpl(Operation *op,
                                                 OpBuilder &builder) {
  using IndexedValueTy = typename GenerateLoopNest<LoopTy>::IndexedValueTy;

  ScopedContext scope(builder, op->getLoc());

  // The flattened loopToOperandRangesMaps is expected to be an invertible
  // permutation map (which is asserted in the inverse calculation).
  auto linalgOp = cast<LinalgOp>(op);
  assert(linalgOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto loopRanges = linalgOp.createLoopRanges(builder, op->getLoc());
  SmallVector<Value, 4> allIvs;
  GenerateLoopNest<LoopTy>::doit(
      loopRanges, /*iterInitArgs*/ {}, linalgOp.iterator_types().getValue(),
      [&](ValueRange ivs, ValueRange iterArgs) -> scf::ValueVector {
        assert(iterArgs.empty() && "unexpected iterArgs");
        allIvs.append(ivs.begin(), ivs.end());
        llvm::TypeSwitch<Operation *>(op)
            .Case<CopyOp, FillOp, ConvOp, PoolingMaxOp, PoolingMinOp,
                  PoolingSumOp, IndexedGenericOp, LinalgOp>([&](auto op) {
              emitScalarImplementation<IndexedValueTy>(allIvs, op);
            })
            .Default([&](Operation *op) { assert(false && "unexpected op"); });
        return scf::ValueVector{};
      });
  // Number of loop ops might be different from the number of ivs since some
  // loops like affine.parallel and scf.parallel have multiple ivs.
  llvm::SetVector<Operation *> loopSet;
  for (Value iv : allIvs) {
    if (!iv)
      return {};
    // The induction variable is a block argument of the entry block of the
    // loop operation.
    BlockArgument ivVal = iv.dyn_cast<BlockArgument>();
    if (!ivVal)
      return {};
    loopSet.insert(ivVal.getOwner()->getParentOp());
  }
  LinalgLoops loops(loopSet.begin(), loopSet.end());
  return loops;
}

namespace {
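/// Rewrites any LinalgOp into a loop nest of `LoopType` via
/// linalgOpToLoopsImpl and erases the original op on success.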
template <typename LoopType>
class LinalgRewritePattern : public RewritePattern {
public:
  LinalgRewritePattern() : RewritePattern(/*benefit=*/1, MatchAnyOpTypeTag()) {}

  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (!isa<LinalgOp>(op))
      return failure();
    if (!linalgOpToLoopsImpl<LoopType>(op, rewriter))
      return failure();
    rewriter.eraseOp(op);
    return success();
  }
};

struct FoldAffineOp;
} // namespace

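/// Lowers all Linalg ops in `funcOp` to loops of `LoopType` by greedily
/// applying the rewrite pattern together with canonicalization and local
/// affine.apply folding patterns.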
template <typename LoopType>
static void lowerLinalgToLoopsImpl(FuncOp funcOp, MLIRContext *context) {
  OwningRewritePatternList patterns;
  patterns.insert<LinalgRewritePattern<LoopType>>();
  DimOp::getCanonicalizationPatterns(patterns, context);
  AffineApplyOp::getCanonicalizationPatterns(patterns, context);
  patterns.insert<FoldAffineOp>(context);
  // Just apply the patterns greedily.
  applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}

namespace {
/// Local folding pattern for AffineApplyOp that we can apply greedily.
/// This replaces AffineApplyOp by the proper value in cases where the
/// associated map is trivial.
/// A trivial map here is defined as a map with a single result and either:
///   1. Zero operands + returns a single AffineConstantExpr
///   2. One operand + returns a single AffineDimExpr
///   3. One operand + returns a single AffineSymbolExpr
///
/// In the first case, the AffineApplyOp is replaced by a new constant. In the
/// other cases, it is replaced by its unique operand.
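///
/// For example (illustrative):
///   %0 = affine.apply affine_map<() -> (42)>()       // folds to constant 42
///   %1 = affine.apply affine_map<(d0) -> (d0)>(%iv)  // folds to %iv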
struct FoldAffineOp : public RewritePattern {
  FoldAffineOp(MLIRContext *context)
      : RewritePattern(AffineApplyOp::getOperationName(), 0, context) {}

  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    AffineApplyOp affineApplyOp = cast<AffineApplyOp>(op);
    auto map = affineApplyOp.getAffineMap();
    if (map.getNumResults() != 1 || map.getNumInputs() > 1)
      return failure();

    AffineExpr expr = map.getResult(0);
    if (map.getNumInputs() == 0) {
      if (auto val = expr.dyn_cast<AffineConstantExpr>()) {
        rewriter.replaceOpWithNewOp<ConstantIndexOp>(op, val.getValue());
        return success();
      }
      return failure();
    }
    if (expr.dyn_cast<AffineDimExpr>() || expr.dyn_cast<AffineSymbolExpr>()) {
      rewriter.replaceOp(op, op->getOperand(0));
      return success();
    }
    return failure();
  }
};

struct LowerToAffineLoops
    : public LinalgLowerToAffineLoopsBase<LowerToAffineLoops> {
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<AffineForOp>(getFunction(), &getContext());
  }
};

struct LowerToLoops : public LinalgLowerToLoopsBase<LowerToLoops> {
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<scf::ForOp>(getFunction(), &getContext());
  }
};

struct LowerToParallelLoops
    : public LinalgLowerToParallelLoopsBase<LowerToParallelLoops> {
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<scf::ParallelOp>(getFunction(), &getContext());
  }
};
} // namespace

std::unique_ptr<OperationPass<FuncOp>> mlir::createConvertLinalgToLoopsPass() {
  return std::make_unique<LowerToLoops>();
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createConvertLinalgToParallelLoopsPass() {
  return std::make_unique<LowerToParallelLoops>();
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createConvertLinalgToAffineLoopsPass() {
  return std::make_unique<LowerToAffineLoops>();
}

/// Emits a loop nest with the proper body for `op`.
template <typename LoopTy>
Optional<LinalgLoops> mlir::linalg::linalgLowerOpToLoops(OpBuilder &builder,
                                                         Operation *op) {
  return linalgOpToLoopsImpl<LoopTy>(op, builder);
}

template Optional<LinalgLoops>
mlir::linalg::linalgLowerOpToLoops<AffineForOp>(OpBuilder &builder,
                                                Operation *op);
template Optional<LinalgLoops>
mlir::linalg::linalgLowerOpToLoops<scf::ForOp>(OpBuilder &builder,
                                               Operation *op);
template Optional<LinalgLoops>
mlir::linalg::linalgLowerOpToLoops<scf::ParallelOp>(OpBuilder &builder,
                                                    Operation *op);

/// Emits a loop nest of `affine.for` with the proper body for `op`.
LogicalResult mlir::linalg::linalgOpToAffineLoops(OpBuilder &builder,
                                                  Operation *op) {
  Optional<LinalgLoops> loops = linalgLowerOpToLoops<AffineForOp>(builder, op);
  return loops ? success() : failure();
}

/// Emits a loop nest of `scf.for` with the proper body for `op`.
LogicalResult mlir::linalg::linalgOpToLoops(OpBuilder &builder, Operation *op) {
  Optional<LinalgLoops> loops = linalgLowerOpToLoops<scf::ForOp>(builder, op);
  return loops ? success() : failure();
}

/// Emits a loop nest of `scf.parallel` with the proper body for `op`.
LogicalResult mlir::linalg::linalgOpToParallelLoops(OpBuilder &builder,
                                                    Operation *op) {
  Optional<LinalgLoops> loops =
      linalgLowerOpToLoops<scf::ParallelOp>(builder, op);
  return loops ? success() : failure();
}