// RUN: mlir-opt %s -convert-linalg-to-affine-loops | FileCheck %s

// Test that we can lower all the way to LLVM without crashing; don't check results here.
// RUN: mlir-opt %s -convert-linalg-to-affine-loops -convert-linalg-to-llvm -o=/dev/null 2>&1

// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>

// CHECK-DAG: #[[$stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>

// CHECK-DAG: #[[$clampMinMap:.*]] = affine_map<(d0) -> (d0, 0)>
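
// For reference, the maps above encode:
//   #strided3D: a strided 3-D layout; element (d0, d1, d2) sits at offset
//     s0 + d0 * s1 + d1 * s2 + d2 (innermost stride is 1).
//   #stride2Dilation1: a stride-2, dilation-1 conv index; output position d0
//     and kernel position d1 read input element d0 * 2 + d1, e.g. (3, 1) -> 7.
//   #clampMinMap: a multi-result map; combined with affine.max it clamps an
//     index to be >= 0, i.e. max(d0, 0).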

func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
  %c0 = constant 0 : index
  %c1 = constant 1 : index
  %A = view %arg0[%c0][%M, %K] : memref<?xi8> to memref<?x?xf32>
  %B = view %arg0[%c0][%K, %N] : memref<?xi8> to memref<?x?xf32>
  %C = view %arg0[%c0][%M, %N] : memref<?xi8> to memref<?x?xf32>
  linalg.matmul ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
                outs(%C: memref<?x?xf32>)
  return
}
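
// For reference, the lowering produces roughly the following affine loop
// nest (value names are illustrative; the checks below match it without
// relying on names):
//   affine.for %i = 0 to %M {
//     affine.for %j = 0 to %N {
//       affine.for %k = 0 to %K {
//         %a = affine.load %A[%i, %k] : memref<?x?xf32>
//         %b = affine.load %B[%k, %j] : memref<?x?xf32>
//         %inc = mulf %a, %b : f32
//         %c = affine.load %C[%i, %j] : memref<?x?xf32>
//         %res = addf %c, %inc : f32
//         affine.store %res, %C[%i, %j] : memref<?x?xf32>
//       }
//     }
//   }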

// CHECK-LABEL: func @matmul(%{{.*}}: memref<?xi8>,
//  CHECK-SAME: [[M:arg[0-9]+]]: index
//  CHECK-SAME: [[N:arg[0-9]+]]: index
//  CHECK-SAME: [[K:arg[0-9]+]]: index
//       CHECK: %[[A:.*]] = std.view %{{.*}}[{{.*}}] : memref<?xi8> to memref<?x?xf32>
//       CHECK: %[[B:.*]] = std.view %{{.*}}[{{.*}}] : memref<?xi8> to memref<?x?xf32>
//       CHECK: %[[C:.*]] = std.view %{{.*}}[{{.*}}] : memref<?xi8> to memref<?x?xf32>
//       CHECK: affine.for %{{.*}} = 0 to %{{.*}} {
//       CHECK:   affine.for %{{.*}} = 0 to %{{.*}} {
//       CHECK:     affine.for %{{.*}} = 0 to %{{.*}} {
//   CHECK-DAG:       %[[a:.*]] = affine.load %[[A]][%{{.*}}, %{{.*}}] : memref<?x?xf32>
//   CHECK-DAG:       %[[b:.*]] = affine.load %[[B]][%{{.*}}, %{{.*}}] : memref<?x?xf32>
//   CHECK-DAG:       %[[inc:.*]] = mulf %[[a]], %[[b]] : f32
//   CHECK-DAG:       %[[c:.*]] = affine.load %[[C]][%{{.*}}, %{{.*}}] : memref<?x?xf32>
//   CHECK-DAG:       %[[res:.*]] = addf %[[c]], %[[inc]] : f32
//       CHECK:       affine.store %[[res]], %[[C]][%{{.*}}, %{{.*}}] : memref<?x?xf32>

func @conv_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
                 %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
                 %arg2: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
  linalg.conv(%arg0, %arg1, %arg2) {strides = [2]} :
    memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
    memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
    memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>
  return
}

// CHECK-LABEL: func @conv_view3(
//  CHECK-SAME: %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>) {
//       CHECK:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?xf32, #[[$strided3D]]>
//       CHECK:   %[[Q:.*]] = dim %arg0, %c1 : memref<?x?x?xf32, #[[$strided3D]]>
//       CHECK:   %[[K:.*]] = dim %arg0, %c2 : memref<?x?x?xf32, #[[$strided3D]]>
//       CHECK:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?xf32, #[[$strided3D]]>
//       CHECK:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?xf32, #[[$strided3D]]>
//       CHECK:   affine.for %{{.*}} = 0 to %[[B]] {
//       CHECK:     affine.for %{{.*}} = 0 to %[[X0]] {
//       CHECK:       affine.for %{{.*}} = 0 to %[[K]] {
//       CHECK:         affine.for %{{.*}} = 0 to %[[Q]] {
//       CHECK:           affine.for %{{.*}} = 0 to %[[Z0]] {
//       CHECK:             %[[SUM:.*]] = affine.apply #[[$stride2Dilation1]](%{{.*}}, %{{.*}})
// No padding needed here; only affine loads.
//  CHECK-NEXT:             affine.load
//  CHECK-NEXT:             affine.load
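
// For reference: %[[SUM]] folds the stride into the input access (input
// index = 2 * output index + kernel index, per #stride2Dilation1), and the
// two affine loads read the elements feeding the multiply-accumulate, which
// is not checked here.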

func @conv_padding(%arg0: memref<?x?x?x?xf32>,
                   %arg1: memref<?x?x?x?xf32>,
                   %arg2: memref<?x?x?x?xf32>) {
  linalg.conv(%arg0, %arg1, %arg2) {dilations = [1, 1],
                                    padding = dense<[[0, 1], [1, 1]]> : tensor<2x2xi64>,
                                    strides = [1, 1]} :
    memref<?x?x?x?xf32>, memref<?x?x?x?xf32>, memref<?x?x?x?xf32>
  return
}
// CHECK-LABEL: func @conv_padding
//  CHECK-SAME: %{{.*}}: memref<?x?x?x?xf32>, %{{.*}}: memref<?x?x?x?xf32>, %{{.*}}: memref<?x?x?x?xf32>) {
//       CHECK:   %[[ZERO:.*]] = constant 0.000000e+00 : f32
//       CHECK:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?x?xf32>
//       CHECK:   %[[Z1:.*]] = dim %arg0, %c1 : memref<?x?x?x?xf32>
//       CHECK:   %[[Q:.*]] = dim %arg0, %c2 : memref<?x?x?x?xf32>
//       CHECK:   %[[K:.*]] = dim %arg0, %c3 : memref<?x?x?x?xf32>
//       CHECK:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?x?xf32>
//       CHECK:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?x?xf32>
//       CHECK:   %[[X1:.*]] = dim %arg2, %c2 : memref<?x?x?x?xf32>
//       CHECK:   affine.for %{{.*}} = 0 to %[[B]] {
//       CHECK:     affine.for %{{.*}} = 0 to %[[X0]] {
//       CHECK:       affine.for %{{.*}} = 0 to %[[X1]] {
//       CHECK:         affine.for %{{.*}} = 0 to %[[K]] {
//       CHECK:           affine.for %{{.*}} = 0 to %[[Q]] {
//       CHECK:             affine.for %{{.*}} = 0 to %[[Z0]] {
//       CHECK:               affine.for %{{.*}} = 0 to %[[Z1]] {
//       CHECK:                 %[[SUM0:.*]] = affine.apply #{{.*}}(%{{.*}}, %{{.*}})
//       CHECK:                 %[[SUM1:.*]] = affine.apply #{{.*}}(%{{.*}}, %{{.*}})
//       CHECK:                 %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[SUM0]])
//       CHECK:                 %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[SUM1]])
// Padded conv involves an affine.max in the memory access, which is not
// allowed by affine.load; use std.load in such cases.
//       CHECK:                 %{{.*}} = load %{{.*}}[%{{.*}}, %[[IDX]], %[[IDY]], %{{.*}}] : memref<?x?x?x?xf32>
//       CHECK:                 %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : f32
//       CHECK:                 %{{.*}} = affine.load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32>
//       CHECK:                 %{{.*}} = mulf %{{.*}}, %{{.*}} : f32
//       CHECK:                 %{{.*}} = affine.load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32>
//       CHECK:                 %{{.*}} = addf %{{.*}}, %{{.*}} : f32
//       CHECK:                 affine.store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32>
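
// For reference, the padded access the checks above match is roughly
// (names illustrative; the in-bounds predicate feeding the select is not
// checked explicitly):
//   %idx = affine.max #clampMinMap(%sum0)  // clamp the padded index at 0
//   %v = load %in[%b, %idx, %idy, %q]      // std.load: index is non-affine
//   %pad = select %in_bounds, %v, %zero    // substitute the 0.0 pad value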

//----------------------------------------------------------------------------//
// Named ops to loops.
//----------------------------------------------------------------------------//
func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?x?xf32>) {
  linalg.batch_matmul ins(%A, %B: memref<?x?x?xf32>, memref<?x?x?xf32>)
                      outs(%C : memref<?x?x?xf32>)
  return
}
// CHECK-LABEL: @named_batch_matmul
//  CHECK-SAME: %[[mA:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
//  CHECK-SAME: %[[mB:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
//  CHECK-SAME: %[[mC:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
//       CHECK: %[[B:.*]] = dim %[[mA]], %c0 : memref<?x?x?xf32>
//       CHECK: %[[M:.*]] = dim %[[mA]], %c1 : memref<?x?x?xf32>
//       CHECK: %[[K:.*]] = dim %[[mA]], %c2 : memref<?x?x?xf32>
//       CHECK: %[[N:.*]] = dim %[[mB]], %c2 : memref<?x?x?xf32>
//       CHECK: affine.for %[[b:.*]] = 0 to %[[B]] {
//       CHECK:   affine.for %[[m:.*]] = 0 to %[[M]] {
//       CHECK:     affine.for %[[n:.*]] = 0 to %[[N]] {
//       CHECK:       affine.for %[[k:.*]] = 0 to %[[K]] {
//       CHECK:         %[[va:.*]] = affine.load %[[mA]][%[[b]], %[[m]], %[[k]]] : memref<?x?x?xf32>
//       CHECK:         %[[vb:.*]] = affine.load %[[mB]][%[[b]], %[[k]], %[[n]]] : memref<?x?x?xf32>
//       CHECK:         %[[vc:.*]] = affine.load %[[mC]][%[[b]], %[[m]], %[[n]]] : memref<?x?x?xf32>
//       CHECK:         %[[inc:.*]] = mulf %[[va]], %[[vb]] : f32
//       CHECK:         %[[res:.*]] = addf %[[vc]], %[[inc]] : f32
//       CHECK:         affine.store %[[res]], %[[mC]][%[[b]], %[[m]], %[[n]]] : memref<?x?x?xf32>

// CHECK-LABEL: func @pooling_max_min
func @pooling_max_min(%arg0: memref<?x?xf32>,
                      %arg1: memref<?x?xi32>,
                      %arg2: memref<?x?xf32>) {
  linalg.pooling_max(%arg0, %arg1, %arg2) { strides = [2, 1] }:
    memref<?x?xf32>, memref<?x?xi32>, memref<?x?xf32>
  linalg.pooling_min(%arg0, %arg1, %arg2) { strides = [2, 1] }:
    memref<?x?xf32>, memref<?x?xi32>, memref<?x?xf32>
  return
}
// This is a basic check that the right loads/stores are used; loops.mlir
// checks the rest.
// The max pooling body.
// CHECK:      affine.load
// CHECK-NEXT: affine.load
// CHECK-NEXT: cmpf
// CHECK-NEXT: select
// CHECK-NEXT: affine.store
// The min pooling body.
// CHECK:      affine.load
// CHECK-NEXT: affine.load
// CHECK-NEXT: cmpf
// CHECK-NEXT: select
// CHECK-NEXT: affine.store
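
// For reference, each pooling body loads the current accumulator and the
// input element, compares them, and stores the selected value back (names
// and operand order illustrative):
//   %acc = affine.load %output[%o0, %o1] : memref<?x?xf32>
//   %val = affine.load %input[%i0, %i1] : memref<?x?xf32>
//   %cmp = cmpf "ogt", %acc, %val : f32   // "olt" for pooling_min
//   %sel = select %cmp, %acc, %val : f32
//   affine.store %sel, %output[%o0, %o1] : memref<?x?xf32>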