; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true -debug < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
;    /* C := alpha*A*B + beta*C */
;    for (i = 0; i < _PB_NI; i++)
;      for (j = 0; j < _PB_NJ; j += 2)
;        {
;          C[i][j] *= beta;
;          for (k = 0; k < _PB_NK; ++k)
;            C[i][j] += alpha * A[i][k] * B[k][j];
;        }
;
; Check that the matrix multiplication pattern is not detected if, for
; example, there are memory accesses that have stride 2 after the
; interchange of loops.
;
; CHECK-NOT: The matrix multiplication pattern was detected
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"

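; Arguments of @kernel_gemm: %arg3 is alpha, %arg4 is beta, %arg5 points
; to C, %arg6 to A, and %arg7 to B (cf. the C snippet above).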
define internal void @kernel_gemm(i32 %arg, i32 %arg1, i32 %arg2, double %arg3, double %arg4, [1056 x double]* %arg5, [1024 x double]* %arg6, [1056 x double]* %arg7) {
bb:
  br label %bb8

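; Outer loop: i (%tmp) runs from 0 to 1055.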
bb8:                                              ; preds = %bb29, %bb
  %tmp = phi i64 [ 0, %bb ], [ %tmp30, %bb29 ]
  br label %bb9

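; Middle loop: j (%tmp10) takes the even values 0, 2, ..., 1054; the
; C[i][j] *= beta update happens here.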
bb9:                                              ; preds = %bb26, %bb8
  %tmp10 = phi i64 [ 0, %bb8 ], [ %tmp27, %bb26 ]
  %tmp11 = getelementptr inbounds [1056 x double], [1056 x double]* %arg5, i64 %tmp, i64 %tmp10
  %tmp12 = load double, double* %tmp11, align 8
  %tmp13 = fmul double %tmp12, %arg4
  store double %tmp13, double* %tmp11, align 8
  br label %Copy_0

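; Innermost loop: k (%tmp15) runs from 0 to 1023 and accumulates
; C[i][j] += alpha * A[i][k] * B[k][j]. Since j advances in steps of 2,
; the accesses to C[i][j] and B[k][j] have stride 2 along j, which is
; what keeps the pattern from being matched after loop interchange.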
Copy_0:                                           ; preds = %Copy_0, %bb9
  %tmp15 = phi i64 [ 0, %bb9 ], [ %tmp24, %Copy_0 ]
  %tmp16 = getelementptr inbounds [1024 x double], [1024 x double]* %arg6, i64 %tmp, i64 %tmp15
  %tmp17 = load double, double* %tmp16, align 8
  %tmp18 = fmul double %tmp17, %arg3
  %tmp19 = getelementptr inbounds [1056 x double], [1056 x double]* %arg7, i64 %tmp15, i64 %tmp10
  %tmp20 = load double, double* %tmp19, align 8
  %tmp21 = fmul double %tmp18, %tmp20
  %tmp22 = load double, double* %tmp11, align 8
  %tmp23 = fadd double %tmp22, %tmp21
  store double %tmp23, double* %tmp11, align 8
  %tmp24 = add nuw nsw i64 %tmp15, 1
  %tmp25 = icmp ne i64 %tmp24, 1024
  br i1 %tmp25, label %Copy_0, label %bb26

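; Latch of the j loop: j is incremented by 2 and compared against 1056.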
bb26:                                             ; preds = %Copy_0
  %tmp27 = add nuw nsw i64 %tmp10, 2
  %tmp28 = icmp ne i64 %tmp27, 1056
  br i1 %tmp28, label %bb9, label %bb29

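; Latch of the i loop: i is incremented by 1 and compared against 1056.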
bb29:                                             ; preds = %bb26
  %tmp30 = add nuw nsw i64 %tmp, 1
  %tmp31 = icmp ne i64 %tmp30, 1056
  br i1 %tmp31, label %bb8, label %bb32

bb32:                                             ; preds = %bb29
  ret void
}