; RUN: opt -basicaa -loop-rotate -licm -instcombine -indvars -loop-unroll -S %s | FileCheck %s
;
; PR18361: ScalarEvolution::getAddRecExpr():
;          Assertion `isLoopInvariant(Operands[i],...
;
; After a series of loop optimizations, SCEV's LoopDispositions grow stale.
; In particular, LoopSimplify hoists %cmp4, resulting in this SCEV for %add:
; {(zext i1 %cmp4 to i32),+,1}<nw><%for.cond1.preheader>
;
; When recomputing the SCEV for %ashr, we truncate the operands to get:
; (zext i1 %cmp4 to i16)
;
; This SCEV was never mapped to a value so never invalidated. Its
; loop disposition is still marked as non-loop-invariant, which is
; inconsistent with the AddRec.

17target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
18target triple = "x86_64-apple-macosx"
19
20@d = common global i32 0, align 4
21@a = common global i32 0, align 4
22@c = common global i32 0, align 4
23@b = common global i32 0, align 4
24
; Check that the def-use chain that leads to the bad SCEV is still
; there.
;
; CHECK-LABEL: @foo
; CHECK-LABEL: entry:
; CHECK-LABEL: for.cond1.preheader:
; CHECK-LABEL: for.body3:
; CHECK: %cmp4.le.le
; CHECK: %conv.le.le = zext i1 %cmp4.le.le to i32
; CHECK: %xor.le.le = xor i32 %conv6.le.le, 1
35define void @foo() {
36entry:
37  br label %for.cond
38
39for.cond:                                         ; preds = %for.inc7, %entry
40  %storemerge = phi i32 [ 0, %entry ], [ %inc8, %for.inc7 ]
41  %f.0 = phi i32 [ undef, %entry ], [ %f.1, %for.inc7 ]
42  store i32 %storemerge, i32* @d, align 4
43  %cmp = icmp slt i32 %storemerge, 1
44  br i1 %cmp, label %for.cond1, label %for.end9
45
46for.cond1:                                        ; preds = %for.cond, %for.body3
47  %storemerge1 = phi i32 [ %inc, %for.body3 ], [ 0, %for.cond ]
48  %f.1 = phi i32 [ %xor, %for.body3 ], [ %f.0, %for.cond ]
49  store i32 %storemerge1, i32* @a, align 4
50  %cmp2 = icmp slt i32 %storemerge1, 1
51  br i1 %cmp2, label %for.body3, label %for.inc7
52
53for.body3:                                        ; preds = %for.cond1
54  %0 = load i32, i32* @c, align 4
55  %cmp4 = icmp sge i32 %storemerge1, %0
56  %conv = zext i1 %cmp4 to i32
57  %1 = load i32, i32* @d, align 4
58  %add = add nsw i32 %conv, %1
59  %sext = shl i32 %add, 16
60  %conv6 = ashr exact i32 %sext, 16
61  %xor = xor i32 %conv6, 1
62  %inc = add nsw i32 %storemerge1, 1
63  br label %for.cond1
64
65for.inc7:                                         ; preds = %for.cond1
66  %2 = load i32, i32* @d, align 4
67  %inc8 = add nsw i32 %2, 1
68  br label %for.cond
69
70for.end9:                                         ; preds = %for.cond
71  %cmp10 = icmp sgt i32 %f.0, 0
72  br i1 %cmp10, label %if.then, label %if.end
73
74if.then:                                          ; preds = %for.end9
75  store i32 0, i32* @b, align 4
76  br label %if.end
77
78if.end:                                           ; preds = %if.then, %for.end9
79  ret void
80}
81