; This test contains extremely tricky call graph structures for the inliner to
; handle correctly. They form cycles where the inliner introduces code that is
; immediately, or can eventually be, transformed back into the original code.
; Each step changes the call graph and so triggers iteration. This requires
; some out-of-band way to prevent infinitely re-inlining and re-transforming
; the code.
;
; RUN: opt < %s -passes='cgscc(inline,function(sroa,instcombine))' -S | FileCheck %s


; The `test1_*` collection of functions forms a directly cycling pattern.

define void @test1_a(i8** %ptr) {
; CHECK-LABEL: define void @test1_a(
entry:
  call void @test1_b(i8* bitcast (void (i8*, i1, i32)* @test1_b to i8*), i1 false, i32 0)
; Inlining and simplifying this call will reliably produce the exact same call,
; over and over again. However, each inlining increments the count, and so we
; expect this test case to stop after one round of inlining with a final
; argument of '1'.
; CHECK-NOT: call
; CHECK: call void @test1_b(i8* bitcast (void (i8*, i1, i32)* @test1_b to i8*), i1 false, i32 1)
; CHECK-NOT: call

  ret void
}

define void @test1_b(i8* %arg, i1 %flag, i32 %inline_count) {
; CHECK-LABEL: define void @test1_b(
entry:
  %a = alloca i8*
  store i8* %arg, i8** %a
; This alloca and store should remain through any optimization.
; CHECK: %[[A:.*]] = alloca
; CHECK: store i8* %arg, i8** %[[A]]

  br i1 %flag, label %bb1, label %bb2

bb1:
  call void @test1_a(i8** %a) noinline
  br label %bb2

bb2:
  %cast = bitcast i8** %a to void (i8*, i1, i32)**
  %p = load void (i8*, i1, i32)*, void (i8*, i1, i32)** %cast
  %inline_count_inc = add i32 %inline_count, 1
  call void %p(i8* %arg, i1 %flag, i32 %inline_count_inc)
; And we should continue to load and call indirectly through optimization.
; CHECK: %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i1, i32)**
; CHECK: %[[P:.*]] = load void (i8*, i1, i32)*, void (i8*, i1, i32)** %[[CAST]]
; CHECK: call void %[[P]](

  ret void
}

; The `test2_*` collection of functions forms the same kind of cycle, but
; spread across two functions (@test2_b and @test2_c) so that two rounds of
; inlining are required to reproduce the original call.

define void @test2_a(i8** %ptr) {
; CHECK-LABEL: define void @test2_a(
entry:
  call void @test2_b(i8* bitcast (void (i8*, i8*, i1, i32)* @test2_b to i8*), i8* bitcast (void (i8*, i8*, i1, i32)* @test2_c to i8*), i1 false, i32 0)
; Inlining and simplifying this call will reliably produce the exact same call,
; but only after doing two rounds of inlining, first from @test2_b then
; @test2_c. We check the exact number of inlining rounds before we cut off to
; break the cycle by inspecting the last parameter that gets incremented with
; each inlined function body.
; CHECK-NOT: call
; CHECK: call void @test2_b(i8* bitcast (void (i8*, i8*, i1, i32)* @test2_b to i8*), i8* bitcast (void (i8*, i8*, i1, i32)* @test2_c to i8*), i1 false, i32 2)
; CHECK-NOT: call
  ret void
}

define void @test2_b(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count) {
; CHECK-LABEL: define void @test2_b(
entry:
  %a = alloca i8*
  store i8* %arg2, i8** %a
; This alloca and store should remain through any optimization.
; CHECK: %[[A:.*]] = alloca
; CHECK: store i8* %arg2, i8** %[[A]]

  br i1 %flag, label %bb1, label %bb2

bb1:
  call void @test2_a(i8** %a) noinline
  br label %bb2

bb2:
  %p = load i8*, i8** %a
  %cast = bitcast i8* %p to void (i8*, i8*, i1, i32)*
  %inline_count_inc = add i32 %inline_count, 1
  call void %cast(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count_inc)
; And we should continue to load and call indirectly through optimization.
; CHECK: %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i8*, i1, i32)**
; CHECK: %[[P:.*]] = load void (i8*, i8*, i1, i32)*, void (i8*, i8*, i1, i32)** %[[CAST]]
; CHECK: call void %[[P]](

  ret void
}

define void @test2_c(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count) {
; CHECK-LABEL: define void @test2_c(
entry:
  %a = alloca i8*
  store i8* %arg1, i8** %a
; This alloca and store should remain through any optimization.
; CHECK: %[[A:.*]] = alloca
; CHECK: store i8* %arg1, i8** %[[A]]

  br i1 %flag, label %bb1, label %bb2

bb1:
  call void @test2_a(i8** %a) noinline
  br label %bb2

bb2:
  %p = load i8*, i8** %a
  %cast = bitcast i8* %p to void (i8*, i8*, i1, i32)*
  %inline_count_inc = add i32 %inline_count, 1
  call void %cast(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count_inc)
; And we should continue to load and call indirectly through optimization.
; CHECK: %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i8*, i1, i32)**
; CHECK: %[[P:.*]] = load void (i8*, i8*, i1, i32)*, void (i8*, i8*, i1, i32)** %[[CAST]]
; CHECK: call void %[[P]](

  ret void
}