; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-unroll -mtriple=thumbv7a-unknown-linux-gnueabihf -S %s | FileCheck %s

; Check we unroll even with optsize, if the result is smaller, either because
; we have single iteration loops or bodies with constant folding opportunities
; after fully unrolling.

; TODO: Looks like we should enable some unrolling for M-class, even when
; optimising for size.

declare i32 @get()

define void @fully_unrolled_single_iteration(i32* %src) #0 {
; CHECK-LABEL: @fully_unrolled_single_iteration(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[SRC:%.*]]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
; CHECK-NEXT:    store i32 [[V]], i32* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
; CHECK-NEXT:    call void @use(i32* nonnull [[PTR]])
; CHECK-NEXT:    ret void
;
entry:
  %arr = alloca [4 x i32], align 4
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %src.idx = getelementptr inbounds i32, i32* %src, i64 %indvars.iv
  %v = load i32, i32* %src.idx
  %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
  store i32 %v, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 1
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  %ptr = bitcast [4 x i32]* %arr to i32*
  call void @use(i32* nonnull %ptr) #4
  ret void
}


define void @fully_unrolled_smaller() #0 {
; CHECK-LABEL: @fully_unrolled_smaller(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
; CHECK-NEXT:    store i32 16, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 1
; CHECK-NEXT:    store i32 4104, i32* [[ARRAYIDX_1]], align 4
; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 2
; CHECK-NEXT:    store i32 1048592, i32* [[ARRAYIDX_2]], align 4
; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 3
; CHECK-NEXT:    store i32 268435480, i32* [[ARRAYIDX_3]], align 4
; CHECK-NEXT:    [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
; CHECK-NEXT:    call void @use(i32* nonnull [[PTR]])
; CHECK-NEXT:    ret void
;
entry:
  %arr = alloca [4 x i32], align 4
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %indvars.iv.tr = trunc i64 %indvars.iv to i32
  %shl.0 = shl i32 %indvars.iv.tr, 3
  %shl.1 = shl i32 16, %shl.0
  %or = or i32 %shl.1, %shl.0
  %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
  store i32 %or, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv, 3
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  %ptr = bitcast [4 x i32]* %arr to i32*
  call void @use(i32* nonnull %ptr) #4
  ret void
}
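; The constants checked for @fully_unrolled_smaller fall out of folding the
; loop body once the induction variable is a constant:
; (16 << (i * 8)) | (i * 8) gives 16, 4104, 1048592 and 268435480 for
; i = 0..3, so each unrolled iteration collapses to a single constant store.

; Same loop as @fully_unrolled_smaller, but with minsize in addition to
; optsize (attribute #1, i.e. -Oz); full unrolling is still expected.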
define void @fully_unrolled_smaller_Oz() #1 {
; CHECK-LABEL: @fully_unrolled_smaller_Oz(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
; CHECK-NEXT:    store i32 16, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 1
; CHECK-NEXT:    store i32 4104, i32* [[ARRAYIDX_1]], align 4
; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 2
; CHECK-NEXT:    store i32 1048592, i32* [[ARRAYIDX_2]], align 4
; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 3
; CHECK-NEXT:    store i32 268435480, i32* [[ARRAYIDX_3]], align 4
; CHECK-NEXT:    [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
; CHECK-NEXT:    call void @use(i32* nonnull [[PTR]])
; CHECK-NEXT:    ret void
;
entry:
  %arr = alloca [4 x i32], align 4
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %indvars.iv.tr = trunc i64 %indvars.iv to i32
  %shl.0 = shl i32 %indvars.iv.tr, 3
  %shl.1 = shl i32 16, %shl.0
  %or = or i32 %shl.1, %shl.0
  %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
  store i32 %or, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv, 3
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  %ptr = bitcast [4 x i32]* %arr to i32*
  call void @use(i32* nonnull %ptr) #4
  ret void
}

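; Negative test: with a trip count of 8, the fully unrolled body is estimated
; to be larger than the rolled loop, so under optsize the loop must be left
; intact. The CHECK lines below match the un-unrolled loop.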
define void @fully_unrolled_bigger() #0 {
; CHECK-LABEL: @fully_unrolled_bigger(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[INDVARS_IV_TR:%.*]] = trunc i64 [[INDVARS_IV]] to i32
; CHECK-NEXT:    [[SHL_0:%.*]] = shl i32 [[INDVARS_IV_TR]], 3
; CHECK-NEXT:    [[SHL_1:%.*]] = shl i32 16, [[SHL_0]]
; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHL_1]], [[SHL_0]]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT:    store i32 [[OR]], i32* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 7
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
; CHECK-NEXT:    call void @use(i32* nonnull [[PTR]])
; CHECK-NEXT:    ret void
;
entry:
  %arr = alloca [4 x i32], align 4
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %indvars.iv.tr = trunc i64 %indvars.iv to i32
  %shl.0 = shl i32 %indvars.iv.tr, 3
  %shl.1 = shl i32 16, %shl.0
  %or = or i32 %shl.1, %shl.0
  %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
  store i32 %or, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv, 7
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  %ptr = bitcast [4 x i32]* %arr to i32*
  call void @use(i32* nonnull %ptr) #4
  ret void
}

declare void @use(i32*)

attributes #0 = { optsize }
attributes #1 = { minsize optsize }
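; Attribute group #0 (optsize) corresponds to -Os; #1 (minsize optsize)
; corresponds to -Oz. Both still permit full unrolling when the estimated
; unrolled size is smaller than the rolled loop.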