; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-vectorize -mtriple=x86_64-unknown-linux-gnu -S < %s | FileCheck %s

; The test checks that there is no assert caused by issue described in PR35432

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

@a = common local_unnamed_addr global [192 x [192 x i32]] zeroinitializer, align 16

; Function Attrs: nounwind uwtable
define i32 @main() local_unnamed_addr #0 {
; CHECK-LABEL: @main(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[S:%.*]] = alloca i16, align 2
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[I]] to i8*
; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[TMP0]])
; CHECK-NEXT:    store i32 0, i32* [[I]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i16* [[S]] to i8*
; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull [[TMP1]])
; CHECK-NEXT:    [[CALL:%.*]] = call i32 (i32*, ...) bitcast (i32 (...)* @goo to i32 (i32*, ...)*)(i32* nonnull [[I]])
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[I]], align 4
; CHECK-NEXT:    [[STOREMERGE6:%.*]] = trunc i32 [[TMP2]] to i16
; CHECK-NEXT:    store i16 [[STOREMERGE6]], i16* [[S]], align 2
; CHECK-NEXT:    [[CONV17:%.*]] = and i32 [[TMP2]], 65472
; CHECK-NEXT:    [[CMP8:%.*]] = icmp eq i32 [[CONV17]], 0
; CHECK-NEXT:    br i1 [[CMP8]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END12:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[STOREMERGE_IN9:%.*]] = phi i32 [ [[TMP2]], [[FOR_BODY_LR_PH]] ], [ [[ADD:%.*]], [[FOR_INC9:%.*]] ]
; CHECK-NEXT:    [[CONV52:%.*]] = and i32 [[STOREMERGE_IN9]], 255
; CHECK-NEXT:    [[CMP63:%.*]] = icmp ult i32 [[TMP2]], [[CONV52]]
; CHECK-NEXT:    br i1 [[CMP63]], label [[FOR_BODY8_LR_PH:%.*]], label [[FOR_INC9]]
; CHECK:       for.body8.lr.ph:
; CHECK-NEXT:    [[CONV3:%.*]] = trunc i32 [[STOREMERGE_IN9]] to i8
; CHECK-NEXT:    [[DOTPROMOTED:%.*]] = load i32, i32* getelementptr inbounds ([192 x [192 x i32]], [192 x [192 x i32]]* @a, i64 0, i64 0, i64 0), align 16
; CHECK-NEXT:    [[TMP3:%.*]] = add i8 [[CONV3]], -1
; CHECK-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[TMP4]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ult i32 [[TMP2]], [[TMP4]]
; CHECK-NEXT:    [[UMIN:%.*]] = select i1 [[TMP6]], i32 [[TMP2]], i32 [[TMP4]]
; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 [[TMP5]], [[UMIN]]
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP7]], 8
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[TMP8:%.*]] = add i8 [[CONV3]], -1
; CHECK-NEXT:    [[TMP9:%.*]] = zext i8 [[TMP8]] to i32
; CHECK-NEXT:    [[TMP10:%.*]] = icmp ult i32 [[TMP2]], [[TMP9]]
; CHECK-NEXT:    [[UMIN1:%.*]] = select i1 [[TMP10]], i32 [[TMP2]], i32 [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = sub i32 [[TMP9]], [[UMIN1]]
; CHECK-NEXT:    [[TMP12:%.*]] = trunc i32 [[TMP11]] to i8
; CHECK-NEXT:    [[MUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 1, i8 [[TMP12]])
; CHECK-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i8, i1 } [[MUL]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i8, i1 } [[MUL]], 1
; CHECK-NEXT:    [[TMP13:%.*]] = add i8 [[TMP8]], [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP14:%.*]] = sub i8 [[TMP8]], [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP15:%.*]] = icmp ugt i8 [[TMP14]], [[TMP8]]
; CHECK-NEXT:    [[TMP16:%.*]] = icmp ult i8 [[TMP13]], [[TMP8]]
; CHECK-NEXT:    [[TMP17:%.*]] = select i1 true, i1 [[TMP15]], i1 [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = icmp ugt i32 [[TMP11]], 255
; CHECK-NEXT:    [[TMP19:%.*]] = or i1 [[TMP17]], [[TMP18]]
; CHECK-NEXT:    [[TMP20:%.*]] = or i1 [[TMP19]], [[MUL_OVERFLOW]]
; CHECK-NEXT:    [[TMP21:%.*]] = or i1 false, [[TMP20]]
; CHECK-NEXT:    br i1 [[TMP21]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP7]], 8
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP7]], [[N_MOD_VF]]
; CHECK-NEXT:    [[CAST_CRD:%.*]] = trunc i32 [[N_VEC]] to i8
; CHECK-NEXT:    [[IND_END:%.*]] = sub i8 [[CONV3]], [[CAST_CRD]]
; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[DOTPROMOTED]], i32 0
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP22]], [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[INDEX]] to i8
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = sub i8 [[CONV3]], [[TMP23]]
; CHECK-NEXT:    [[TMP24:%.*]] = add i8 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP25:%.*]] = add i8 [[OFFSET_IDX]], -4
; CHECK-NEXT:    [[TMP26]] = add <4 x i32> [[VEC_PHI]], <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT:    [[TMP27]] = add <4 x i32> [[VEC_PHI2]], <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT:    [[TMP28:%.*]] = add i8 [[TMP24]], -1
; CHECK-NEXT:    [[TMP29:%.*]] = add i8 [[TMP25]], -1
; CHECK-NEXT:    [[TMP30:%.*]] = zext i8 [[TMP28]] to i32
; CHECK-NEXT:    [[TMP31:%.*]] = zext i8 [[TMP29]] to i32
; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 8
; CHECK-NEXT:    [[TMP32:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP0:!llvm.loop !.*]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <4 x i32> [[TMP27]], [[TMP26]]
; CHECK-NEXT:    [[TMP33:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[TMP7]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND4_FOR_INC9_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i8 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[CONV3]], [[FOR_BODY8_LR_PH]] ], [ [[CONV3]], [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[DOTPROMOTED]], [[FOR_BODY8_LR_PH]] ], [ [[DOTPROMOTED]], [[VECTOR_SCEVCHECK]] ], [ [[TMP33]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    br label [[FOR_BODY8:%.*]]
; CHECK:       for.body8:
; CHECK-NEXT:    [[INC5:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY8]] ]
; CHECK-NEXT:    [[C_04:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[DEC:%.*]], [[FOR_BODY8]] ]
; CHECK-NEXT:    [[INC]] = add i32 [[INC5]], 1
; CHECK-NEXT:    [[DEC]] = add i8 [[C_04]], -1
; CHECK-NEXT:    [[CONV5:%.*]] = zext i8 [[DEC]] to i32
; CHECK-NEXT:    [[CMP6:%.*]] = icmp ult i32 [[TMP2]], [[CONV5]]
; CHECK-NEXT:    br i1 [[CMP6]], label [[FOR_BODY8]], label [[FOR_COND4_FOR_INC9_CRIT_EDGE]], [[LOOP2:!llvm.loop !.*]]
; CHECK:       for.cond4.for.inc9_crit_edge:
; CHECK-NEXT:    [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[FOR_BODY8]] ], [ [[TMP33]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    store i32 [[INC_LCSSA]], i32* getelementptr inbounds ([192 x [192 x i32]], [192 x [192 x i32]]* @a, i64 0, i64 0, i64 0), align 16
; CHECK-NEXT:    br label [[FOR_INC9]]
; CHECK:       for.inc9:
; CHECK-NEXT:    [[CONV10:%.*]] = and i32 [[STOREMERGE_IN9]], 65535
; CHECK-NEXT:    [[ADD]] = add nuw nsw i32 [[CONV10]], 1
; CHECK-NEXT:    [[CONV1:%.*]] = and i32 [[ADD]], 65472
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CONV1]], 0
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END12_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end12_crit_edge:
; CHECK-NEXT:    [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_INC9]] ]
; CHECK-NEXT:    [[STOREMERGE:%.*]] = trunc i32 [[ADD_LCSSA]] to i16
; CHECK-NEXT:    store i16 [[STOREMERGE]], i16* [[S]], align 2
; CHECK-NEXT:    br label [[FOR_END12]]
; CHECK:       for.end12:
; CHECK-NEXT:    [[CALL13:%.*]] = call i32 (i16*, ...) bitcast (i32 (...)* @foo to i32 (i16*, ...)*)(i16* nonnull [[S]])
; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull [[TMP1]])
; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[TMP0]])
; CHECK-NEXT:    ret i32 0
;
entry:
  %i = alloca i32, align 4
  %s = alloca i16, align 2
  %0 = bitcast i32* %i to i8*
  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0) #3
  store i32 0, i32* %i, align 4
  %1 = bitcast i16* %s to i8*
  call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %1) #3
  %call = call i32 (i32*, ...) bitcast (i32 (...)* @goo to i32 (i32*, ...)*)(i32* nonnull %i) #3
  %2 = load i32, i32* %i, align 4
  %storemerge6 = trunc i32 %2 to i16
  store i16 %storemerge6, i16* %s, align 2
  %conv17 = and i32 %2, 65472
  %cmp8 = icmp eq i32 %conv17, 0
  br i1 %cmp8, label %for.body.lr.ph, label %for.end12

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc9
  %storemerge.in9 = phi i32 [ %2, %for.body.lr.ph ], [ %add, %for.inc9 ]
  %conv52 = and i32 %storemerge.in9, 255
  %cmp63 = icmp ult i32 %2, %conv52
  br i1 %cmp63, label %for.body8.lr.ph, label %for.inc9

for.body8.lr.ph:                                  ; preds = %for.body
  %conv3 = trunc i32 %storemerge.in9 to i8
  %.promoted = load i32, i32* getelementptr inbounds ([192 x [192 x i32]], [192 x [192 x i32]]* @a, i64 0, i64 0, i64 0), align 16
  br label %for.body8

for.body8:                                        ; preds = %for.body8.lr.ph, %for.body8
  %inc5 = phi i32 [ %.promoted, %for.body8.lr.ph ], [ %inc, %for.body8 ]
  %c.04 = phi i8 [ %conv3, %for.body8.lr.ph ], [ %dec, %for.body8 ]
  %inc = add i32 %inc5, 1
  %dec = add i8 %c.04, -1
  %conv5 = zext i8 %dec to i32
  %cmp6 = icmp ult i32 %2, %conv5
  br i1 %cmp6, label %for.body8, label %for.cond4.for.inc9_crit_edge

for.cond4.for.inc9_crit_edge:                     ; preds = %for.body8
  %inc.lcssa = phi i32 [ %inc, %for.body8 ]
  store i32 %inc.lcssa, i32* getelementptr inbounds ([192 x [192 x i32]], [192 x [192 x i32]]* @a, i64 0, i64 0, i64 0), align 16
  br label %for.inc9

for.inc9:                                         ; preds = %for.cond4.for.inc9_crit_edge, %for.body
  %conv10 = and i32 %storemerge.in9, 65535
  %add = add nuw nsw i32 %conv10, 1
  %conv1 = and i32 %add, 65472
  %cmp = icmp eq i32 %conv1, 0
  br i1 %cmp, label %for.body, label %for.cond.for.end12_crit_edge

for.cond.for.end12_crit_edge:                     ; preds = %for.inc9
  %add.lcssa = phi i32 [ %add, %for.inc9 ]
  %storemerge = trunc i32 %add.lcssa to i16
  store i16 %storemerge, i16* %s, align 2
  br label %for.end12

for.end12:                                        ; preds = %for.cond.for.end12_crit_edge, %entry
  %call13 = call i32 (i16*, ...) bitcast (i32 (...)* @foo to i32 (i16*, ...)*)(i16* nonnull %s) #3
  call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull %1) #3
  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0) #3
  ret i32 0
}

; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1

declare i32 @goo(...) local_unnamed_addr #2

declare i32 @foo(...) local_unnamed_addr #2

; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1