; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX1
; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2

; Division by a uniform power-of-2 constant: the scalar adds and sdivs are
; vectorized into single <4 x i32> operations on both AVX and AVX2.
define void @powof2div_uniform(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* noalias nocapture readonly %c){
; CHECK-LABEL: @powof2div_uniform(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 1
; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 1
; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2
; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 2
; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[B]] to <4 x i32>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
; CHECK-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 3
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[C]] to <4 x i32>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT:    [[TMP5:%.*]] = sdiv <4 x i32> [[TMP4]], <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[A]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %0 = load i32, i32* %b, align 4
  %1 = load i32, i32* %c, align 4
  %add = add nsw i32 %1, %0
  %div = sdiv i32 %add, 2
  store i32 %div, i32* %a, align 4
  %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 1
  %2 = load i32, i32* %arrayidx3, align 4
  %arrayidx4 = getelementptr inbounds i32, i32* %c, i64 1
  %3 = load i32, i32* %arrayidx4, align 4
  %add5 = add nsw i32 %3, %2
  %div6 = sdiv i32 %add5, 2
  %arrayidx7 = getelementptr inbounds i32, i32* %a, i64 1
  store i32 %div6, i32* %arrayidx7, align 4
  %arrayidx8 = getelementptr inbounds i32, i32* %b, i64 2
  %4 = load i32, i32* %arrayidx8, align 4
  %arrayidx9 = getelementptr inbounds i32, i32* %c, i64 2
  %5 = load i32, i32* %arrayidx9, align 4
  %add10 = add nsw i32 %5, %4
  %div11 = sdiv i32 %add10, 2
  %arrayidx12 = getelementptr inbounds i32, i32* %a, i64 2
  store i32 %div11, i32* %arrayidx12, align 4
  %arrayidx13 = getelementptr inbounds i32, i32* %b, i64 3
  %6 = load i32, i32* %arrayidx13, align 4
  %arrayidx14 = getelementptr inbounds i32, i32* %c, i64 3
  %7 = load i32, i32* %arrayidx14, align 4
  %add15 = add nsw i32 %7, %6
  %div16 = sdiv i32 %add15, 2
  %arrayidx17 = getelementptr inbounds i32, i32* %a, i64 3
  store i32 %div16, i32* %arrayidx17, align 4
  ret void
}

; Division by non-uniform power-of-2 constants (2, 4, 8, 16): AVX2 vectorizes
; this into a single <4 x i32> sdiv, while AVX1 leaves the code scalar.
define void @powof2div_nonuniform(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* noalias nocapture readonly %c){
; AVX1-LABEL: @powof2div_nonuniform(
; AVX1-NEXT:  entry:
; AVX1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[B:%.*]], align 4
; AVX1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[C:%.*]], align 4
; AVX1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP0]]
; AVX1-NEXT:    [[DIV:%.*]] = sdiv i32 [[ADD]], 2
; AVX1-NEXT:    store i32 [[DIV]], i32* [[A:%.*]], align 4
; AVX1-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 1
; AVX1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX3]], align 4
; AVX1-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 1
; AVX1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4
; AVX1-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
; AVX1-NEXT:    [[DIV6:%.*]] = sdiv i32 [[ADD5]], 4
; AVX1-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 1
; AVX1-NEXT:    store i32 [[DIV6]], i32* [[ARRAYIDX7]], align 4
; AVX1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2
; AVX1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4
; AVX1-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 2
; AVX1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4
; AVX1-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP5]], [[TMP4]]
; AVX1-NEXT:    [[DIV11:%.*]] = sdiv i32 [[ADD10]], 8
; AVX1-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
; AVX1-NEXT:    store i32 [[DIV11]], i32* [[ARRAYIDX12]], align 4
; AVX1-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3
; AVX1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX13]], align 4
; AVX1-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 3
; AVX1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX14]], align 4
; AVX1-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP7]], [[TMP6]]
; AVX1-NEXT:    [[DIV16:%.*]] = sdiv i32 [[ADD15]], 16
; AVX1-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3
; AVX1-NEXT:    store i32 [[DIV16]], i32* [[ARRAYIDX17]], align 4
; AVX1-NEXT:    ret void
;
; AVX2-LABEL: @powof2div_nonuniform(
; AVX2-NEXT:  entry:
; AVX2-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1
; AVX2-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 1
; AVX2-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 1
; AVX2-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2
; AVX2-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 2
; AVX2-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
; AVX2-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3
; AVX2-NEXT:    [[TMP0:%.*]] = bitcast i32* [[B]] to <4 x i32>*
; AVX2-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
; AVX2-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 3
; AVX2-NEXT:    [[TMP2:%.*]] = bitcast i32* [[C]] to <4 x i32>*
; AVX2-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
; AVX2-NEXT:    [[TMP4:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP1]]
; AVX2-NEXT:    [[TMP5:%.*]] = sdiv <4 x i32> [[TMP4]], <i32 2, i32 4, i32 8, i32 16>
; AVX2-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3
; AVX2-NEXT:    [[TMP6:%.*]] = bitcast i32* [[A]] to <4 x i32>*
; AVX2-NEXT:    store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
; AVX2-NEXT:    ret void
;
entry:
  %0 = load i32, i32* %b, align 4
  %1 = load i32, i32* %c, align 4
  %add = add nsw i32 %1, %0
  %div = sdiv i32 %add, 2
  store i32 %div, i32* %a, align 4
  %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 1
  %2 = load i32, i32* %arrayidx3, align 4
  %arrayidx4 = getelementptr inbounds i32, i32* %c, i64 1
  %3 = load i32, i32* %arrayidx4, align 4
  %add5 = add nsw i32 %3, %2
  %div6 = sdiv i32 %add5, 4
  %arrayidx7 = getelementptr inbounds i32, i32* %a, i64 1
  store i32 %div6, i32* %arrayidx7, align 4
  %arrayidx8 = getelementptr inbounds i32, i32* %b, i64 2
  %4 = load i32, i32* %arrayidx8, align 4
  %arrayidx9 = getelementptr inbounds i32, i32* %c, i64 2
  %5 = load i32, i32* %arrayidx9, align 4
  %add10 = add nsw i32 %5, %4
  %div11 = sdiv i32 %add10, 8
  %arrayidx12 = getelementptr inbounds i32, i32* %a, i64 2
  store i32 %div11, i32* %arrayidx12, align 4
  %arrayidx13 = getelementptr inbounds i32, i32* %b, i64 3
  %6 = load i32, i32* %arrayidx13, align 4
  %arrayidx14 = getelementptr inbounds i32, i32* %c, i64 3
  %7 = load i32, i32* %arrayidx14, align 4
  %add15 = add nsw i32 %7, %6
  %div16 = sdiv i32 %add15, 16
  %arrayidx17 = getelementptr inbounds i32, i32* %a, i64 3
  store i32 %div16, i32* %arrayidx17, align 4
  ret void
}