; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-macosx -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-apple-macosx -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; For this test, we used to optimize the <i1 true, i1 false, i1 false, i1 true>
; mask into <i32 2147483648, i32 0, i32 0, i32 2147483648> because we thought
; we would lower that into a blend where only the high bit is relevant.
; However, since the whole mask is constant, this is simplified incorrectly
; by the generic code, because it was expecting -1 in place of 2147483648.
;
; The problem does not occur without AVX, because vselect of v4i32 is neither
; legal nor custom.
;
; <rdar://problem/18675020>

define void @test(<4 x i16>* %a, <4 x i16>* %b) {
; AVX-LABEL: test:
; AVX:       ## %bb.0: ## %body
; AVX-NEXT:    movq {{.*}}(%rip), %rax
; AVX-NEXT:    movq %rax, (%rdi)
; AVX-NEXT:    movq {{.*}}(%rip), %rax
; AVX-NEXT:    movq %rax, (%rsi)
; AVX-NEXT:    retq
body:
  %predphi = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> <i16 -3, i16 545, i16 4385, i16 14807>, <4 x i16> <i16 123, i16 124, i16 125, i16 127>
  %predphi42 = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> zeroinitializer
  store <4 x i16> %predphi, <4 x i16>* %a, align 8
  store <4 x i16> %predphi42, <4 x i16>* %b, align 8
  ret void
}

; Improve code coverage.
;
; When shrinking the condition used in the select to match a blend, this
; test case exercises the path where the modified node is not the root
; of the condition.

define void @test2(double** %call1559, i64 %indvars.iv4198, <4 x i1> %tmp1895) {
; AVX1-LABEL: test2:
; AVX1:       ## %bb.0: ## %bb
; AVX1-NEXT:    vpslld $31, %xmm0, %xmm0
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    movq (%rdi,%rsi,8), %rax
; AVX1-NEXT:    vmovapd {{.*#+}} ymm1 = [5.000000e-01,5.000000e-01,5.000000e-01,5.000000e-01]
; AVX1-NEXT:    vblendvpd %ymm0, {{.*}}(%rip), %ymm1, %ymm0
; AVX1-NEXT:    vmovupd %ymm0, (%rax)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test2:
; AVX2:       ## %bb.0: ## %bb
; AVX2-NEXT:    vpslld $31, %xmm0, %xmm0
; AVX2-NEXT:    vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT:    movq (%rdi,%rsi,8), %rax
; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [-0.5,-0.5,-0.5,-0.5]
; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [0.5,0.5,0.5,0.5]
; AVX2-NEXT:    vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
; AVX2-NEXT:    vmovupd %ymm0, (%rax)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
bb:
  %arrayidx1928 = getelementptr inbounds double*, double** %call1559, i64 %indvars.iv4198
  %tmp1888 = load double*, double** %arrayidx1928, align 8
  %predphi.v.v = select <4 x i1> %tmp1895, <4 x double> <double -5.000000e-01, double -5.000000e-01, double -5.000000e-01, double -5.000000e-01>, <4 x double> <double 5.000000e-01, double 5.000000e-01, double 5.000000e-01, double 5.000000e-01>
  %tmp1900 = bitcast double* %tmp1888 to <4 x double>*
  store <4 x double> %predphi.v.v, <4 x double>* %tmp1900, align 8
  ret void
}

; For this test, we used to optimize the conditional mask for the blend, i.e.,
; we shrank some of its bits.
; However, this same mask was used in another select (%predphi31) that turned out
; to be optimized into an and. In that case, the conditional mask was wrong.
;
; Make sure that the and is fed by the original mask.
;
; <rdar://problem/18819506>

define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17, <4 x i16> %tmp3, <4 x i16> %tmp12) {
; AVX1-LABEL: test3:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [1431655766,1431655766,1431655766,1431655766]
; AVX1-NEXT:    vpmuldq %xmm4, %xmm3, %xmm3
; AVX1-NEXT:    vpmuldq %xmm4, %xmm0, %xmm4
; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
; AVX1-NEXT:    vpsrld $31, %xmm3, %xmm4
; AVX1-NEXT:    vpaddd %xmm4, %xmm3, %xmm3
; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm3, %xmm3
; AVX1-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX1-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm1
; AVX1-NEXT:    vpackssdw %xmm0, %xmm0, %xmm0
; AVX1-NEXT:    vmovq %xmm0, (%rdi)
; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT:    vmovq %xmm0, (%rsi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test3:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1431655766,1431655766,1431655766,1431655766]
; AVX2-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX2-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; AVX2-NEXT:    vpmuldq %xmm4, %xmm5, %xmm4
; AVX2-NEXT:    vpmuldq %xmm3, %xmm0, %xmm3
; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX2-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
; AVX2-NEXT:    vpsrld $31, %xmm3, %xmm4
; AVX2-NEXT:    vpaddd %xmm4, %xmm3, %xmm3
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [3,3,3,3]
; AVX2-NEXT:    vpmulld %xmm4, %xmm3, %xmm3
; AVX2-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX2-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpackssdw %xmm0, %xmm0, %xmm0
; AVX2-NEXT:    vmovq %xmm0, (%rdi)
; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX2-NEXT:    vmovq %xmm0, (%rsi)
; AVX2-NEXT:    retq
  %tmp6 = srem <4 x i32> %induction30, <i32 3, i32 3, i32 3, i32 3>
  %tmp7 = icmp eq <4 x i32> %tmp6, zeroinitializer
  %predphi = select <4 x i1> %tmp7, <4 x i16> %tmp3, <4 x i16> %tmp12
  %predphi31 = select <4 x i1> %tmp7, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> zeroinitializer

  store <4 x i16> %predphi31, <4 x i16>* %tmp16, align 8
  store <4 x i16> %predphi, <4 x i16>* %tmp17, align 8
  ret void
}

; We shouldn't try to lower this directly using VSELECT because we don't have
; vpblendvb in AVX1, only in AVX2. Instead, it should be expanded.

define <32 x i8> @PR22706(<32 x i1> %x) {
; AVX1-LABEL: PR22706:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpsllw $7, %xmm1, %xmm1
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm3, %xmm1
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; AVX1-NEXT:    vpaddb %xmm4, %xmm1, %xmm1
; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm3, %xmm0
; AVX1-NEXT:    vpaddb %xmm4, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: PR22706:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpsllw $7, %ymm0, %ymm0
; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vpaddb {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    retq
  %tmp = select <32 x i1> %x, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <32 x i8> <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
  ret <32 x i8> %tmp
}