; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-macosx -mattr=+avx | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; For this test, we used to optimize the <i1 true, i1 false, i1 false, i1 true>
; mask into <i32 2147483648, i32 0, i32 0, i32 2147483648> because we thought
; we would lower that into a blend where only the high bit is relevant.
; However, since the whole mask is constant, this is simplified incorrectly
; by the generic code, because it was expecting -1 in place of 2147483648.
;
; The problem does not occur without AVX, because vselect of v4i32 is neither
; legal nor custom.
;
; <rdar://problem/18675020>

define void @test(<4 x i16>* %a, <4 x i16>* %b) {
; CHECK-LABEL: test:
; CHECK:       ## BB#0: ## %body
; CHECK-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; CHECK-NEXT:    vmovdqa {{.*#+}} xmm1 = [65533,124,125,14807]
; CHECK-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
; CHECK-NEXT:    vmovq %xmm1, (%rdi)
; CHECK-NEXT:    vmovdqa {{.*#+}} xmm1 = [65535,0,0,65535]
; CHECK-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    vmovq %xmm0, (%rsi)
; CHECK-NEXT:    retq
body:
  %predphi = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> <i16 -3, i16 545, i16 4385, i16 14807>, <4 x i16> <i16 123, i16 124, i16 125, i16 127>
  %predphi42 = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> zeroinitializer
  store <4 x i16> %predphi, <4 x i16>* %a, align 8
  store <4 x i16> %predphi42, <4 x i16>* %b, align 8
  ret void
}

; Improve code coverage.
;
; When shrinking the condition used in the select to match a blend, this
; test case exercises the path where the modified node is not the root
; of the condition.

define void @test2(double** %call1559, i64 %indvars.iv4198, <4 x i1> %tmp1895) {
; CHECK-LABEL: test2:
; CHECK:       ## BB#0: ## %bb
; CHECK-NEXT:    vpslld $31, %xmm0, %xmm0
; CHECK-NEXT:    vpsrad $31, %xmm0, %xmm0
; CHECK-NEXT:    vpmovsxdq %xmm0, %xmm1
; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT:    vpmovsxdq %xmm0, %xmm0
; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; CHECK-NEXT:    movq (%rdi,%rsi,8), %rax
; CHECK-NEXT:    vmovapd {{.*#+}} ymm1 = [5.000000e-01,5.000000e-01,5.000000e-01,5.000000e-01]
; CHECK-NEXT:    vblendvpd %ymm0, {{.*}}(%rip), %ymm1, %ymm0
; CHECK-NEXT:    vmovupd %ymm0, (%rax)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
bb:
  %arrayidx1928 = getelementptr inbounds double*, double** %call1559, i64 %indvars.iv4198
  %tmp1888 = load double*, double** %arrayidx1928, align 8
  %predphi.v.v = select <4 x i1> %tmp1895, <4 x double> <double -5.000000e-01, double -5.000000e-01, double -5.000000e-01, double -5.000000e-01>, <4 x double> <double 5.000000e-01, double 5.000000e-01, double 5.000000e-01, double 5.000000e-01>
  %tmp1900 = bitcast double* %tmp1888 to <4 x double>*
  store <4 x double> %predphi.v.v, <4 x double>* %tmp1900, align 8
  ret void
}

; For this test, we used to optimize the conditional mask for the blend, i.e.,
; we shrank some of its bits.
; However, this same mask was used in another select (%predphi31) that turned
; out to be optimized into an and. In that case, the conditional mask was wrong.
;
; Make sure that the and is fed by the original mask.
;
; <rdar://problem/18819506>

define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17, <4 x i16> %tmp3, <4 x i16> %tmp12) {
; CHECK-LABEL: test3:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1431655766,1431655766,1431655766,1431655766]
; CHECK-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; CHECK-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; CHECK-NEXT:    vpmuldq %xmm4, %xmm5, %xmm4
; CHECK-NEXT:    vpmuldq %xmm3, %xmm0, %xmm3
; CHECK-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; CHECK-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
; CHECK-NEXT:    vpsrld $31, %xmm3, %xmm4
; CHECK-NEXT:    vpaddd %xmm4, %xmm3, %xmm3
; CHECK-NEXT:    vpmulld {{.*}}(%rip), %xmm3, %xmm3
; CHECK-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
; CHECK-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm1
; CHECK-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; CHECK-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
; CHECK-NEXT:    vmovq %xmm0, (%rdi)
; CHECK-NEXT:    vpshufb %xmm2, %xmm1, %xmm0
; CHECK-NEXT:    vmovq %xmm0, (%rsi)
; CHECK-NEXT:    retq
  %tmp6 = srem <4 x i32> %induction30, <i32 3, i32 3, i32 3, i32 3>
  %tmp7 = icmp eq <4 x i32> %tmp6, zeroinitializer
  %predphi = select <4 x i1> %tmp7, <4 x i16> %tmp3, <4 x i16> %tmp12
  %predphi31 = select <4 x i1> %tmp7, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> zeroinitializer

  store <4 x i16> %predphi31, <4 x i16>* %tmp16, align 8
  store <4 x i16> %predphi, <4 x i16>* %tmp17, align 8
  ret void
}

; We shouldn't try to lower this directly using VSELECT because we don't have
; vpblendvb in AVX1, only in AVX2. Instead, it should be expanded.

define <32 x i8> @PR22706(<32 x i1> %x) {
; CHECK-LABEL: PR22706:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT:    vpsllw $7, %xmm1, %xmm1
; CHECK-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; CHECK-NEXT:    vpand %xmm2, %xmm1, %xmm1
; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT:    vpcmpgtb %xmm1, %xmm3, %xmm1
; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT:    vpand %xmm2, %xmm0, %xmm0
; CHECK-NEXT:    vpcmpgtb %xmm0, %xmm3, %xmm0
; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT:    vandnps {{.*}}(%rip), %ymm0, %ymm1
; CHECK-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT:    vorps %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    retq
  %tmp = select <32 x i1> %x, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <32 x i8> <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
  ret <32 x i8> %tmp
}