; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s

; Check that loads feeding the first operand of these blend intrinsics are
; commuted to the second operand, so each load folds into the blend
; instruction's memory operand.

define <8 x i16> @commute_fold_vpblendw_128(<8 x i16> %a, <8 x i16>* %b) #0 {
; CHECK-LABEL: commute_fold_vpblendw_128:
; CHECK:       # BB#0:
; CHECK-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2,3],xmm0[4],mem[5,6,7]
; CHECK-NEXT:    retq
  %1 = load <8 x i16>, <8 x i16>* %b
  %2 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %1, <8 x i16> %a, i8 17)
  ret <8 x i16> %2
}
declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind readnone

define <16 x i16> @commute_fold_vpblendw_256(<16 x i16> %a, <16 x i16>* %b) #0 {
; CHECK-LABEL: commute_fold_vpblendw_256:
; CHECK:       # BB#0:
; CHECK-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],mem[1,2,3],ymm0[4],mem[5,6,7],ymm0[8],mem[9,10,11],ymm0[12],mem[13,14,15]
; CHECK-NEXT:    retq
  %1 = load <16 x i16>, <16 x i16>* %b
  %2 = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %1, <16 x i16> %a, i8 17)
  ret <16 x i16> %2
}
declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i8) nounwind readnone

define <4 x i32> @commute_fold_vpblendd_128(<4 x i32> %a, <4 x i32>* %b) #0 {
; CHECK-LABEL: commute_fold_vpblendd_128:
; CHECK:       # BB#0:
; CHECK-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
; CHECK-NEXT:    retq
  %1 = load <4 x i32>, <4 x i32>* %b
  %2 = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %1, <4 x i32> %a, i8 1)
  ret <4 x i32> %2
}
declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i8) nounwind readnone

define <8 x i32> @commute_fold_vpblendd_256(<8 x i32> %a, <8 x i32>* %b) #0 {
; CHECK-LABEL: commute_fold_vpblendd_256:
; CHECK:       # BB#0:
; CHECK-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],mem[1,2,3,4,5,6],ymm0[7]
; CHECK-NEXT:    retq
  %1 = load <8 x i32>, <8 x i32>* %b
  %2 = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %1, <8 x i32> %a, i8 129)
  ret <8 x i32> %2
}
declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i8) nounwind readnone

define <4 x float> @commute_fold_vblendps_128(<4 x float> %a, <4 x float>* %b) #0 {
; CHECK-LABEL: commute_fold_vblendps_128:
; CHECK:       # BB#0:
; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3]
; CHECK-NEXT:    retq
  %1 = load <4 x float>, <4 x float>* %b
  %2 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %1, <4 x float> %a, i8 5)
  ret <4 x float> %2
}
declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i8) nounwind readnone

define <8 x float> @commute_fold_vblendps_256(<8 x float> %a, <8 x float>* %b) #0 {
; CHECK-LABEL: commute_fold_vblendps_256:
; CHECK:       # BB#0:
; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],mem[3,4,5,6,7]
; CHECK-NEXT:    retq
  %1 = load <8 x float>, <8 x float>* %b
  %2 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %1, <8 x float> %a, i8 7)
  ret <8 x float> %2
}
declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone

define <2 x double> @commute_fold_vblendpd_128(<2 x double> %a, <2 x double>* %b) #0 {
; CHECK-LABEL: commute_fold_vblendpd_128:
; CHECK:       # BB#0:
; CHECK-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1]
; CHECK-NEXT:    retq
  %1 = load <2 x double>, <2 x double>* %b
  %2 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %1, <2 x double> %a, i8 1)
  ret <2 x double> %2
}
declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i8) nounwind readnone

define <4 x double> @commute_fold_vblendpd_256(<4 x double> %a, <4 x double>* %b) #0 {
; CHECK-LABEL: commute_fold_vblendpd_256:
; CHECK:       # BB#0:
; CHECK-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],mem[3]
; CHECK-NEXT:    retq
  %1 = load <4 x double>, <4 x double>* %b
  %2 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %1, <4 x double> %a, i8 7)
  ret <4 x double> %2
}
declare <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double>, <4 x double>, i8) nounwind readnone