; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX2

define <8 x float> @shuffle_v8f32_45670123(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_45670123:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
  ret <8 x float> %shuffle
}

define <8 x float> @shuffle_v8f32_45670123_mem(<8 x float>* %pa, <8 x float>* %pb) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_45670123_mem:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3,0,1]
; ALL-NEXT:    retq
entry:
  %a = load <8 x float>, <8 x float>* %pa
  %b = load <8 x float>, <8 x float>* %pb
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
  ret <8 x float> %shuffle
}

define <8 x float> @shuffle_v8f32_0123cdef(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_0123cdef:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
  ret <8 x float> %shuffle
}

define <8 x float> @shuffle_v8f32_01230123(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8f32_01230123:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: shuffle_v8f32_01230123:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
  ret <8 x float> %shuffle
}

define <8 x float> @shuffle_v8f32_01230123_mem(<8 x float>* %pa, <8 x float>* %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8f32_01230123_mem:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vmovaps (%rdi), %ymm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: shuffle_v8f32_01230123_mem:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX2-NEXT:    retq
entry:
  %a = load <8 x float>, <8 x float>* %pa
  %b = load <8 x float>, <8 x float>* %pb
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
  ret <8 x float> %shuffle
}

define <8 x float> @shuffle_v8f32_45674567(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_45674567:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
  ret <8 x float> %shuffle
}

define <8 x float> @shuffle_v8f32_45674567_mem(<8 x float>* %pa, <8 x float>* %pb) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_45674567_mem:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3,2,3]
; ALL-NEXT:    retq
entry:
  %a = load <8 x float>, <8 x float>* %pa
  %b = load <8 x float>, <8 x float>* %pb
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
  ret <8 x float> %shuffle
}

define <32 x i8> @shuffle_v32i8_2323(<32 x i8> %a, <32 x i8> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v32i8_2323:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  ret <32 x i8> %shuffle
}

define <32 x i8> @shuffle_v32i8_2323_domain(<32 x i8> %a, <32 x i8> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v32i8_2323_domain:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpaddb {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: shuffle_v32i8_2323_domain:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vpaddb {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-NEXT:    retq
entry:
  ; add forces execution domain
  %a2 = add <32 x i8> %a, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %shuffle = shufflevector <32 x i8> %a2, <32 x i8> %b, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  ret <32 x i8> %shuffle
}

define <4 x i64> @shuffle_v4i64_6701(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v4i64_6701:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_6701_domain(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v4i64_6701_domain:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: shuffle_v4i64_6701_domain:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT:    vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
; AVX2-NEXT:    retq
entry:
  ; add forces execution domain
  %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
  %shuffle = shufflevector <4 x i64> %a2, <4 x i64> %b, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
  ret <4 x i64> %shuffle
}

define <8 x i32> @shuffle_v8i32_u5u7cdef(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8i32_u5u7cdef:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: shuffle_v8i32_u5u7cdef:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm2
; AVX2-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT:    retq
entry:
  ; add forces execution domain
  %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %shuffle = shufflevector <8 x i32> %a2, <8 x i32> %b, <8 x i32> <i32 undef, i32 5, i32 undef, i32 7, i32 12, i32 13, i32 14, i32 15>
  ret <8 x i32> %shuffle
}

define <16 x i16> @shuffle_v16i16_4501(<16 x i16> %a, <16 x i16> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v16i16_4501:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: shuffle_v16i16_4501:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vpaddw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
entry:
  ; add forces execution domain
  %a2 = add <16 x i16> %a, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %shuffle = shufflevector <16 x i16> %a2, <16 x i16> %b, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <16 x i16> %shuffle
}

define <16 x i16> @shuffle_v16i16_4501_mem(<16 x i16>* %a, <16 x i16>* %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v16i16_4501_mem:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
; AVX1-NEXT:    vmovaps (%rsi), %ymm1
; AVX1-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: shuffle_v16i16_4501_mem:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
; AVX2-NEXT:    vmovdqa (%rsi), %ymm1
; AVX2-NEXT:    vpaddw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
entry:
  %c = load <16 x i16>, <16 x i16>* %a
  %d = load <16 x i16>, <16 x i16>* %b
  %c2 = add <16 x i16> %c, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %shuffle = shufflevector <16 x i16> %c2, <16 x i16> %d, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <16 x i16> %shuffle
}

;;;; Cases with undef indices mixed in the mask

define <8 x float> @shuffle_v8f32_uu67u9ub(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67u9ub:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 9, i32 undef, i32 11>
  ret <8 x float> %shuffle
}

define <8 x float> @shuffle_v8f32_uu67uu67(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67uu67:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 undef, i32 6, i32 7>
  ret <8 x float> %shuffle
}

define <8 x float> @shuffle_v8f32_uu67uuab(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67uuab:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 undef, i32 10, i32 11>
  ret <8 x float> %shuffle
}

define <8 x float> @shuffle_v8f32_uu67uuef(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67uuef:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 undef, i32 14, i32 15>
  ret <8 x float> %shuffle
}

define <8 x float> @shuffle_v8f32_uu674567(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu674567:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 undef, i32 undef, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
  ret <8 x float> %shuffle
}

define <8 x float> @shuffle_v8f32_uu6789ab(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu6789ab:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 undef, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
  ret <8 x float> %shuffle
}

define <8 x float> @shuffle_v8f32_4567uu67(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_4567uu67:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 6, i32 7>
  ret <8 x float> %shuffle
}

define <8 x float> @shuffle_v8f32_4567uuef(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_4567uuef:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 14, i32 15>
  ret <8 x float> %shuffle
}

;;;; Cases where we must not select a lone vperm2f128

define <8 x float> @shuffle_v8f32_uu67ucuf(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67ucuf:
; ALL:       ## BB#0: ## %entry
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; ALL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,3,4,4,6,7]
; ALL-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 12, i32 undef, i32 15>
  ret <8 x float> %shuffle
}

;; Test zero mask generation.
;; PR22984: https://llvm.org/bugs/show_bug.cgi?id=22984
;; Prefer xor+vblendpd over vperm2f128 because that has better performance.
;; TODO: When building for optsize we should use vperm2f128.

define <4 x double> @shuffle_v4f64_zz01(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_zz01:
; ALL:       ## BB#0:
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_zz01_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_zz01_optsize:
; ALL:       ## BB#0:
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_zz23(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_zz23:
; ALL:       ## BB#0:
; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_zz23_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_zz23_optsize:
; ALL:       ## BB#0:
; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_zz45(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_zz45:
; ALL:       ## BB#0:
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x double> %a, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_zz45_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_zz45_optsize:
; ALL:       ## BB#0:
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x double> %a, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_zz67(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_zz67:
; ALL:       ## BB#0:
; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x double> %a, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_zz67_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_zz67_optsize:
; ALL:       ## BB#0:
; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x double> %a, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_01zz(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_01zz:
; ALL:       ## BB#0:
; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_01zz_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_01zz_optsize:
; ALL:       ## BB#0:
; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_23zz(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_23zz:
; ALL:       ## BB#0:
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_23zz_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_23zz_optsize:
; ALL:       ## BB#0:
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_45zz(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_45zz:
; ALL:       ## BB#0:
; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x double> %a, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_45zz_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_45zz_optsize:
; ALL:       ## BB#0:
; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x double> %a, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_67zz(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_67zz:
; ALL:       ## BB#0:
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x double> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
  ret <4 x double> %s
}

define <4 x double> @shuffle_v4f64_67zz_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_67zz_optsize:
; ALL:       ## BB#0:
; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; ALL-NEXT:    retq
  %s = shufflevector <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, <4 x double> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
  ret <4 x double> %s
}

;; With AVX2, select the integer version of the instruction. Use an add to force the domain selection.

define <4 x i64> @shuffle_v4i64_67zz(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_67zz:
; AVX1:       ## BB#0:
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: shuffle_v4i64_67zz:
; AVX2:       ## BB#0:
; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; AVX2-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
  %s = shufflevector <4 x i64> <i64 0, i64 0, i64 0, i64 0>, <4 x i64> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
  %c = add <4 x i64> %b, %s
  ret <4 x i64> %c
}

;;; Memory folding cases

define <4 x double> @ld0_hi0_lo1_4f64(<4 x double> * %pa, <4 x double> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld0_hi0_lo1_4f64:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX1-NEXT:    vaddpd {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ld0_hi0_lo1_4f64:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX2-NEXT:    vbroadcastsd {{.*}}(%rip), %ymm1
; AVX2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
entry:
  %a = load <4 x double>, <4 x double> * %pa
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
  %res = fadd <4 x double> %shuffle, <double 1.0, double 1.0, double 1.0, double 1.0>
  ret <4 x double> %res
}

define <4 x double> @ld1_hi0_hi1_4f64(<4 x double> %a, <4 x double> * %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld1_hi0_hi1_4f64:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX1-NEXT:    vaddpd {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ld1_hi0_hi1_4f64:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vbroadcastsd {{.*}}(%rip), %ymm1
; AVX2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
entry:
  %b = load <4 x double>, <4 x double> * %pb
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
  %res = fadd <4 x double> %shuffle, <double 1.0, double 1.0, double 1.0, double 1.0>
  ret <4 x double> %res
}

define <8 x float> @ld0_hi0_lo1_8f32(<8 x float> * %pa, <8 x float> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld0_hi0_lo1_8f32:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX1-NEXT:    vaddps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ld0_hi0_lo1_8f32:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX2-NEXT:    vbroadcastss {{.*}}(%rip), %ymm1
; AVX2-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
entry:
  %a = load <8 x float>, <8 x float> * %pa
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
  %res = fadd <8 x float> %shuffle, <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>
  ret <8 x float> %res
}

define <8 x float> @ld1_hi0_hi1_8f32(<8 x float> %a, <8 x float> * %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld1_hi0_hi1_8f32:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX1-NEXT:    vaddps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ld1_hi0_hi1_8f32:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vbroadcastss {{.*}}(%rip), %ymm1
; AVX2-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
entry:
  %b = load <8 x float>, <8 x float> * %pb
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
  %res = fadd <8 x float> %shuffle, <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>
  ret <8 x float> %res
}

define <4 x i64> @ld0_hi0_lo1_4i64(<4 x i64> * %pa, <4 x i64> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld0_hi0_lo1_4i64:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX1-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ld0_hi0_lo1_4i64:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX2-NEXT:    vpaddq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    retq
entry:
  %a = load <4 x i64>, <4 x i64> * %pa
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
  %res = add <4 x i64> %shuffle, <i64 1, i64 2, i64 3, i64 4>
  ret <4 x i64> %res
}

define <4 x i64> @ld1_hi0_hi1_4i64(<4 x i64> %a, <4 x i64> * %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld1_hi0_hi1_4i64:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX1-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ld1_hi0_hi1_4i64:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vpaddq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    retq
entry:
  %b = load <4 x i64>, <4 x i64> * %pb
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
  %res = add <4 x i64> %shuffle, <i64 1, i64 2, i64 3, i64 4>
  ret <4 x i64> %res
}

define <8 x i32> @ld0_hi0_lo1_8i32(<8 x i32> * %pa, <8 x i32> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld0_hi0_lo1_8i32:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,2,3,4]
; AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ld0_hi0_lo1_8i32:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX2-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    retq
entry:
  %a = load <8 x i32>, <8 x i32> * %pa
  %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
  %res = add <8 x i32> %shuffle, <i32 1, i32 2, i32 3, i32 4, i32 1, i32 2, i32 3, i32 4>
  ret <8 x i32> %res
}

define <8 x i32> @ld1_hi0_hi1_8i32(<8 x i32> %a, <8 x i32> * %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld1_hi0_hi1_8i32:
; AVX1:       ## BB#0: ## %entry
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,2,3,4]
; AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ld1_hi0_hi1_8i32:
; AVX2:       ## BB#0: ## %entry
; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    retq
entry:
  %b = load <8 x i32>, <8 x i32> * %pb
  %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
  %res = add <8 x i32> %shuffle, <i32 1, i32 2, i32 3, i32 4, i32 1, i32 2, i32 3, i32 4>
  ret <8 x i32> %res
}