; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx512f --show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f --show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64

; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512f-builtins.c
; NOTE(review): the line structure and the <-0.0 x N> splat first operands of the
; negating `fsub` instructions were lost in transit and have been restored below;
; the autogenerated CHECK lines and encodings are reproduced unchanged.
; Re-run utils/update_llc_test_checks.py to confirm.

define <8 x double> @test_mm512_fmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; CHECK-LABEL: test_mm512_fmadd_round_pd:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xa8,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8)
  ret <8 x double> %0
}

declare <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i32) #1

define <8 x double> @test_mm512_mask_fmadd_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fmadd_round_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x98,0xc1]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fmadd_round_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x98,0xc1]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
  ret <8 x double> %2
}

define <8 x double> @test_mm512_mask3_fmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmadd_round_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmadd231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xb8,0xd1]
; X86-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fmadd_round_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xb8,0xd1]
; X64-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
  ret <8 x double> %2
}

define <8 x double> @test_mm512_maskz_fmadd_round_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_maskz_fmadd_round_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xa8,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fmadd_round_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xa8,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
  ret <8 x double> %2
}

define <8 x double> @test_mm512_fmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; CHECK-LABEL: test_mm512_fmsub_round_pd:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xaa,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8)
  ret <8 x double> %0
}

define <8 x double> @test_mm512_mask_fmsub_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fmsub_round_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x9a,0xc1]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fmsub_round_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x9a,0xc1]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
  ret <8 x double> %2
}

define <8 x double> @test_mm512_maskz_fmsub_round_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_maskz_fmsub_round_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xaa,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fmsub_round_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xaa,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
  ret <8 x double> %2
}

define <8 x double> @test_mm512_fnmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; CHECK-LABEL: test_mm512_fnmadd_round_pd:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfnmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xac,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %sub, <8 x double> %__B, <8 x double> %__C, i32 8)
  ret <8 x double> %0
}

define <8 x double> @test_mm512_mask3_fnmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fnmadd_round_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmadd231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xbc,0xd1]
; X86-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fnmadd_round_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmadd231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xbc,0xd1]
; X64-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %sub, <8 x double> %__B, <8 x double> %__C, i32 8)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
  ret <8 x double> %2
}

define <8 x double> @test_mm512_maskz_fnmadd_round_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_maskz_fnmadd_round_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xac,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fnmadd_round_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xac,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %sub, <8 x double> %__B, <8 x double> %__C, i32 8)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
  ret <8 x double> %2
}

define <8 x double> @test_mm512_fnmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; CHECK-LABEL: test_mm512_fnmsub_round_pd:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfnmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xae,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %sub1 = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %sub, <8 x double> %__B, <8 x double> %sub1, i32 8)
  ret <8 x double> %0
}

define <8 x double> @test_mm512_maskz_fnmsub_round_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_maskz_fnmsub_round_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xae,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fnmsub_round_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xae,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %sub1 = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %sub, <8 x double> %__B, <8 x double> %sub1, i32 8)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
  ret <8 x double> %2
}

define <8 x double> @test_mm512_fmadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; CHECK-LABEL: test_mm512_fmadd_pd:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa8,0xc2]
; CHECK-NEXT:    ## zmm0 = (zmm1 * zmm0) + zmm2
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
  ret <8 x double> %0
}

define <8 x double> @test_mm512_mask_fmadd_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fmadd_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x98,0xc1]
; X86-NEXT:    ## zmm0 {%k1} = (zmm0 * zmm1) + zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fmadd_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x98,0xc1]
; X64-NEXT:    ## zmm0 {%k1} = (zmm0 * zmm1) + zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
  ret <8 x double> %2
}

define <8 x double> @test_mm512_mask3_fmadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmadd_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmadd231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xb8,0xd1]
; X86-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) + zmm2
; X86-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fmadd_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xb8,0xd1]
; X64-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) + zmm2
; X64-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
  ret <8 x double> %2
}

define <8 x double> @test_mm512_maskz_fmadd_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_maskz_fmadd_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xa8,0xc2]
; X86-NEXT:    ## zmm0 {%k1} {z} = (zmm1 * zmm0) + zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fmadd_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xa8,0xc2]
; X64-NEXT:    ## zmm0 {%k1} {z} = (zmm1 * zmm0) + zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
  ret <8 x double> %2
}

define <8 x double> @test_mm512_fmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; CHECK-LABEL: test_mm512_fmsub_pd:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfmsub213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xaa,0xc2]
; CHECK-NEXT:    ## zmm0 = (zmm1 * zmm0) - zmm2
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10
  ret <8 x double> %0
}

define <8 x double> @test_mm512_mask_fmsub_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fmsub_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmsub132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x9a,0xc1]
; X86-NEXT:    ## zmm0 {%k1} = (zmm0 * zmm1) - zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fmsub_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsub132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x9a,0xc1]
; X64-NEXT:    ## zmm0 {%k1} = (zmm0 * zmm1) - zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
  ret <8 x double> %2
}

define <8 x double> @test_mm512_maskz_fmsub_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_maskz_fmsub_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmsub213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xaa,0xc2]
; X86-NEXT:    ## zmm0 {%k1} {z} = (zmm1 * zmm0) - zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fmsub_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsub213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xaa,0xc2]
; X64-NEXT:    ## zmm0 {%k1} {z} = (zmm1 * zmm0) - zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
  ret <8 x double> %2
}

define <8 x double> @test_mm512_fnmadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; CHECK-LABEL: test_mm512_fnmadd_pd:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfnmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xac,0xc2]
; CHECK-NEXT:    ## zmm0 = -(zmm1 * zmm0) + zmm2
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %sub.i, <8 x double> %__B, <8 x double> %__C) #10
  ret <8 x double> %0
}

define <8 x double> @test_mm512_mask3_fnmadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fnmadd_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmadd231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xbc,0xd1]
; X86-NEXT:    ## zmm2 {%k1} = -(zmm0 * zmm1) + zmm2
; X86-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fnmadd_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmadd231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xbc,0xd1]
; X64-NEXT:    ## zmm2 {%k1} = -(zmm0 * zmm1) + zmm2
; X64-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %sub.i, <8 x double> %__B, <8 x double> %__C) #10
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
  ret <8 x double> %2
}

define <8 x double> @test_mm512_maskz_fnmadd_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_maskz_fnmadd_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmadd213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xac,0xc2]
; X86-NEXT:    ## zmm0 {%k1} {z} = -(zmm1 * zmm0) + zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fnmadd_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmadd213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xac,0xc2]
; X64-NEXT:    ## zmm0 {%k1} {z} = -(zmm1 * zmm0) + zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %sub.i, <8 x double> %__B, <8 x double> %__C) #10
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
  ret <8 x double> %2
}

define <8 x double> @test_mm512_fnmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; CHECK-LABEL: test_mm512_fnmsub_pd:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfnmsub213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xae,0xc2]
; CHECK-NEXT:    ## zmm0 = -(zmm1 * zmm0) - zmm2
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %sub1.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %sub.i, <8 x double> %__B, <8 x double> %sub1.i) #10
  ret <8 x double> %0
}

define <8 x double> @test_mm512_maskz_fnmsub_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_maskz_fnmsub_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmsub213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xae,0xc2]
; X86-NEXT:    ## zmm0 {%k1} {z} = -(zmm1 * zmm0) - zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fnmsub_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmsub213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xae,0xc2]
; X64-NEXT:    ## zmm0 {%k1} {z} = -(zmm1 * zmm0) - zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %sub1.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %sub.i, <8 x double> %__B, <8 x double> %sub1.i) #10
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
  ret <8 x double> %2
}

define <16 x float> @test_mm512_fmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; CHECK-LABEL: test_mm512_fmadd_round_ps:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8)
  ret <16 x float> %0
}

declare <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i32) #1

define <16 x float> @test_mm512_mask_fmadd_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fmadd_round_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x98,0xc1]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fmadd_round_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x98,0xc1]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
  ret <16 x float> %2
}

define <16 x float> @test_mm512_mask3_fmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmadd_round_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xb8,0xd1]
; X86-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fmadd_round_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xb8,0xd1]
; X64-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
  ret <16 x float> %2
}

define <16 x float> @test_mm512_maskz_fmadd_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_maskz_fmadd_round_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xa8,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fmadd_round_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xa8,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer
  ret <16 x float> %2
}

define <16 x float> @test_mm512_fmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; CHECK-LABEL: test_mm512_fmsub_round_ps:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xaa,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8)
  ret <16 x float> %0
}

define <16 x float> @test_mm512_mask_fmsub_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fmsub_round_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsub132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x9a,0xc1]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fmsub_round_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsub132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x9a,0xc1]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
  ret <16 x float> %2
}

define <16 x float> @test_mm512_maskz_fmsub_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_maskz_fmsub_round_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xaa,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fmsub_round_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xaa,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer
  ret <16 x float> %2
}

define <16 x float> @test_mm512_fnmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; CHECK-LABEL: test_mm512_fnmadd_round_ps:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfnmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xac,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub, <16 x float> %__B, <16 x float> %__C, i32 8)
  ret <16 x float> %0
}

define <16 x float> @test_mm512_mask3_fnmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fnmadd_round_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xbc,0xd1]
; X86-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fnmadd_round_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xbc,0xd1]
; X64-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub, <16 x float> %__B, <16 x float> %__C, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
  ret <16 x float> %2
}

define <16 x float> @test_mm512_maskz_fnmadd_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_maskz_fnmadd_round_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xac,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fnmadd_round_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xac,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub, <16 x float> %__B, <16 x float> %__C, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer
  ret <16 x float> %2
}

define <16 x float> @test_mm512_fnmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; CHECK-LABEL: test_mm512_fnmsub_round_ps:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfnmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xae,0xc2]
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
  %sub1 = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub, <16 x float> %__B, <16 x float> %sub1, i32 8)
  ret <16 x float> %0
}

define <16 x float> @test_mm512_maskz_fnmsub_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_maskz_fnmsub_round_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xae,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fnmsub_round_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xae,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
  %sub1 = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub, <16 x float> %__B, <16 x float> %sub1, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer
  ret <16 x float> %2
}

define <16 x float> @test_mm512_fmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; CHECK-LABEL: test_mm512_fmadd_ps:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa8,0xc2]
; CHECK-NEXT:    ## zmm0 = (zmm1 * zmm0) + zmm2
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
  ret <16 x float> %0
}

define <16 x float> @test_mm512_mask_fmadd_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fmadd_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmadd132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x98,0xc1]
; X86-NEXT:    ## zmm0 {%k1} = (zmm0 * zmm1) + zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fmadd_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x98,0xc1]
; X64-NEXT:    ## zmm0 {%k1} = (zmm0 * zmm1) + zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
  ret <16 x float> %2
}

define <16 x float> @test_mm512_mask3_fmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmadd_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmadd231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xb8,0xd1]
; X86-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) + zmm2
; X86-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fmadd_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xb8,0xd1]
; X64-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) + zmm2
; X64-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
  ret <16 x float> %2
}

define <16 x float> @test_mm512_maskz_fmadd_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_maskz_fmadd_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmadd213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xa8,0xc2]
; X86-NEXT:    ## zmm0 {%k1} {z} = (zmm1 * zmm0) + zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fmadd_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xa8,0xc2]
; X64-NEXT:    ## zmm0 {%k1} {z} = (zmm1 * zmm0) + zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer
  ret <16 x float> %2
}

define <16 x float> @test_mm512_fmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; CHECK-LABEL: test_mm512_fmsub_ps:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfmsub213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xaa,0xc2]
; CHECK-NEXT:    ## zmm0 = (zmm1 * zmm0) - zmm2
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10
  ret <16 x float> %0
}

define <16 x float> @test_mm512_mask_fmsub_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fmsub_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsub132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x9a,0xc1]
; X86-NEXT:    ## zmm0 {%k1} = (zmm0 * zmm1) - zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fmsub_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsub132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x9a,0xc1]
; X64-NEXT:    ## zmm0 {%k1} = (zmm0 * zmm1) - zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x
float> %__B, <16 x float> %sub.i) #10 %1 = bitcast i16 %__U to <16 x i1> %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A ret <16 x float> %2 } define <16 x float> @test_mm512_maskz_fmsub_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { ; X86-LABEL: test_mm512_maskz_fmsub_ps: ; X86: ## %bb.0: ## %entry ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] ; X86-NEXT: vfmsub213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xaa,0xc2] ; X86-NEXT: ## zmm0 {%k1} {z} = (zmm1 * zmm0) - zmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_maskz_fmsub_ps: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsub213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xaa,0xc2] ; X64-NEXT: ## zmm0 {%k1} {z} = (zmm1 * zmm0) - zmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub.i = fsub <16 x float> , %__C %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10 %1 = bitcast i16 %__U to <16 x i1> %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer ret <16 x float> %2 } define <16 x float> @test_mm512_fnmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { ; CHECK-LABEL: test_mm512_fnmadd_ps: ; CHECK: ## %bb.0: ## %entry ; CHECK-NEXT: vfnmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xac,0xc2] ; CHECK-NEXT: ## zmm0 = -(zmm1 * zmm0) + zmm2 ; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] entry: %sub.i = fsub <16 x float> , %__A %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %sub.i, <16 x float> %__B, <16 x float> %__C) #10 ret <16 x float> %0 } define <16 x float> @test_mm512_mask3_fnmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) { ; X86-LABEL: test_mm512_mask3_fnmadd_ps: ; X86: ## %bb.0: ## %entry ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ## encoding: 
[0xc5,0xf8,0x90,0x4c,0x24,0x04] ; X86-NEXT: vfnmadd231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xbc,0xd1] ; X86-NEXT: ## zmm2 {%k1} = -(zmm0 * zmm1) + zmm2 ; X86-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_mask3_fnmadd_ps: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xbc,0xd1] ; X64-NEXT: ## zmm2 {%k1} = -(zmm0 * zmm1) + zmm2 ; X64-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub.i = fsub <16 x float> , %__A %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %sub.i, <16 x float> %__B, <16 x float> %__C) #10 %1 = bitcast i16 %__U to <16 x i1> %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C ret <16 x float> %2 } define <16 x float> @test_mm512_maskz_fnmadd_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { ; X86-LABEL: test_mm512_maskz_fnmadd_ps: ; X86: ## %bb.0: ## %entry ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] ; X86-NEXT: vfnmadd213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xac,0xc2] ; X86-NEXT: ## zmm0 {%k1} {z} = -(zmm1 * zmm0) + zmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_maskz_fnmadd_ps: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xac,0xc2] ; X64-NEXT: ## zmm0 {%k1} {z} = -(zmm1 * zmm0) + zmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub.i = fsub <16 x float> , %__A %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %sub.i, <16 x float> %__B, <16 x float> %__C) #10 %1 = bitcast i16 %__U to <16 x i1> %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> 
zeroinitializer ret <16 x float> %2 } define <16 x float> @test_mm512_fnmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { ; CHECK-LABEL: test_mm512_fnmsub_ps: ; CHECK: ## %bb.0: ## %entry ; CHECK-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xae,0xc2] ; CHECK-NEXT: ## zmm0 = -(zmm1 * zmm0) - zmm2 ; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] entry: %sub.i = fsub <16 x float> , %__A %sub1.i = fsub <16 x float> , %__C %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %sub.i, <16 x float> %__B, <16 x float> %sub1.i) #10 ret <16 x float> %0 } define <16 x float> @test_mm512_maskz_fnmsub_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { ; X86-LABEL: test_mm512_maskz_fnmsub_ps: ; X86: ## %bb.0: ## %entry ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] ; X86-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xae,0xc2] ; X86-NEXT: ## zmm0 {%k1} {z} = -(zmm1 * zmm0) - zmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_maskz_fnmsub_ps: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xae,0xc2] ; X64-NEXT: ## zmm0 {%k1} {z} = -(zmm1 * zmm0) - zmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub.i = fsub <16 x float> , %__A %sub1.i = fsub <16 x float> , %__C %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %sub.i, <16 x float> %__B, <16 x float> %sub1.i) #10 %1 = bitcast i16 %__U to <16 x i1> %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer ret <16 x float> %2 } define <8 x double> @test_mm512_fmaddsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { ; CHECK-LABEL: test_mm512_fmaddsub_round_pd: ; CHECK: ## %bb.0: ## %entry ; CHECK-NEXT: vfmaddsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xa6,0xc2] ; 
CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] entry: %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8) ret <8 x double> %0 } declare <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i32) #1 define <8 x double> @test_mm512_mask_fmaddsub_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) { ; X86-LABEL: test_mm512_mask_fmaddsub_round_pd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmaddsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x96,0xc1] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_mask_fmaddsub_round_pd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmaddsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x96,0xc1] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8) %1 = bitcast i8 %__U to <8 x i1> %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A ret <8 x double> %2 } define <8 x double> @test_mm512_mask3_fmaddsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) { ; X86-LABEL: test_mm512_mask3_fmaddsub_round_pd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmaddsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xb6,0xd1] ; X86-NEXT: vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_mask3_fmaddsub_round_pd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: 
kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmaddsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xb6,0xd1] ; X64-NEXT: vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8) %1 = bitcast i8 %__U to <8 x i1> %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C ret <8 x double> %2 } define <8 x double> @test_mm512_maskz_fmaddsub_round_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { ; X86-LABEL: test_mm512_maskz_fmaddsub_round_pd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmaddsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xa6,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_maskz_fmaddsub_round_pd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmaddsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xa6,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8) %1 = bitcast i8 %__U to <8 x i1> %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer ret <8 x double> %2 } define <8 x double> @test_mm512_fmsubadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { ; CHECK-LABEL: test_mm512_fmsubadd_round_pd: ; CHECK: ## %bb.0: ## %entry ; CHECK-NEXT: vfmsubadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xa7,0xc2] ; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] entry: %sub = fsub <8 x double> , %__C %0 = tail call <8 x double> 
@llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8) ret <8 x double> %0 } define <8 x double> @test_mm512_mask_fmsubadd_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) { ; X86-LABEL: test_mm512_mask_fmsubadd_round_pd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsubadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x97,0xc1] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_mask_fmsubadd_round_pd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsubadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x97,0xc1] ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <8 x double> , %__C %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8) %1 = bitcast i8 %__U to <8 x i1> %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A ret <8 x double> %2 } define <8 x double> @test_mm512_maskz_fmsubadd_round_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { ; X86-LABEL: test_mm512_maskz_fmsubadd_round_pd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsubadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xa7,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_maskz_fmsubadd_round_pd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsubadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xa7,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <8 x 
double> , %__C %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8) %1 = bitcast i8 %__U to <8 x i1> %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer ret <8 x double> %2 } define <8 x double> @test_mm512_fmaddsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { ; CHECK-LABEL: test_mm512_fmaddsub_pd: ; CHECK: ## %bb.0: ## %entry ; CHECK-NEXT: vfmaddsub213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa6,0xc2] ; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) +/- zmm2 ; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] entry: %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10 %1 = fsub <8 x double> , %__C %2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10 %3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> ret <8 x double> %3 } define <8 x double> @test_mm512_mask_fmaddsub_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) { ; X86-LABEL: test_mm512_mask_fmaddsub_pd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmaddsub132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x96,0xc1] ; X86-NEXT: ## zmm0 {%k1} = (zmm0 * zmm1) +/- zmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_mask_fmaddsub_pd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmaddsub132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x96,0xc1] ; X64-NEXT: ## zmm0 {%k1} = (zmm0 * zmm1) +/- zmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10 %1 = fsub <8 x double> , %__C %2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> 
%__A, <8 x double> %__B, <8 x double> %1) #10 %3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> %4 = bitcast i8 %__U to <8 x i1> %5 = select <8 x i1> %4, <8 x double> %3, <8 x double> %__A ret <8 x double> %5 } define <8 x double> @test_mm512_mask3_fmaddsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) { ; X86-LABEL: test_mm512_mask3_fmaddsub_pd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmaddsub231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xb6,0xd1] ; X86-NEXT: ## zmm2 {%k1} = (zmm0 * zmm1) +/- zmm2 ; X86-NEXT: vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_mask3_fmaddsub_pd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmaddsub231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xb6,0xd1] ; X64-NEXT: ## zmm2 {%k1} = (zmm0 * zmm1) +/- zmm2 ; X64-NEXT: vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10 %1 = fsub <8 x double> , %__C %2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10 %3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> %4 = bitcast i8 %__U to <8 x i1> %5 = select <8 x i1> %4, <8 x double> %3, <8 x double> %__C ret <8 x double> %5 } define <8 x double> @test_mm512_maskz_fmaddsub_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { ; X86-LABEL: test_mm512_maskz_fmaddsub_pd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; 
X86-NEXT: vfmaddsub213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xa6,0xc2] ; X86-NEXT: ## zmm0 {%k1} {z} = (zmm1 * zmm0) +/- zmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_maskz_fmaddsub_pd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmaddsub213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xa6,0xc2] ; X64-NEXT: ## zmm0 {%k1} {z} = (zmm1 * zmm0) +/- zmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10 %1 = fsub <8 x double> , %__C %2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10 %3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> %4 = bitcast i8 %__U to <8 x i1> %5 = select <8 x i1> %4, <8 x double> %3, <8 x double> zeroinitializer ret <8 x double> %5 } define <8 x double> @test_mm512_fmsubadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { ; CHECK-LABEL: test_mm512_fmsubadd_pd: ; CHECK: ## %bb.0: ## %entry ; CHECK-NEXT: vfmsubadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa7,0xc2] ; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) -/+ zmm2 ; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] entry: %sub.i = fsub <8 x double> , %__C %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10 %1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10 %2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> ret <8 x double> %2 } define <8 x double> @test_mm512_mask_fmsubadd_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) { ; X86-LABEL: test_mm512_mask_fmsubadd_pd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; 
X86-NEXT: vfmsubadd132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x97,0xc1] ; X86-NEXT: ## zmm0 {%k1} = (zmm0 * zmm1) -/+ zmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_mask_fmsubadd_pd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsubadd132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x97,0xc1] ; X64-NEXT: ## zmm0 {%k1} = (zmm0 * zmm1) -/+ zmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub.i = fsub <8 x double> , %__C %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10 %1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10 %2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> %3 = bitcast i8 %__U to <8 x i1> %4 = select <8 x i1> %3, <8 x double> %2, <8 x double> %__A ret <8 x double> %4 } define <8 x double> @test_mm512_maskz_fmsubadd_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { ; X86-LABEL: test_mm512_maskz_fmsubadd_pd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsubadd213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xa7,0xc2] ; X86-NEXT: ## zmm0 {%k1} {z} = (zmm1 * zmm0) -/+ zmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_maskz_fmsubadd_pd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsubadd213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xa7,0xc2] ; X64-NEXT: ## zmm0 {%k1} {z} = (zmm1 * zmm0) -/+ zmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub.i = fsub <8 x double> , %__C %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10 %1 = tail call <8 x double> @llvm.fma.v8f64(<8 x 
double> %__A, <8 x double> %__B, <8 x double> %__C) #10 %2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> %3 = bitcast i8 %__U to <8 x i1> %4 = select <8 x i1> %3, <8 x double> %2, <8 x double> zeroinitializer ret <8 x double> %4 } define <16 x float> @test_mm512_fmaddsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { ; CHECK-LABEL: test_mm512_fmaddsub_round_ps: ; CHECK: ## %bb.0: ## %entry ; CHECK-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa6,0xc2] ; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] entry: %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8) ret <16 x float> %0 } declare <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i32) #1 define <16 x float> @test_mm512_mask_fmaddsub_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) { ; X86-LABEL: test_mm512_mask_fmaddsub_round_ps: ; X86: ## %bb.0: ## %entry ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] ; X86-NEXT: vfmaddsub132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x96,0xc1] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_mask_fmaddsub_round_ps: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmaddsub132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x96,0xc1] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8) %1 = bitcast i16 %__U to <16 x i1> %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A ret <16 x float> %2 } define <16 x float> @test_mm512_mask3_fmaddsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) { ; X86-LABEL: test_mm512_mask3_fmaddsub_round_ps: ; 
X86: ## %bb.0: ## %entry ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] ; X86-NEXT: vfmaddsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xb6,0xd1] ; X86-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_mask3_fmaddsub_round_ps: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmaddsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xb6,0xd1] ; X64-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8) %1 = bitcast i16 %__U to <16 x i1> %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C ret <16 x float> %2 } define <16 x float> @test_mm512_maskz_fmaddsub_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { ; X86-LABEL: test_mm512_maskz_fmaddsub_round_ps: ; X86: ## %bb.0: ## %entry ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] ; X86-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xa6,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_maskz_fmaddsub_round_ps: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xa6,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8) %1 = bitcast i16 %__U to <16 x i1> %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer ret <16 x float> %2 } define <16 x float> @test_mm512_fmsubadd_round_ps(<16 x 
float> %__A, <16 x float> %__B, <16 x float> %__C) { ; CHECK-LABEL: test_mm512_fmsubadd_round_ps: ; CHECK: ## %bb.0: ## %entry ; CHECK-NEXT: vfmsubadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa7,0xc2] ; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3] entry: %sub = fsub <16 x float> , %__C %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8) ret <16 x float> %0 } define <16 x float> @test_mm512_mask_fmsubadd_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) { ; X86-LABEL: test_mm512_mask_fmsubadd_round_ps: ; X86: ## %bb.0: ## %entry ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] ; X86-NEXT: vfmsubadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x97,0xc1] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_mask_fmsubadd_round_ps: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsubadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x97,0xc1] ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <16 x float> , %__C %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8) %1 = bitcast i16 %__U to <16 x i1> %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A ret <16 x float> %2 } define <16 x float> @test_mm512_maskz_fmsubadd_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { ; X86-LABEL: test_mm512_maskz_fmsubadd_round_ps: ; X86: ## %bb.0: ## %entry ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] ; X86-NEXT: vfmsubadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xa7,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm512_maskz_fmsubadd_round_ps: ; X64: ## %bb.0: ## %entry ; 
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsubadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xa7,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
; NOTE(review): the constant-vector operands in this region had been stripped
; (e.g. "fsub <16 x float> , %__C" and shufflevector calls with empty masks).
; They are restored below as the canonical -0.0 splats (IEEE negation) and the
; alternating even/odd lane-select masks that clang emits for the
; fmaddsub/fmsubadd/fnmadd/fnmsub builtins.
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer
  ret <16 x float> %2
}

define <16 x float> @test_mm512_fmaddsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; CHECK-LABEL: test_mm512_fmaddsub_ps:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfmaddsub213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa6,0xc2]
; CHECK-NEXT:    ## zmm0 = (zmm1 * zmm0) +/- zmm2
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
  %1 = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10
  %3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
  ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_fmaddsub_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fmaddsub_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmaddsub132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x96,0xc1]
; X86-NEXT:    ## zmm0 {%k1} = (zmm0 * zmm1) +/- zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fmaddsub_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmaddsub132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x96,0xc1]
; X64-NEXT:    ## zmm0 {%k1} = (zmm0 * zmm1) +/- zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
  %1 = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10
  %3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
  %4 = bitcast i16 %__U to <16 x i1>
  %5 = select <16 x i1> %4, <16 x float> %3, <16 x float> %__A
  ret <16 x float> %5
}

define <16 x float> @test_mm512_mask3_fmaddsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmaddsub_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmaddsub231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xb6,0xd1]
; X86-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) +/- zmm2
; X86-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fmaddsub_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmaddsub231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xb6,0xd1]
; X64-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) +/- zmm2
; X64-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
  %1 = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10
  %3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
  %4 = bitcast i16 %__U to <16 x i1>
  %5 = select <16 x i1> %4, <16 x float> %3, <16 x float> %__C
  ret <16 x float> %5
}

define <16 x float> @test_mm512_maskz_fmaddsub_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_maskz_fmaddsub_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmaddsub213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xa6,0xc2]
; X86-NEXT:    ## zmm0 {%k1} {z} = (zmm1 * zmm0) +/- zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fmaddsub_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmaddsub213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xa6,0xc2]
; X64-NEXT:    ## zmm0 {%k1} {z} = (zmm1 * zmm0) +/- zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
  %1 = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10
  %3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
  %4 = bitcast i16 %__U to <16 x i1>
  %5 = select <16 x i1> %4, <16 x float> %3, <16 x float> zeroinitializer
  ret <16 x float> %5
}

define <16 x float> @test_mm512_fmsubadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; CHECK-LABEL: test_mm512_fmsubadd_ps:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    vfmsubadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa7,0xc2]
; CHECK-NEXT:    ## zmm0 = (zmm1 * zmm0) -/+ zmm2
; CHECK-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
  %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10
  %1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
  %2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
  ret <16 x float> %2
}

define <16 x float> @test_mm512_mask_fmsubadd_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fmsubadd_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsubadd132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x97,0xc1]
; X86-NEXT:    ## zmm0 {%k1} = (zmm0 * zmm1) -/+ zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fmsubadd_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsubadd132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x97,0xc1]
; X64-NEXT:    ## zmm0 {%k1} = (zmm0 * zmm1) -/+ zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10
  %1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
  %2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
  %3 = bitcast i16 %__U to <16 x i1>
  %4 = select <16 x i1> %3, <16 x float> %2, <16 x float> %__A
  ret <16 x float> %4
}

define <16 x float> @test_mm512_maskz_fmsubadd_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_maskz_fmsubadd_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsubadd213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xa7,0xc2]
; X86-NEXT:    ## zmm0 {%k1} {z} = (zmm1 * zmm0) -/+ zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_maskz_fmsubadd_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsubadd213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xa7,0xc2]
; X64-NEXT:    ## zmm0 {%k1} {z} = (zmm1 * zmm0) -/+ zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10
  %1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
  %2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
  %3 = bitcast i16 %__U to <16 x i1>
  %4 = select <16 x i1> %3, <16 x float> %2, <16 x float> zeroinitializer
  ret <16 x float> %4
}

define <8 x double> @test_mm512_mask3_fmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsub_round_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xba,0xd1]
; X86-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fmsub_round_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xba,0xd1]
; X64-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
  ret <8 x double> %2
}

define <8 x double> @test_mm512_mask3_fmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsub_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmsub231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xba,0xd1]
; X86-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) - zmm2
; X86-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fmsub_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsub231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xba,0xd1]
; X64-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) - zmm2
; X64-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
  ret <8 x double> %2
}

define <16 x float> @test_mm512_mask3_fmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsub_round_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xba,0xd1]
; X86-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fmsub_round_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xba,0xd1]
; X64-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
  ret <16 x float> %2
}

define <16 x float> @test_mm512_mask3_fmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsub_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsub231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xba,0xd1]
; X86-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) - zmm2
; X86-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fmsub_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsub231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xba,0xd1]
; X64-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) - zmm2
; X64-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
  ret <16 x float> %2
}

define <8 x double> @test_mm512_mask3_fmsubadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsubadd_round_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmsubadd231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xb7,0xd1]
; X86-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fmsubadd_round_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsubadd231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xb7,0xd1]
; X64-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
  ret <8 x double> %2
}

define <8 x double> @test_mm512_mask3_fmsubadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsubadd_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmsubadd231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xb7,0xd1]
; X86-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) -/+ zmm2
; X86-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fmsubadd_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsubadd231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xb7,0xd1]
; X64-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) -/+ zmm2
; X64-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10
  %1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
  %2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  %3 = bitcast i8 %__U to <8 x i1>
  %4 = select <8 x i1> %3, <8 x double> %2, <8 x double> %__C
  ret <8 x double> %4
}

define <16 x float> @test_mm512_mask3_fmsubadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsubadd_round_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsubadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xb7,0xd1]
; X86-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fmsubadd_round_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsubadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xb7,0xd1]
; X64-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
  ret <16 x float> %2
}

define <16 x float> @test_mm512_mask3_fmsubadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsubadd_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfmsubadd231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xb7,0xd1]
; X86-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) -/+ zmm2
; X86-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fmsubadd_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmsubadd231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xb7,0xd1]
; X64-NEXT:    ## zmm2 {%k1} = (zmm0 * zmm1) -/+ zmm2
; X64-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10
  %1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
  %2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
  %3 = bitcast i16 %__U to <16 x i1>
  %4 = select <16 x i1> %3, <16 x float> %2, <16 x float> %__C
  ret <16 x float> %4
}

define <8 x double> @test_mm512_mask_fnmadd_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fnmadd_round_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x9c,0xc1]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fnmadd_round_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x9c,0xc1]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %sub, <8 x double> %__B, <8 x double> %__C, i32 8)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
  ret <8 x double> %2
}

define <8 x double> @test_mm512_mask_fnmadd_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fnmadd_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmadd132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x9c,0xc1]
; X86-NEXT:    ## zmm0 {%k1} = -(zmm0 * zmm1) + zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fnmadd_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmadd132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x9c,0xc1]
; X64-NEXT:    ## zmm0 {%k1} = -(zmm0 * zmm1) + zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %sub.i, <8 x double> %__B, <8 x double> %__C) #10
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
  ret <8 x double> %2
}

define <16 x float> @test_mm512_mask_fnmadd_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fnmadd_round_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x9c,0xc1]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fnmadd_round_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x9c,0xc1]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub, <16 x float> %__B, <16 x float> %__C, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
  ret <16 x float> %2
}

define <16 x float> @test_mm512_mask_fnmadd_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fnmadd_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmadd132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x9c,0xc1]
; X86-NEXT:    ## zmm0 {%k1} = -(zmm0 * zmm1) + zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fnmadd_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmadd132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x9c,0xc1]
; X64-NEXT:    ## zmm0 {%k1} = -(zmm0 * zmm1) + zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %sub.i, <16 x float> %__B, <16 x float> %__C) #10
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
  ret <16 x float> %2
}

define <8 x double> @test_mm512_mask_fnmsub_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fnmsub_round_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x9e,0xc1]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fnmsub_round_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x9e,0xc1]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__B
  %sub1 = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %sub, <8 x double> %sub1, i32 8)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
  ret <8 x double> %2
}

define <8 x double> @test_mm512_mask3_fnmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fnmsub_round_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xbe,0xd1]
; X86-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fnmsub_round_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xbe,0xd1]
; X64-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__B
  %sub1 = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %sub, <8 x double> %sub1, i32 8)
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
  ret <8 x double> %2
}

define <8 x double> @test_mm512_mask_fnmsub_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fnmsub_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmsub132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x9e,0xc1]
; X86-NEXT:    ## zmm0 {%k1} = -(zmm0 * zmm1) - zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fnmsub_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmsub132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x9e,0xc1]
; X64-NEXT:    ## zmm0 {%k1} = -(zmm0 * zmm1) - zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__B
  %sub2.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %sub.i, <8 x double> %sub2.i) #10
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
  ret <8 x double> %2
}

define <8 x double> @test_mm512_mask3_fnmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fnmsub_pd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmsub231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xbe,0xd1]
; X86-NEXT:    ## zmm2 {%k1} = -(zmm0 * zmm1) - zmm2
; X86-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fnmsub_pd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmsub231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xbe,0xd1]
; X64-NEXT:    ## zmm2 {%k1} = -(zmm0 * zmm1) - zmm2
; X64-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__B
  %sub2.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %sub.i, <8 x double> %sub2.i) #10
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
  ret <8 x double> %2
}

define <16 x float> @test_mm512_mask_fnmsub_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fnmsub_round_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmsub132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x9e,0xc1]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fnmsub_round_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmsub132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x9e,0xc1]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
  %sub1 = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %sub, <16 x float> %sub1, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
  ret <16 x float> %2
}

define <16 x float> @test_mm512_mask3_fnmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fnmsub_round_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xbe,0xd1]
; X86-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fnmsub_round_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xbe,0xd1]
; X64-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
  %sub1 = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %sub, <16 x float> %sub1, i32 8)
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
  ret <16 x float> %2
}

define <16 x float> @test_mm512_mask_fnmsub_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fnmsub_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmsub132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x9e,0xc1]
; X86-NEXT:    ## zmm0 {%k1} = -(zmm0 * zmm1) - zmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask_fnmsub_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmsub132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x9e,0xc1]
; X64-NEXT:    ## zmm0 {%k1} = -(zmm0 * zmm1) - zmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
  %sub1.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %sub.i, <16 x float> %sub1.i) #10
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
  ret <16 x float> %2
}

define <16 x float> @test_mm512_mask3_fnmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fnmsub_ps:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT:    vfnmsub231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xbe,0xd1]
; X86-NEXT:    ## zmm2 {%k1} = -(zmm0 * zmm1) - zmm2
; X86-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm512_mask3_fnmsub_ps:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmsub231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xbe,0xd1]
; X64-NEXT:    ## zmm2 {%k1} = -(zmm0 * zmm1) - zmm2
; X64-NEXT:    vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
  %sub1.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %sub.i, <16 x float> %sub1.i) #10
  %1 = bitcast i16 %__U to <16 x i1>
  %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
  ret <16 x float> %2
}

define <4 x float> @test_mm_mask_fmadd_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; X86-LABEL: test_mm_mask_fmadd_ss:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa9,0xc2]
; X86-NEXT:    ## xmm0 {%k1} = (xmm1 * xmm0) + xmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm_mask_fmadd_ss:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa9,0xc2]
; X64-NEXT:    ## xmm0 {%k1} = (xmm1 * xmm0) + xmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = extractelement <4 x float> %__W, i64 0
  %1 = extractelement <4 x float> %__A, i64 0
  %2 = extractelement <4 x float> %__B, i64 0
  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10
  %4 = and i8 %__U, 1
  %tobool.i = icmp eq i8 %4, 0
  %vecext1.i = extractelement <4 x float> %__W, i32 0
  %cond.i = select i1 %tobool.i, float %vecext1.i, float %3
  %vecins.i = insertelement <4 x float> %__W, float %cond.i, i32 0
  ret <4 x float> %vecins.i
}

define <4 x float> @test_mm_mask_fmadd_round_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; X86-LABEL: test_mm_mask_fmadd_round_ss:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa9,0xc2]
; X86-NEXT:    ## xmm0 {%k1} = (xmm1 * xmm0) + xmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm_mask_fmadd_round_ss:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa9,0xc2]
; X64-NEXT:    ## xmm0 {%k1} = (xmm1 * xmm0) + xmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %__W, <4 x float> %__A, <4 x float> %__B, i8 %__U, i32 4)
  ret <4 x float> %0
}

declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) #1

define <4 x float> @test_mm_maskz_fmadd_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
; X86-LABEL: test_mm_maskz_fmadd_ss:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xa9,0xc2]
; X86-NEXT:    ## xmm0 {%k1} {z} = (xmm1 * xmm0) + xmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_fmadd_ss:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xa9,0xc2]
; X64-NEXT:    ## xmm0 {%k1} {z} = (xmm1 * xmm0) + xmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = extractelement <4 x float> %__A, i64 0
  %1 = extractelement <4 x float> %__B, i64 0
  %2 = extractelement <4 x float> %__C, i64 0
  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10
  %4 = and i8 %__U, 1
  %tobool.i = icmp eq i8 %4, 0
  %cond.i = select i1 %tobool.i, float 0.000000e+00, float %3
  %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0
  ret <4 x float> %vecins.i
}

define <4 x float> @test_mm_maskz_fmadd_round_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
; X86-LABEL: test_mm_maskz_fmadd_round_ss:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xa9,0xc2]
; X86-NEXT:    ## xmm0 {%k1} {z} = (xmm1 * xmm0) + xmm2
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_fmadd_round_ss:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xa9,0xc2]
; X64-NEXT:    ## xmm0 {%k1} {z} = (xmm1 * xmm0) + xmm2
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 %__U, i32 4)
  ret <4 x float> %0
}

declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) #1

define <4 x float> @test_mm_mask3_fmadd_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) {
; X86-LABEL: test_mm_mask3_fmadd_ss:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfmadd231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb9,0xd1]
; X86-NEXT:    ## xmm2 {%k1} = (xmm0 * xmm1) + xmm2
; X86-NEXT:    vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm_mask3_fmadd_ss:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfmadd231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb9,0xd1]
; X64-NEXT:    ## xmm2 {%k1} = (xmm0 * xmm1) + xmm2
; X64-NEXT:    vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = extractelement <4 x float> %__W, i64 0
  %1 = extractelement <4 x float> %__X, i64 0
  %2 = extractelement <4 x float> %__Y, i64 0
  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10
  %4 = and i8 %__U, 1
  %tobool.i = icmp eq i8 %4, 0
  %vecext1.i = extractelement <4 x float> %__Y, i32 0
  %cond.i = select i1 %tobool.i, float %vecext1.i, float %3
  %vecins.i = insertelement <4 x float> %__Y, float %cond.i, i32 0
  ret <4 x float> %vecins.i
}

define <4 x float> @test_mm_mask3_fmadd_round_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) {
; X86-LABEL: test_mm_mask3_fmadd_round_ss:
;
X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb9,0xd1] ; X86-NEXT: ## xmm2 {%k1} = (xmm0 * xmm1) + xmm2 ; X86-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fmadd_round_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb9,0xd1] ; X64-NEXT: ## xmm2 {%k1} = (xmm0 * xmm1) + xmm2 ; X64-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 %__U, i32 4) ret <4 x float> %0 } declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) #1 define <4 x float> @test_mm_mask_fmsub_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { ; X86-LABEL: test_mm_mask_fmsub_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xab,0xc2] ; X86-NEXT: ## xmm0 {%k1} = (xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fmsub_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xab,0xc2] ; X64-NEXT: ## xmm0 {%k1} = (xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <4 x float> %__W, i64 0 %1 = extractelement <4 x float> %__A, i64 0 %.rhs.i = extractelement <4 x float> %__B, 
i64 0 %2 = fsub float -0.000000e+00, %.rhs.i %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext1.i = extractelement <4 x float> %__W, i32 0 %cond.i = select i1 %tobool.i, float %vecext1.i, float %3 %vecins.i = insertelement <4 x float> %__W, float %cond.i, i32 0 ret <4 x float> %vecins.i } define <4 x float> @test_mm_mask_fmsub_round_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { ; X86-LABEL: test_mm_mask_fmsub_round_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xab,0xc2] ; X86-NEXT: ## xmm0 {%k1} = (xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fmsub_round_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xab,0xc2] ; X64-NEXT: ## xmm0 {%k1} = (xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %__W, <4 x float> %__A, <4 x float> %sub, i8 %__U, i32 4) ret <4 x float> %0 } define <4 x float> @test_mm_maskz_fmsub_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { ; X86-LABEL: test_mm_maskz_fmsub_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xab,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = (xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fmsub_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: 
[0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xab,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = (xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <4 x float> %__A, i64 0 %1 = extractelement <4 x float> %__B, i64 0 %.rhs.i = extractelement <4 x float> %__C, i64 0 %2 = fsub float -0.000000e+00, %.rhs.i %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %cond.i = select i1 %tobool.i, float 0.000000e+00, float %3 %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0 ret <4 x float> %vecins.i } define <4 x float> @test_mm_maskz_fmsub_round_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { ; X86-LABEL: test_mm_maskz_fmsub_round_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xab,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = (xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fmsub_round_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xab,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = (xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C %0 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub, i8 %__U, i32 4) ret <4 x float> %0 } define <4 x float> @test_mm_mask3_fmsub_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fmsub_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: 
[0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbb,0xd1] ; X86-NEXT: ## xmm2 {%k1} = (xmm0 * xmm1) - xmm2 ; X86-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fmsub_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbb,0xd1] ; X64-NEXT: ## xmm2 {%k1} = (xmm0 * xmm1) - xmm2 ; X64-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <4 x float> %__W, i64 0 %1 = extractelement <4 x float> %__X, i64 0 %.rhs.i = extractelement <4 x float> %__Y, i64 0 %2 = fsub float -0.000000e+00, %.rhs.i %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext1.i = extractelement <4 x float> %__Y, i32 0 %cond.i = select i1 %tobool.i, float %vecext1.i, float %3 %vecins.i = insertelement <4 x float> %__Y, float %cond.i, i32 0 ret <4 x float> %vecins.i } define <4 x float> @test_mm_mask3_fmsub_round_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fmsub_round_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbb,0xd1] ; X86-NEXT: ## xmm2 {%k1} = (xmm0 * xmm1) - xmm2 ; X86-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fmsub_round_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbb,0xd1] ; X64-NEXT: ## xmm2 {%k1} = (xmm0 * 
xmm1) - xmm2 ; X64-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 %__U, i32 4) ret <4 x float> %0 } declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) #1 define <4 x float> @test_mm_mask_fnmadd_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { ; X86-LABEL: test_mm_mask_fnmadd_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xad,0xc2] ; X86-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) + xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fnmadd_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xad,0xc2] ; X64-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) + xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <4 x float> %__W, i64 0 %.rhs.i = extractelement <4 x float> %__A, i64 0 %1 = fsub float -0.000000e+00, %.rhs.i %2 = extractelement <4 x float> %__B, i64 0 %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext1.i = extractelement <4 x float> %__W, i32 0 %cond.i = select i1 %tobool.i, float %vecext1.i, float %3 %vecins.i = insertelement <4 x float> %__W, float %cond.i, i32 0 ret <4 x float> %vecins.i } define <4 x float> @test_mm_mask_fnmadd_round_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { ; X86-LABEL: test_mm_mask_fnmadd_round_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## 
encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xad,0xc2] ; X86-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) + xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fnmadd_round_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xad,0xc2] ; X64-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) + xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %__W, <4 x float> %sub, <4 x float> %__B, i8 %__U, i32 4) ret <4 x float> %0 } define <4 x float> @test_mm_maskz_fnmadd_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { ; X86-LABEL: test_mm_maskz_fnmadd_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xad,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) + xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fnmadd_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xad,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) + xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <4 x float> %__A, i64 0 %.rhs.i = extractelement <4 x float> %__B, i64 0 %1 = fsub float -0.000000e+00, %.rhs.i %2 = extractelement <4 x float> %__C, i64 0 %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %cond.i = select i1 %tobool.i, float 0.000000e+00, float %3 %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0 ret <4 x float> 
%vecins.i } define <4 x float> @test_mm_maskz_fnmadd_round_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { ; X86-LABEL: test_mm_maskz_fnmadd_round_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xad,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) + xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fnmadd_round_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xad,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) + xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B %0 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %__A, <4 x float> %sub, <4 x float> %__C, i8 %__U, i32 4) ret <4 x float> %0 } define <4 x float> @test_mm_mask3_fnmadd_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fnmadd_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmadd231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbd,0xd1] ; X86-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) + xmm2 ; X86-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fnmadd_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbd,0xd1] ; X64-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) + xmm2 ; X64-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X64-NEXT: retq ## encoding: 
[0xc3] entry: %0 = extractelement <4 x float> %__W, i64 0 %.rhs.i = extractelement <4 x float> %__X, i64 0 %1 = fsub float -0.000000e+00, %.rhs.i %2 = extractelement <4 x float> %__Y, i64 0 %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext1.i = extractelement <4 x float> %__Y, i32 0 %cond.i = select i1 %tobool.i, float %vecext1.i, float %3 %vecins.i = insertelement <4 x float> %__Y, float %cond.i, i32 0 ret <4 x float> %vecins.i } define <4 x float> @test_mm_mask3_fnmadd_round_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fnmadd_round_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmadd231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbd,0xd1] ; X86-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) + xmm2 ; X86-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fnmadd_round_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbd,0xd1] ; X64-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) + xmm2 ; X64-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__X %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %__W, <4 x float> %sub, <4 x float> %__Y, i8 %__U, i32 4) ret <4 x float> %0 } define <4 x float> @test_mm_mask_fnmsub_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { ; X86-LABEL: test_mm_mask_fnmsub_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: 
vfnmsub213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xaf,0xc2] ; X86-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fnmsub_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xaf,0xc2] ; X64-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <4 x float> %__W, i64 0 %.rhs.i = extractelement <4 x float> %__A, i64 0 %1 = fsub float -0.000000e+00, %.rhs.i %.rhs7.i = extractelement <4 x float> %__B, i64 0 %2 = fsub float -0.000000e+00, %.rhs7.i %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext2.i = extractelement <4 x float> %__W, i32 0 %cond.i = select i1 %tobool.i, float %vecext2.i, float %3 %vecins.i = insertelement <4 x float> %__W, float %cond.i, i32 0 ret <4 x float> %vecins.i } define <4 x float> @test_mm_mask_fnmsub_round_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { ; X86-LABEL: test_mm_mask_fnmsub_round_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xaf,0xc2] ; X86-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fnmsub_round_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xaf,0xc2] ; X64-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A %sub1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x 
float> %__W, <4 x float> %sub, <4 x float> %sub1, i8 %__U, i32 4) ret <4 x float> %0 } define <4 x float> @test_mm_maskz_fnmsub_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { ; X86-LABEL: test_mm_maskz_fnmsub_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xaf,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fnmsub_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xaf,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <4 x float> %__A, i64 0 %.rhs.i = extractelement <4 x float> %__B, i64 0 %1 = fsub float -0.000000e+00, %.rhs.i %.rhs5.i = extractelement <4 x float> %__C, i64 0 %2 = fsub float -0.000000e+00, %.rhs5.i %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %cond.i = select i1 %tobool.i, float 0.000000e+00, float %3 %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0 ret <4 x float> %vecins.i } define <4 x float> @test_mm_maskz_fnmsub_round_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { ; X86-LABEL: test_mm_maskz_fnmsub_round_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xaf,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fnmsub_round_ss: ; X64: ## 
%bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xaf,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B %sub1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C %0 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %__A, <4 x float> %sub, <4 x float> %sub1, i8 %__U, i32 4) ret <4 x float> %0 } define <4 x float> @test_mm_mask3_fnmsub_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fnmsub_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbf,0xd1] ; X86-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) - xmm2 ; X86-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fnmsub_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbf,0xd1] ; X64-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) - xmm2 ; X64-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <4 x float> %__W, i64 0 %.rhs.i = extractelement <4 x float> %__X, i64 0 %1 = fsub float -0.000000e+00, %.rhs.i %.rhs7.i = extractelement <4 x float> %__Y, i64 0 %2 = fsub float -0.000000e+00, %.rhs7.i %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext2.i = extractelement <4 x float> %__Y, i32 0 %cond.i = select i1 %tobool.i, float %vecext2.i, float %3 %vecins.i = insertelement <4 x float> %__Y, float %cond.i, i32 0 ret <4 x float> %vecins.i } 
define <4 x float> @test_mm_mask3_fnmsub_round_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fnmsub_round_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbf,0xd1] ; X86-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) - xmm2 ; X86-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fnmsub_round_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbf,0xd1] ; X64-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) - xmm2 ; X64-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__X %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %__W, <4 x float> %sub, <4 x float> %__Y, i8 %__U, i32 4) ret <4 x float> %0 } define <2 x double> @test_mm_mask_fmadd_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { ; X86-LABEL: test_mm_mask_fmadd_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa9,0xc2] ; X86-NEXT: ## xmm0 {%k1} = (xmm1 * xmm0) + xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fmadd_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa9,0xc2] ; X64-NEXT: ## xmm0 {%k1} = (xmm1 * xmm0) + xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <2 x 
double> %__W, i64 0 %1 = extractelement <2 x double> %__A, i64 0 %2 = extractelement <2 x double> %__B, i64 0 %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext1.i = extractelement <2 x double> %__W, i32 0 %cond.i = select i1 %tobool.i, double %vecext1.i, double %3 %vecins.i = insertelement <2 x double> %__W, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_mask_fmadd_round_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { ; X86-LABEL: test_mm_mask_fmadd_round_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa9,0xc2] ; X86-NEXT: ## xmm0 {%k1} = (xmm1 * xmm0) + xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fmadd_round_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa9,0xc2] ; X64-NEXT: ## xmm0 {%k1} = (xmm1 * xmm0) + xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %__W, <2 x double> %__A, <2 x double> %__B, i8 %__U, i32 4) ret <2 x double> %0 } declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) #1 define <2 x double> @test_mm_maskz_fmadd_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { ; X86-LABEL: test_mm_maskz_fmadd_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xa9,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = (xmm1 * 
xmm0) + xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fmadd_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xa9,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = (xmm1 * xmm0) + xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <2 x double> %__A, i64 0 %1 = extractelement <2 x double> %__B, i64 0 %2 = extractelement <2 x double> %__C, i64 0 %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %cond.i = select i1 %tobool.i, double 0.000000e+00, double %3 %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_maskz_fmadd_round_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { ; X86-LABEL: test_mm_maskz_fmadd_round_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xa9,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = (xmm1 * xmm0) + xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fmadd_round_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xa9,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = (xmm1 * xmm0) + xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 %__U, i32 4) ret <2 x double> %0 } declare <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) #1 define <2 x double> @test_mm_mask3_fmadd_sd(<2 x double> %__W, <2 x double> %__X, <2 x 
double> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fmadd_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb9,0xd1] ; X86-NEXT: ## xmm2 {%k1} = (xmm0 * xmm1) + xmm2 ; X86-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fmadd_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb9,0xd1] ; X64-NEXT: ## xmm2 {%k1} = (xmm0 * xmm1) + xmm2 ; X64-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <2 x double> %__W, i64 0 %1 = extractelement <2 x double> %__X, i64 0 %2 = extractelement <2 x double> %__Y, i64 0 %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext1.i = extractelement <2 x double> %__Y, i32 0 %cond.i = select i1 %tobool.i, double %vecext1.i, double %3 %vecins.i = insertelement <2 x double> %__Y, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_mask3_fmadd_round_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fmadd_round_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb9,0xd1] ; X86-NEXT: ## xmm2 {%k1} = (xmm0 * xmm1) + xmm2 ; X86-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fmadd_round_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: 
kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb9,0xd1] ; X64-NEXT: ## xmm2 {%k1} = (xmm0 * xmm1) + xmm2 ; X64-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 %__U, i32 4) ret <2 x double> %0 } declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) #1 define <2 x double> @test_mm_mask_fmsub_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { ; X86-LABEL: test_mm_mask_fmsub_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xab,0xc2] ; X86-NEXT: ## xmm0 {%k1} = (xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fmsub_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xab,0xc2] ; X64-NEXT: ## xmm0 {%k1} = (xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <2 x double> %__W, i64 0 %1 = extractelement <2 x double> %__A, i64 0 %.rhs.i = extractelement <2 x double> %__B, i64 0 %2 = fsub double -0.000000e+00, %.rhs.i %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext1.i = extractelement <2 x double> %__W, i32 0 %cond.i = select i1 %tobool.i, double %vecext1.i, double %3 %vecins.i = insertelement <2 x double> %__W, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_mask_fmsub_round_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> 
%__A, <2 x double> %__B) { ; X86-LABEL: test_mm_mask_fmsub_round_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xab,0xc2] ; X86-NEXT: ## xmm0 {%k1} = (xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fmsub_round_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xab,0xc2] ; X64-NEXT: ## xmm0 {%k1} = (xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %__W, <2 x double> %__A, <2 x double> %sub, i8 %__U, i32 4) ret <2 x double> %0 } define <2 x double> @test_mm_maskz_fmsub_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { ; X86-LABEL: test_mm_maskz_fmsub_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xab,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = (xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fmsub_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xab,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = (xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <2 x double> %__A, i64 0 %1 = extractelement <2 x double> %__B, i64 0 %.rhs.i = extractelement <2 x double> %__C, i64 0 %2 = fsub double -0.000000e+00, %.rhs.i %3 = tail call double @llvm.fma.f64(double %0, double %1, 
double %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %cond.i = select i1 %tobool.i, double 0.000000e+00, double %3 %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_maskz_fmsub_round_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { ; X86-LABEL: test_mm_maskz_fmsub_round_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xab,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = (xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fmsub_round_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xab,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = (xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C %0 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub, i8 %__U, i32 4) ret <2 x double> %0 } define <2 x double> @test_mm_mask3_fmsub_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fmsub_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbb,0xd1] ; X86-NEXT: ## xmm2 {%k1} = (xmm0 * xmm1) - xmm2 ; X86-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fmsub_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsub231sd %xmm1, %xmm0, 
%xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbb,0xd1] ; X64-NEXT: ## xmm2 {%k1} = (xmm0 * xmm1) - xmm2 ; X64-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <2 x double> %__W, i64 0 %1 = extractelement <2 x double> %__X, i64 0 %.rhs.i = extractelement <2 x double> %__Y, i64 0 %2 = fsub double -0.000000e+00, %.rhs.i %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext1.i = extractelement <2 x double> %__Y, i32 0 %cond.i = select i1 %tobool.i, double %vecext1.i, double %3 %vecins.i = insertelement <2 x double> %__Y, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_mask3_fmsub_round_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fmsub_round_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbb,0xd1] ; X86-NEXT: ## xmm2 {%k1} = (xmm0 * xmm1) - xmm2 ; X86-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fmsub_round_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbb,0xd1] ; X64-NEXT: ## xmm2 {%k1} = (xmm0 * xmm1) - xmm2 ; X64-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 %__U, i32 4) ret <2 x double> %0 } declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) #1 define <2 x double> 
@test_mm_mask_fnmadd_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { ; X86-LABEL: test_mm_mask_fnmadd_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xad,0xc2] ; X86-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) + xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fnmadd_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xad,0xc2] ; X64-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) + xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <2 x double> %__W, i64 0 %.rhs.i = extractelement <2 x double> %__A, i64 0 %1 = fsub double -0.000000e+00, %.rhs.i %2 = extractelement <2 x double> %__B, i64 0 %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext1.i = extractelement <2 x double> %__W, i32 0 %cond.i = select i1 %tobool.i, double %vecext1.i, double %3 %vecins.i = insertelement <2 x double> %__W, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_mask_fnmadd_round_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { ; X86-LABEL: test_mm_mask_fnmadd_round_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xad,0xc2] ; X86-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) + xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fnmadd_round_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd213sd %xmm2, 
%xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xad,0xc2] ; X64-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) + xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__A %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %__W, <2 x double> %sub, <2 x double> %__B, i8 %__U, i32 4) ret <2 x double> %0 } define <2 x double> @test_mm_maskz_fnmadd_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { ; X86-LABEL: test_mm_maskz_fnmadd_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xad,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) + xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fnmadd_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xad,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) + xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <2 x double> %__A, i64 0 %.rhs.i = extractelement <2 x double> %__B, i64 0 %1 = fsub double -0.000000e+00, %.rhs.i %2 = extractelement <2 x double> %__C, i64 0 %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %cond.i = select i1 %tobool.i, double 0.000000e+00, double %3 %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_maskz_fnmadd_round_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { ; X86-LABEL: test_mm_maskz_fnmadd_round_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: 
vfnmadd213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xad,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) + xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fnmadd_round_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xad,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) + xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B %0 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %__A, <2 x double> %sub, <2 x double> %__C, i8 %__U, i32 4) ret <2 x double> %0 } define <2 x double> @test_mm_mask3_fnmadd_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fnmadd_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmadd231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbd,0xd1] ; X86-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) + xmm2 ; X86-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fnmadd_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbd,0xd1] ; X64-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) + xmm2 ; X64-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <2 x double> %__W, i64 0 %.rhs.i = extractelement <2 x double> %__X, i64 0 %1 = fsub double -0.000000e+00, %.rhs.i %2 = extractelement <2 x double> %__Y, i64 0 %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext1.i = extractelement <2 x 
double> %__Y, i32 0 %cond.i = select i1 %tobool.i, double %vecext1.i, double %3 %vecins.i = insertelement <2 x double> %__Y, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_mask3_fnmadd_round_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fnmadd_round_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmadd231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbd,0xd1] ; X86-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) + xmm2 ; X86-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fnmadd_round_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmadd231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbd,0xd1] ; X64-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) + xmm2 ; X64-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__X %0 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %__W, <2 x double> %sub, <2 x double> %__Y, i8 %__U, i32 4) ret <2 x double> %0 } define <2 x double> @test_mm_mask_fnmsub_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { ; X86-LABEL: test_mm_mask_fnmsub_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xaf,0xc2] ; X86-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fnmsub_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: 
vfnmsub213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xaf,0xc2] ; X64-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <2 x double> %__W, i64 0 %.rhs.i = extractelement <2 x double> %__A, i64 0 %1 = fsub double -0.000000e+00, %.rhs.i %.rhs7.i = extractelement <2 x double> %__B, i64 0 %2 = fsub double -0.000000e+00, %.rhs7.i %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext2.i = extractelement <2 x double> %__W, i32 0 %cond.i = select i1 %tobool.i, double %vecext2.i, double %3 %vecins.i = insertelement <2 x double> %__W, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_mask_fnmsub_round_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { ; X86-LABEL: test_mm_mask_fnmsub_round_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xaf,0xc2] ; X86-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_fnmsub_round_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xaf,0xc2] ; X64-NEXT: ## xmm0 {%k1} = -(xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__A %sub1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %__W, <2 x double> %sub, <2 x double> %sub1, i8 %__U, i32 4) ret <2 x double> %0 } define <2 x double> @test_mm_maskz_fnmsub_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { ; X86-LABEL: test_mm_maskz_fnmsub_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: 
movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xaf,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fnmsub_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xaf,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <2 x double> %__A, i64 0 %.rhs.i = extractelement <2 x double> %__B, i64 0 %1 = fsub double -0.000000e+00, %.rhs.i %.rhs5.i = extractelement <2 x double> %__C, i64 0 %2 = fsub double -0.000000e+00, %.rhs5.i %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %cond.i = select i1 %tobool.i, double 0.000000e+00, double %3 %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_maskz_fnmsub_round_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { ; X86-LABEL: test_mm_maskz_fnmsub_round_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xaf,0xc2] ; X86-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) - xmm2 ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_fnmsub_round_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xaf,0xc2] ; X64-NEXT: ## xmm0 {%k1} {z} = -(xmm1 * xmm0) - xmm2 ; X64-NEXT: retq ## encoding: 
[0xc3] entry: %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B %sub1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C %0 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %__A, <2 x double> %sub, <2 x double> %sub1, i8 %__U, i32 4) ret <2 x double> %0 } define <2 x double> @test_mm_mask3_fnmsub_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fnmsub_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbf,0xd1] ; X86-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) - xmm2 ; X86-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fnmsub_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbf,0xd1] ; X64-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) - xmm2 ; X64-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %0 = extractelement <2 x double> %__W, i64 0 %.rhs.i = extractelement <2 x double> %__X, i64 0 %1 = fsub double -0.000000e+00, %.rhs.i %.rhs7.i = extractelement <2 x double> %__Y, i64 0 %2 = fsub double -0.000000e+00, %.rhs7.i %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 %4 = and i8 %__U, 1 %tobool.i = icmp eq i8 %4, 0 %vecext2.i = extractelement <2 x double> %__Y, i32 0 %cond.i = select i1 %tobool.i, double %vecext2.i, double %3 %vecins.i = insertelement <2 x double> %__Y, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_mask3_fnmsub_round_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fnmsub_round_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## 
encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbf,0xd1] ; X86-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) - xmm2 ; X86-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask3_fnmsub_round_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbf,0xd1] ; X64-NEXT: ## xmm2 {%k1} = -(xmm0 * xmm1) - xmm2 ; X64-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__X %0 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %__W, <2 x double> %sub, <2 x double> %__Y, i8 %__U, i32 4) ret <2 x double> %0 } define <4 x float> @test_mm_mask_add_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { ; X86-LABEL: test_mm_mask_add_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vaddss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x76,0x09,0x58,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_add_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vaddss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x76,0x09,0x58,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %vecext.i.i = extractelement <4 x float> %__B, i32 0 %vecext1.i.i = extractelement <4 x float> %__A, i32 0 %add.i.i = fadd float %vecext1.i.i, %vecext.i.i %0 = and i8 %__U, 1 %tobool.i = icmp eq i8 %0, 0 %vecext1.i = extractelement <4 x float> %__W, i32 0 %cond.i = select i1 %tobool.i, float %vecext1.i, float %add.i.i %vecins.i = insertelement <4 x float> %__A, float 
%cond.i, i32 0 ret <4 x float> %vecins.i } define <4 x float> @test_mm_maskz_add_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { ; X86-LABEL: test_mm_maskz_add_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vaddss %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x58,0xc1] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_add_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vaddss %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x58,0xc1] ; X64-NEXT: retq ## encoding: [0xc3] entry: %vecext.i.i = extractelement <4 x float> %__B, i32 0 %vecext1.i.i = extractelement <4 x float> %__A, i32 0 %add.i.i = fadd float %vecext1.i.i, %vecext.i.i %0 = and i8 %__U, 1 %tobool.i = icmp eq i8 %0, 0 %cond.i = select i1 %tobool.i, float 0.000000e+00, float %add.i.i %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0 ret <4 x float> %vecins.i } define <2 x double> @test_mm_mask_add_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { ; X86-LABEL: test_mm_mask_add_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vaddsd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf7,0x09,0x58,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_add_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vaddsd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf7,0x09,0x58,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %vecext.i.i = extractelement <2 x double> %__B, i32 0 %vecext1.i.i = extractelement <2 x double> %__A, i32 0 %add.i.i = fadd double %vecext1.i.i, %vecext.i.i %0 = and i8 %__U, 1 %tobool.i = icmp eq i8 
%0, 0 %vecext1.i = extractelement <2 x double> %__W, i32 0 %cond.i = select i1 %tobool.i, double %vecext1.i, double %add.i.i %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_maskz_add_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { ; X86-LABEL: test_mm_maskz_add_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vaddsd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0x89,0x58,0xc1] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_add_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vaddsd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0x89,0x58,0xc1] ; X64-NEXT: retq ## encoding: [0xc3] entry: %vecext.i.i = extractelement <2 x double> %__B, i32 0 %vecext1.i.i = extractelement <2 x double> %__A, i32 0 %add.i.i = fadd double %vecext1.i.i, %vecext.i.i %0 = and i8 %__U, 1 %tobool.i = icmp eq i8 %0, 0 %cond.i = select i1 %tobool.i, double 0.000000e+00, double %add.i.i %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <4 x float> @test_mm_mask_sub_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { ; X86-LABEL: test_mm_mask_sub_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vsubss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x76,0x09,0x5c,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_sub_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vsubss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x76,0x09,0x5c,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %vecext.i.i = 
extractelement <4 x float> %__B, i32 0 %vecext1.i.i = extractelement <4 x float> %__A, i32 0 %sub.i.i = fsub float %vecext1.i.i, %vecext.i.i %0 = and i8 %__U, 1 %tobool.i = icmp eq i8 %0, 0 %vecext1.i = extractelement <4 x float> %__W, i32 0 %cond.i = select i1 %tobool.i, float %vecext1.i, float %sub.i.i %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0 ret <4 x float> %vecins.i } define <4 x float> @test_mm_maskz_sub_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { ; X86-LABEL: test_mm_maskz_sub_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vsubss %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x5c,0xc1] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_sub_ss: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vsubss %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x5c,0xc1] ; X64-NEXT: retq ## encoding: [0xc3] entry: %vecext.i.i = extractelement <4 x float> %__B, i32 0 %vecext1.i.i = extractelement <4 x float> %__A, i32 0 %sub.i.i = fsub float %vecext1.i.i, %vecext.i.i %0 = and i8 %__U, 1 %tobool.i = icmp eq i8 %0, 0 %cond.i = select i1 %tobool.i, float 0.000000e+00, float %sub.i.i %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0 ret <4 x float> %vecins.i } define <2 x double> @test_mm_mask_sub_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { ; X86-LABEL: test_mm_mask_sub_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vsubsd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf7,0x09,0x5c,0xc2] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_mask_sub_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: 
[0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vsubsd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf7,0x09,0x5c,0xc2] ; X64-NEXT: retq ## encoding: [0xc3] entry: %vecext.i.i = extractelement <2 x double> %__B, i32 0 %vecext1.i.i = extractelement <2 x double> %__A, i32 0 %sub.i.i = fsub double %vecext1.i.i, %vecext.i.i %0 = and i8 %__U, 1 %tobool.i = icmp eq i8 %0, 0 %vecext1.i = extractelement <2 x double> %__W, i32 0 %cond.i = select i1 %tobool.i, double %vecext1.i, double %sub.i.i %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <2 x double> @test_mm_maskz_sub_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { ; X86-LABEL: test_mm_maskz_sub_sd: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vsubsd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0x89,0x5c,0xc1] ; X86-NEXT: retl ## encoding: [0xc3] ; ; X64-LABEL: test_mm_maskz_sub_sd: ; X64: ## %bb.0: ## %entry ; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; X64-NEXT: vsubsd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0x89,0x5c,0xc1] ; X64-NEXT: retq ## encoding: [0xc3] entry: %vecext.i.i = extractelement <2 x double> %__B, i32 0 %vecext1.i.i = extractelement <2 x double> %__A, i32 0 %sub.i.i = fsub double %vecext1.i.i, %vecext.i.i %0 = and i8 %__U, 1 %tobool.i = icmp eq i8 %0, 0 %cond.i = select i1 %tobool.i, double 0.000000e+00, double %sub.i.i %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0 ret <2 x double> %vecins.i } define <4 x float> @test_mm_mask_mul_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { ; X86-LABEL: test_mm_mask_mul_ss: ; X86: ## %bb.0: ## %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8] ; X86-NEXT: vmulss %xmm2, %xmm1, %xmm0 {%k1} 
; ## encoding: [0x62,0xf1,0x76,0x09,0x59,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm_mask_mul_ss:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vmulss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x76,0x09,0x59,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %vecext.i.i = extractelement <4 x float> %__B, i32 0
  %vecext1.i.i = extractelement <4 x float> %__A, i32 0
  %mul.i.i = fmul float %vecext1.i.i, %vecext.i.i
  ; Mask bit 0 selects between the product and the passthrough __W[0].
  %0 = and i8 %__U, 1
  %tobool.i = icmp eq i8 %0, 0
  %vecext1.i = extractelement <4 x float> %__W, i32 0
  %cond.i = select i1 %tobool.i, float %vecext1.i, float %mul.i.i
  %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0
  ret <4 x float> %vecins.i
}

; Zero-masked scalar single-precision multiply: lane 0 is __A[0]*__B[0] when
; bit 0 of __U is set, otherwise 0.0; upper lanes are taken from __A.
; Expected lowering: VMULSS with {%k1} {z} (zeroing merge).
define <4 x float> @test_mm_maskz_mul_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; X86-LABEL: test_mm_maskz_mul_ss:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vmulss %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x59,0xc1]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_mul_ss:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vmulss %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x59,0xc1]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %vecext.i.i = extractelement <4 x float> %__B, i32 0
  %vecext1.i.i = extractelement <4 x float> %__A, i32 0
  %mul.i.i = fmul float %vecext1.i.i, %vecext.i.i
  ; Mask bit 0 selects between the product and 0.0 (zeroing semantics).
  %0 = and i8 %__U, 1
  %tobool.i = icmp eq i8 %0, 0
  %cond.i = select i1 %tobool.i, float 0.000000e+00, float %mul.i.i
  %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0
  ret <4 x float> %vecins.i
}

; Merge-masked scalar double multiply: lane 0 is __A[0]*__B[0] when bit 0 of
; __U is set, otherwise the passthrough __W[0]; upper lane is taken from __A.
; Expected lowering: VMULSD with a {%k1} merge mask.
define <2 x double> @test_mm_mask_mul_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; X86-LABEL: test_mm_mask_mul_sd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vmulsd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf7,0x09,0x59,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm_mask_mul_sd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vmulsd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf7,0x09,0x59,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %vecext.i.i = extractelement <2 x double> %__B, i32 0
  %vecext1.i.i = extractelement <2 x double> %__A, i32 0
  %mul.i.i = fmul double %vecext1.i.i, %vecext.i.i
  ; Mask bit 0 selects between the product and the passthrough __W[0].
  %0 = and i8 %__U, 1
  %tobool.i = icmp eq i8 %0, 0
  %vecext1.i = extractelement <2 x double> %__W, i32 0
  %cond.i = select i1 %tobool.i, double %vecext1.i, double %mul.i.i
  %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0
  ret <2 x double> %vecins.i
}

; Zero-masked scalar double multiply: lane 0 is __A[0]*__B[0] when bit 0 of
; __U is set, otherwise 0.0; upper lane is taken from __A.
; Expected lowering: VMULSD with {%k1} {z} (zeroing merge).
define <2 x double> @test_mm_maskz_mul_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; X86-LABEL: test_mm_maskz_mul_sd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0x89,0x59,0xc1]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_mul_sd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0x89,0x59,0xc1]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %vecext.i.i = extractelement <2 x double> %__B, i32 0
  %vecext1.i.i = extractelement <2 x double> %__A, i32 0
  %mul.i.i = fmul double %vecext1.i.i, %vecext.i.i
  ; Mask bit 0 selects between the product and 0.0 (zeroing semantics).
  %0 = and i8 %__U, 1
  %tobool.i = icmp eq i8 %0, 0
  %cond.i = select i1 %tobool.i, double 0.000000e+00, double %mul.i.i
  %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0
  ret <2 x double> %vecins.i
}
; Merge-masked scalar single-precision divide: lane 0 is __A[0]/__B[0] when
; bit 0 of __U is set, otherwise the passthrough __W[0]; upper lanes are taken
; from __A. Unlike the mul tests, the mask bit is read by bitcasting __U to
; <8 x i1> and extracting element 0.
; Expected lowering: VDIVSS with a {%k1} merge mask.
define <4 x float> @test_mm_mask_div_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; X86-LABEL: test_mm_mask_div_ss:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vdivss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x76,0x09,0x5e,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm_mask_div_ss:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vdivss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x76,0x09,0x5e,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = extractelement <4 x float> %__A, i64 0
  %1 = extractelement <4 x float> %__B, i64 0
  %2 = extractelement <4 x float> %__W, i64 0
  %3 = fdiv float %0, %1
  ; Bit 0 of the mask chooses between the quotient and the passthrough __W[0].
  %4 = bitcast i8 %__U to <8 x i1>
  %5 = extractelement <8 x i1> %4, i64 0
  %6 = select i1 %5, float %3, float %2
  %7 = insertelement <4 x float> %__A, float %6, i64 0
  ret <4 x float> %7
}

; Zero-masked scalar single-precision divide: lane 0 is __A[0]/__B[0] when
; bit 0 of __U is set, otherwise 0.0; upper lanes are taken from __A.
; Expected lowering: VDIVSS with {%k1} {z} (zeroing merge).
define <4 x float> @test_mm_maskz_div_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; X86-LABEL: test_mm_maskz_div_ss:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vdivss %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x5e,0xc1]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_div_ss:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vdivss %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x5e,0xc1]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = extractelement <4 x float> %__A, i64 0
  %1 = extractelement <4 x float> %__B, i64 0
  %2 = fdiv float %0, %1
  ; Bit 0 of the mask chooses between the quotient and 0.0 (zeroing semantics).
  %3 = bitcast i8 %__U to <8 x i1>
  %4 = extractelement <8 x i1> %3, i64 0
  %5 = select i1 %4, float %2, float 0.000000e+00
  %6 = insertelement <4 x float> %__A, float %5, i64 0
  ret <4 x float> %6
}

; Merge-masked scalar double divide: lane 0 is __A[0]/__B[0] when bit 0 of
; __U is set, otherwise the passthrough __W[0]; upper lane is taken from __A.
; Expected lowering: VDIVSD with a {%k1} merge mask.
define <2 x double> @test_mm_mask_div_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; X86-LABEL: test_mm_mask_div_sd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vdivsd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf7,0x09,0x5e,0xc2]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm_mask_div_sd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vdivsd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf7,0x09,0x5e,0xc2]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = extractelement <2 x double> %__A, i64 0
  %1 = extractelement <2 x double> %__B, i64 0
  %2 = extractelement <2 x double> %__W, i64 0
  %3 = fdiv double %0, %1
  ; Bit 0 of the mask chooses between the quotient and the passthrough __W[0].
  %4 = bitcast i8 %__U to <8 x i1>
  %5 = extractelement <8 x i1> %4, i64 0
  %6 = select i1 %5, double %3, double %2
  %7 = insertelement <2 x double> %__A, double %6, i64 0
  ret <2 x double> %7
}

; Zero-masked scalar double divide: lane 0 is __A[0]/__B[0] when bit 0 of
; __U is set, otherwise 0.0; upper lane is taken from __A.
; Expected lowering: VDIVSD with {%k1} {z} (zeroing merge).
define <2 x double> @test_mm_maskz_div_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; X86-LABEL: test_mm_maskz_div_sd:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al ## encoding: [0x8a,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vdivsd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0x89,0x5e,0xc1]
; X86-NEXT:    retl ## encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_div_sd:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vdivsd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0x89,0x5e,0xc1]
; X64-NEXT:    retq ## encoding: [0xc3]
entry:
  %0 = extractelement <2 x double> %__A, i64 0
  %1 = extractelement <2 x double> %__B, i64 0
  %2 = fdiv double %0, %1
  ; Bit 0 of the mask chooses between the quotient and 0.0 (zeroing semantics).
  %3 = bitcast i8 %__U to <8 x i1>
  %4 = extractelement <8 x i1> %3, i64 0
  %5 = select i1 %4, double %2, double 0.000000e+00
  %6 = insertelement <2 x double> %__A, double %5, i64 0
  ret <2 x double> %6
}

; Generic FMA intrinsics used by the fma/fmsub tests in this file.
declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>) #9
declare <16 x float> @llvm.fma.v16f32(<16 x float>, <16 x float>, <16 x float>) #9
declare float @llvm.fma.f32(float, float, float) #9
declare double @llvm.fma.f64(double, double, double) #9