; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=-sse2,+sse | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=-sse2,+sse | FileCheck %s --check-prefix=X64

; copysign with a constant positive sign operand lowers to clearing the sign
; bit (andps with a constant-pool mask).
define float @f32_pos(float %a, float %b) nounwind {
; X86-LABEL: f32_pos:
; X86:       # %bb.0:
; X86-NEXT:    pushl %eax
; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT:    andps {{\.LCPI.*}}, %xmm0
; X86-NEXT:    movss %xmm0, (%esp)
; X86-NEXT:    flds (%esp)
; X86-NEXT:    popl %eax
; X86-NEXT:    retl
;
; X64-LABEL: f32_pos:
; X64:       # %bb.0:
; X64-NEXT:    andps {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %tmp = tail call float @llvm.copysign.f32(float %a, float 1.0)
  ret float %tmp
}

; copysign with a constant negative sign operand lowers to setting the sign
; bit (orps with a constant-pool mask).
define float @f32_neg(float %a, float %b) nounwind {
; X86-LABEL: f32_neg:
; X86:       # %bb.0:
; X86-NEXT:    pushl %eax
; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT:    orps {{\.LCPI.*}}, %xmm0
; X86-NEXT:    movss %xmm0, (%esp)
; X86-NEXT:    flds (%esp)
; X86-NEXT:    popl %eax
; X86-NEXT:    retl
;
; X64-LABEL: f32_neg:
; X64:       # %bb.0:
; X64-NEXT:    orps {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %tmp = tail call float @llvm.copysign.f32(float %a, float -1.0)
  ret float %tmp
}

; Same fold for the vector form: an all-positive sign vector becomes andps.
define <4 x float> @v4f32_pos(<4 x float> %a, <4 x float> %b) nounwind {
; X86-LABEL: v4f32_pos:
; X86:       # %bb.0:
; X86-NEXT:    andps {{\.LCPI.*}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: v4f32_pos:
; X64:       # %bb.0:
; X64-NEXT:    andps {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %tmp = tail call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>)
  ret <4 x float> %tmp
}

; An all-negative sign vector becomes orps.
define <4 x float> @v4f32_neg(<4 x float> %a, <4 x float> %b) nounwind {
; X86-LABEL: v4f32_neg:
; X86:       # %bb.0:
; X86-NEXT:    orps {{\.LCPI.*}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: v4f32_neg:
; X64:       # %bb.0:
; X64-NEXT:    orps {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %tmp = tail call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> <float -1.0, float -1.0, float -1.0, float -1.0>)
  ret <4 x float> %tmp
}

; Constant magnitude with a variable sign: extract the sign bits of %b (andps)
; and OR them into the constant 1.0 magnitude.
define <4 x float> @v4f32_const_mag(<4 x float> %a, <4 x float> %b) nounwind {
; X86-LABEL: v4f32_const_mag:
; X86:       # %bb.0:
; X86-NEXT:    movaps %xmm1, %xmm0
; X86-NEXT:    andps {{\.LCPI.*}}, %xmm0
; X86-NEXT:    orps {{\.LCPI.*}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: v4f32_const_mag:
; X64:       # %bb.0:
; X64-NEXT:    movaps %xmm1, %xmm0
; X64-NEXT:    andps {{.*}}(%rip), %xmm0
; X64-NEXT:    orps {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %tmp = tail call <4 x float> @llvm.copysign.v4f32(<4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, <4 x float> %b)
  ret <4 x float> %tmp
}

declare float @llvm.copysign.f32(float, float)
declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>)