; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
;
; Codegen tests for scalar/vector FP<->int conversions and rounding on AVX.
; Each CHECK block was produced by update_llc_test_checks.py; regenerate with
; that script rather than editing the assertions by hand.

; 256-bit packed signed-int -> float should select a single vcvtdq2ps.
define <8 x float> @sitofp00(<8 x i32> %a) nounwind {
; CHECK-LABEL: sitofp00:
; CHECK:       # BB#0:
; CHECK-NEXT:    vcvtdq2ps %ymm0, %ymm0
; CHECK-NEXT:    retq
  %b = sitofp <8 x i32> %a to <8 x float>
  ret <8 x float> %b
}

; 256-bit packed float -> signed int uses the truncating vcvttps2dq.
define <8 x i32> @fptosi00(<8 x float> %a) nounwind {
; CHECK-LABEL: fptosi00:
; CHECK:       # BB#0:
; CHECK-NEXT:    vcvttps2dq %ymm0, %ymm0
; CHECK-NEXT:    retq
  %b = fptosi <8 x float> %a to <8 x i32>
  ret <8 x i32> %b
}

; 128-bit i32 widened to 256-bit double via vcvtdq2pd (xmm -> ymm form).
define <4 x double> @sitofp01(<4 x i32> %a) {
; CHECK-LABEL: sitofp01:
; CHECK:       # BB#0:
; CHECK-NEXT:    vcvtdq2pd %xmm0, %ymm0
; CHECK-NEXT:    retq
  %b = sitofp <4 x i32> %a to <4 x double>
  ret <4 x double> %b
}

; i16 source needs sign-extension to i32 first: two vpmovsxwd halves are
; combined with vinsertf128, then converted with one vcvtdq2ps.
define <8 x float> @sitofp02(<8 x i16> %a) {
; CHECK-LABEL: sitofp02:
; CHECK:       # BB#0:
; CHECK-NEXT:    vpmovsxwd %xmm0, %xmm1
; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT:    vpmovsxwd %xmm0, %xmm0
; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; CHECK-NEXT:    vcvtdq2ps %ymm0, %ymm0
; CHECK-NEXT:    retq
  %b = sitofp <8 x i16> %a to <8 x float>
  ret <8 x float> %b
}

; Narrowing double -> i32 uses vcvttpd2dqy and clears the upper ymm state
; with vzeroupper before returning.
define <4 x i32> @fptosi01(<4 x double> %a) {
; CHECK-LABEL: fptosi01:
; CHECK:       # BB#0:
; CHECK-NEXT:    vcvttpd2dqy %ymm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %b = fptosi <4 x double> %a to <4 x i32>
  ret <4 x i32> %b
}

; <8 x double> -> <8 x float>: two vcvtpd2psy halves rejoined by vinsertf128.
define <8 x float> @fptrunc00(<8 x double> %b) nounwind {
; CHECK-LABEL: fptrunc00:
; CHECK:       # BB#0:
; CHECK-NEXT:    vcvtpd2psy %ymm0, %xmm0
; CHECK-NEXT:    vcvtpd2psy %ymm1, %xmm1
; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT:    retq
  %a = fptrunc <8 x double> %b to <8 x float>
  ret <8 x float> %a
}

; Widening float -> double selects the single-instruction vcvtps2pd.
define <4 x double> @fpext00(<4 x float> %b) nounwind {
; CHECK-LABEL: fpext00:
; CHECK:       # BB#0:
; CHECK-NEXT:    vcvtps2pd %xmm0, %ymm0
; CHECK-NEXT:    retq
  %a = fpext <4 x float> %b to <4 x double>
  ret <4 x double> %a
}

; Scalar sitofp from a loaded i64/i32 should fold the load into the
; memory-operand form of vcvtsi2sd/vcvtsi2ss (funcA-funcD below).
define double @funcA(i64* nocapture %e) nounwind uwtable readonly ssp {
; CHECK-LABEL: funcA:
; CHECK:       # BB#0:
; CHECK-NEXT:    vcvtsi2sdq (%rdi), %xmm0, %xmm0
; CHECK-NEXT:    retq
  %tmp1 = load i64, i64* %e, align 8
  %conv = sitofp i64 %tmp1 to double
  ret double %conv
}

define double @funcB(i32* nocapture %e) nounwind uwtable readonly ssp {
; CHECK-LABEL: funcB:
; CHECK:       # BB#0:
; CHECK-NEXT:    vcvtsi2sdl (%rdi), %xmm0, %xmm0
; CHECK-NEXT:    retq
  %tmp1 = load i32, i32* %e, align 4
  %conv = sitofp i32 %tmp1 to double
  ret double %conv
}

define float @funcC(i32* nocapture %e) nounwind uwtable readonly ssp {
; CHECK-LABEL: funcC:
; CHECK:       # BB#0:
; CHECK-NEXT:    vcvtsi2ssl (%rdi), %xmm0, %xmm0
; CHECK-NEXT:    retq
  %tmp1 = load i32, i32* %e, align 4
  %conv = sitofp i32 %tmp1 to float
  ret float %conv
}

define float @funcD(i64* nocapture %e) nounwind uwtable readonly ssp {
; CHECK-LABEL: funcD:
; CHECK:       # BB#0:
; CHECK-NEXT:    vcvtsi2ssq (%rdi), %xmm0, %xmm0
; CHECK-NEXT:    retq
  %tmp1 = load i64, i64* %e, align 8
  %conv = sitofp i64 %tmp1 to float
  ret float %conv
}

; Scalar fpext through stack slots folds the load into vcvtss2sd.
define void @fpext() nounwind uwtable {
; CHECK-LABEL: fpext:
; CHECK:       # BB#0:
; CHECK-NEXT:    vcvtss2sd -{{[0-9]+}}(%rsp), %xmm0, %xmm0
; CHECK-NEXT:    vmovsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    retq
  %f = alloca float, align 4
  %d = alloca double, align 8
  %tmp = load float, float* %f, align 4
  %conv = fpext float %tmp to double
  store double %conv, double* %d, align 8
  ret void
}

; llvm.nearbyint lowers to vroundsd with immediate $12.
define double @nearbyint_f64(double %a) {
; CHECK-LABEL: nearbyint_f64:
; CHECK:       # BB#0:
; CHECK-NEXT:    vroundsd $12, %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %res = call double @llvm.nearbyint.f64(double %a)
  ret double %res
}
declare double @llvm.nearbyint.f64(double %p)

; llvm.floor lowers to vroundss with immediate $9.
define float @floor_f32(float %a) {
; CHECK-LABEL: floor_f32:
; CHECK:       # BB#0:
; CHECK-NEXT:    vroundss $9, %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %res = call float @llvm.floor.f32(float %a)
  ret float %res
}
declare float @llvm.floor.f32(float %p)