; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=generic -mattr=+sse2 -fast-isel --fast-isel-abort=1 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SSE2
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=generic -mattr=+avx -fast-isel --fast-isel-abort=1 < %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=generic -mattr=+avx512f -fast-isel --fast-isel-abort=1 < %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX

; Tests fast-isel lowering of sitofp (i64 -> double/float) for register and
; memory source operands; the *_optsize variants check that the load is
; folded into the convert instruction when optimizing for size.

define double @long_to_double_rr(i64 %a) {
; SSE2-LABEL: long_to_double_rr:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    cvtsi2sdq %rdi, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: long_to_double_rr:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %0 = sitofp i64 %a to double
  ret double %0
}

define double @long_to_double_rm(i64* %a) {
; SSE2-LABEL: long_to_double_rm:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    movq (%rdi), %rax
; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: long_to_double_rm:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    movq (%rdi), %rax
; AVX-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %0 = load i64, i64* %a
  %1 = sitofp i64 %0 to double
  ret double %1
}

define double @long_to_double_rm_optsize(i64* %a) optsize {
; SSE2-LABEL: long_to_double_rm_optsize:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    cvtsi2sdq (%rdi), %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: long_to_double_rm_optsize:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vcvtsi2sdq (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %0 = load i64, i64* %a
  %1 = sitofp i64 %0 to double
  ret double %1
}

define float @long_to_float_rr(i64 %a) {
; SSE2-LABEL: long_to_float_rr:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    cvtsi2ssq %rdi, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: long_to_float_rr:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %0 = sitofp i64 %a to float
  ret float %0
}

define float @long_to_float_rm(i64* %a) {
; SSE2-LABEL: long_to_float_rm:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    movq (%rdi), %rax
; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: long_to_float_rm:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    movq (%rdi), %rax
; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %0 = load i64, i64* %a
  %1 = sitofp i64 %0 to float
  ret float %1
}

define float @long_to_float_rm_optsize(i64* %a) optsize {
; SSE2-LABEL: long_to_float_rm_optsize:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    cvtsi2ssq (%rdi), %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: long_to_float_rm_optsize:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vcvtsi2ssq (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %0 = load i64, i64* %a
  %1 = sitofp i64 %0 to float
  ret float %1
}