• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE
3; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32-AVX
4; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE
5; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
6
7; PR29079
8
; Each mask value (127, 255, 4095, 65595) is well below 2^31, so after the
; 'and' every lane is non-negative as a signed i32 and the uitofp is exact
; as a signed convert. The checks below confirm llc lowers this to a plain
; andps + cvtdq2ps (signed dword->float) rather than a costlier unsigned
; conversion sequence (PR29079).
; NOTE(review): the CHECK lines are autogenerated — regenerate them with
; utils/update_llc_test_checks.py instead of editing by hand.
9define <4 x float> @mask_ucvt_4i32_4f32(<4 x i32> %a) {
10; X32-SSE-LABEL: mask_ucvt_4i32_4f32:
11; X32-SSE:       # %bb.0:
12; X32-SSE-NEXT:    andps {{\.LCPI.*}}, %xmm0
13; X32-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
14; X32-SSE-NEXT:    retl
15;
16; X32-AVX-LABEL: mask_ucvt_4i32_4f32:
17; X32-AVX:       # %bb.0:
18; X32-AVX-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
19; X32-AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
20; X32-AVX-NEXT:    retl
21;
22; X64-SSE-LABEL: mask_ucvt_4i32_4f32:
23; X64-SSE:       # %bb.0:
24; X64-SSE-NEXT:    andps {{.*}}(%rip), %xmm0
25; X64-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
26; X64-SSE-NEXT:    retq
27;
28; X64-AVX-LABEL: mask_ucvt_4i32_4f32:
29; X64-AVX:       # %bb.0:
30; X64-AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
31; X64-AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
32; X64-AVX-NEXT:    retq
; Mask clears the high bits of every lane, then convert to float.
33  %and = and <4 x i32> %a, <i32 127, i32 255, i32 4095, i32 65595>
34  %cvt = uitofp <4 x i32> %and to <4 x float>
35  ret <4 x float> %cvt
36}
37
; Same masking idea as above, but widening to <4 x double>: every masked lane
; is < 2^31, so uitofp can lower to the signed cvtdq2pd. The checks show SSE
; converting the vector in two halves (low half, then pshufd to move the high
; pair down), while AVX does a single 128->256-bit vcvtdq2pd (PR29079).
; NOTE(review): the CHECK lines are autogenerated — regenerate them with
; utils/update_llc_test_checks.py instead of editing by hand.
38define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) {
39; X32-SSE-LABEL: mask_ucvt_4i32_4f64:
40; X32-SSE:       # %bb.0:
41; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
42; X32-SSE-NEXT:    cvtdq2pd %xmm0, %xmm2
43; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
44; X32-SSE-NEXT:    cvtdq2pd %xmm0, %xmm1
45; X32-SSE-NEXT:    movaps %xmm2, %xmm0
46; X32-SSE-NEXT:    retl
47;
48; X32-AVX-LABEL: mask_ucvt_4i32_4f64:
49; X32-AVX:       # %bb.0:
50; X32-AVX-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
51; X32-AVX-NEXT:    vcvtdq2pd %xmm0, %ymm0
52; X32-AVX-NEXT:    retl
53;
54; X64-SSE-LABEL: mask_ucvt_4i32_4f64:
55; X64-SSE:       # %bb.0:
56; X64-SSE-NEXT:    pand {{.*}}(%rip), %xmm0
57; X64-SSE-NEXT:    cvtdq2pd %xmm0, %xmm2
58; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
59; X64-SSE-NEXT:    cvtdq2pd %xmm0, %xmm1
60; X64-SSE-NEXT:    movaps %xmm2, %xmm0
61; X64-SSE-NEXT:    retq
62;
63; X64-AVX-LABEL: mask_ucvt_4i32_4f64:
64; X64-AVX:       # %bb.0:
65; X64-AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
66; X64-AVX-NEXT:    vcvtdq2pd %xmm0, %ymm0
67; X64-AVX-NEXT:    retq
; Same masks as the f32 test above; all lanes stay < 2^31 after the 'and'.
68  %and = and <4 x i32> %a, <i32 127, i32 255, i32 4095, i32 65595>
69  %cvt = uitofp <4 x i32> %and to <4 x double>
70  ret <4 x double> %cvt
71}
72