; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=X86-SSE
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefix=AVX --check-prefix=AVX1 --check-prefix=X86-AVX --check-prefix=X86-AVX1
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=X86-AVX --check-prefix=X86-AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefix=AVX --check-prefix=AVX1 --check-prefix=X64-AVX --check-prefix=X64-AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=X64-AVX --check-prefix=X64-AVX2

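; Truncating an all-sign-bits ashr (<4 x i64> shifted right by 63) to <4 x i32>
; should lower to a sign-bit extraction (psrad/pshufd on SSE, vpcmpgtq against
; zero on AVX) followed by packssdw, rather than a full 64-bit arithmetic shift.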
define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
; SSE-LABEL: trunc_ashr_v4i64:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT:    packssdw %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v4i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm1
; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v4i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = ashr <4 x i64> %a, <i64 63, i64 63, i64 63, i64 63>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  ret <4 x i32> %2
}

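; The same pattern on <8 x i32> shifted right by 31 and truncated to <8 x i16>:
; the sign bits are splatted with psrad $31 and then narrowed with packssdw.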
define <8 x i16> @trunc_ashr_v8i32(<8 x i32> %a) nounwind {
; SSE-LABEL: trunc_ashr_v8i32:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    packssdw %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v8i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v8i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = ashr <8 x i32> %a, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  %2 = trunc <8 x i32> %1 to <8 x i16>
  ret <8 x i16> %2
}

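; Concatenating an ashr-by-31 half with a sign-extended icmp half: both halves
; are all-sign-bits values, so the shuffle and truncation should still fold into
; a single packssdw of the psrad and pcmpgtd results.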
define <8 x i16> @trunc_ashr_v4i32_icmp_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; X86-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    psrad $31, %xmm0
; X86-SSE-NEXT:    pcmpgtd {{\.LCPI.*}}, %xmm1
; X86-SSE-NEXT:    packssdw %xmm1, %xmm0
; X86-SSE-NEXT:    retl
;
; X86-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; X86-AVX-NEXT:    vpcmpgtd {{\.LCPI.*}}, %xmm1, %xmm1
; X86-AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    psrad $31, %xmm0
; X64-SSE-NEXT:    pcmpgtd {{.*}}(%rip), %xmm1
; X64-SSE-NEXT:    packssdw %xmm1, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; X64-AVX-NEXT:    vpcmpgtd {{.*}}(%rip), %xmm1, %xmm1
; X64-AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %1 = ashr <4 x i32> %a, <i32 31, i32 31, i32 31, i32 31>
  %2 = icmp sgt <4 x i32> %b, <i32 1, i32 16, i32 255, i32 65535>
  %3 = sext <4 x i1> %2 to <4 x i32>
  %4 = shufflevector <4 x i32> %1, <4 x i32> %3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %5 = trunc <8 x i32> %4 to <8 x i16>
  ret <8 x i16> %5
}