; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX

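; Check that the (align 1) load feeding each SSE4.1 extension intrinsic is
; folded into the memory-operand form of the instruction: pmovsx*/pmovzx*
; under SSE4.1, and the v-prefixed forms under AVX2.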
define <8 x i16> @test_llvm_x86_sse41_pmovsxbw(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovsxbw
; SSE41: pmovsxbw (%rdi), %xmm0
; AVX:  vpmovsxbw (%rdi), %xmm0
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %1)
  ret <8 x i16> %2
}

define <4 x i32> @test_llvm_x86_sse41_pmovsxbd(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovsxbd
; SSE41: pmovsxbd (%rdi), %xmm0
; AVX:  vpmovsxbd (%rdi), %xmm0
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %1)
  ret <4 x i32> %2
}

define <2 x i64> @test_llvm_x86_sse41_pmovsxbq(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovsxbq
; SSE41: pmovsxbq (%rdi), %xmm0
; AVX:  vpmovsxbq (%rdi), %xmm0
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %1)
  ret <2 x i64> %2
}

define <4 x i32> @test_llvm_x86_sse41_pmovsxwd(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovsxwd
; SSE41: pmovsxwd (%rdi), %xmm0
; AVX:  vpmovsxwd (%rdi), %xmm0
  %1 = load <8 x i16>, <8 x i16>* %a, align 1
  %2 = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1)
  ret <4 x i32> %2
}

define <2 x i64> @test_llvm_x86_sse41_pmovsxwq(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovsxwq
; SSE41: pmovsxwq (%rdi), %xmm0
; AVX:  vpmovsxwq (%rdi), %xmm0
  %1 = load <8 x i16>, <8 x i16>* %a, align 1
  %2 = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %1)
  ret <2 x i64> %2
}

define <2 x i64> @test_llvm_x86_sse41_pmovsxdq(<4 x i32>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovsxdq
; SSE41: pmovsxdq (%rdi), %xmm0
; AVX:  vpmovsxdq (%rdi), %xmm0
  %1 = load <4 x i32>, <4 x i32>* %a, align 1
  %2 = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %1)
  ret <2 x i64> %2
}

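; Zero-extension counterparts: the same load folding is expected for
; pmovzx*/vpmovzx*.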
define <8 x i16> @test_llvm_x86_sse41_pmovzxbw(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovzxbw
; SSE41: pmovzxbw (%rdi), %xmm0
; AVX:  vpmovzxbw (%rdi), %xmm0
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %1)
  ret <8 x i16> %2
}

define <4 x i32> @test_llvm_x86_sse41_pmovzxbd(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovzxbd
; SSE41: pmovzxbd (%rdi), %xmm0
; AVX:  vpmovzxbd (%rdi), %xmm0
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %1)
  ret <4 x i32> %2
}

define <2 x i64> @test_llvm_x86_sse41_pmovzxbq(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovzxbq
; SSE41: pmovzxbq (%rdi), %xmm0
; AVX:  vpmovzxbq (%rdi), %xmm0
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %1)
  ret <2 x i64> %2
}

define <4 x i32> @test_llvm_x86_sse41_pmovzxwd(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovzxwd
; SSE41: pmovzxwd (%rdi), %xmm0
; AVX:  vpmovzxwd (%rdi), %xmm0
  %1 = load <8 x i16>, <8 x i16>* %a, align 1
  %2 = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %1)
  ret <4 x i32> %2
}

define <2 x i64> @test_llvm_x86_sse41_pmovzxwq(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovzxwq
; SSE41: pmovzxwq (%rdi), %xmm0
; AVX:  vpmovzxwq (%rdi), %xmm0
  %1 = load <8 x i16>, <8 x i16>* %a, align 1
  %2 = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %1)
  ret <2 x i64> %2
}

define <2 x i64> @test_llvm_x86_sse41_pmovzxdq(<4 x i32>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovzxdq
; SSE41: pmovzxdq (%rdi), %xmm0
; AVX:  vpmovzxdq (%rdi), %xmm0
  %1 = load <4 x i32>, <4 x i32>* %a, align 1
  %2 = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %1)
  ret <2 x i64> %2
}

declare <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32>)
declare <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16>)
declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>)
declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>)
declare <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8>)
declare <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8>)
declare <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32>)
declare <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16>)
declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>)
declare <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8>)
declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>)
declare <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8>)