• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
; RUN: opt < %s -S | FileCheck %s
; RUN: verify-uselistorder < %s
; CHECK-NOT: {@llvm\\.palign}

; 128-bit palignr with an in-range immediate (15 < 16 bytes).
; The call is expected to be auto-upgraded away (see CHECK-NOT above).
define <4 x i32> @align1(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <4 x i32> %b to <2 x i64>          ; <<2 x i64>> [#uses=1]
  %1 = bitcast <4 x i32> %a to <2 x i64>          ; <<2 x i64>> [#uses=1]
  %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 15) ; <<2 x i64>> [#uses=1]
  %3 = bitcast <2 x i64> %2 to <4 x i32>          ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %3
}

; 64-bit (MMX-width) palignr with an in-range immediate (7 < 8 bytes).
define double @align8(<2 x i32> %a, <2 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <2 x i32> %b to <1 x i64>          ; <<1 x i64>> [#uses=1]
  %1 = bitcast <2 x i32> %a to <1 x i64>          ; <<1 x i64>> [#uses=1]
  %2 = tail call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %1, <1 x i64> %0, i8 7) ; <<1 x i64>> [#uses=1]
  %3 = extractelement <1 x i64> %2, i32 0         ; <i64> [#uses=1]
  %retval12 = bitcast i64 %3 to double            ; <double> [#uses=1]
  ret double %retval12
}

; 64-bit palignr intrinsic; expected to be removed by opt (per the CHECK-NOT).
declare <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64>, <1 x i64>, i8) nounwind readnone

; 64-bit palignr with an out-of-range immediate (16 >= 2*8 bytes shifts everything out).
define double @align7(<2 x i32> %a, <2 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <2 x i32> %b to <1 x i64>          ; <<1 x i64>> [#uses=1]
  %1 = bitcast <2 x i32> %a to <1 x i64>          ; <<1 x i64>> [#uses=1]
  %2 = tail call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %1, <1 x i64> %0, i8 16) ; <<1 x i64>> [#uses=1]
  %3 = extractelement <1 x i64> %2, i32 0         ; <i64> [#uses=1]
  %retval12 = bitcast i64 %3 to double            ; <double> [#uses=1]
  ret double %retval12
}

; 64-bit palignr with immediate 9 (straddles the 8-byte operand boundary).
define double @align6(<2 x i32> %a, <2 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <2 x i32> %b to <1 x i64>          ; <<1 x i64>> [#uses=1]
  %1 = bitcast <2 x i32> %a to <1 x i64>          ; <<1 x i64>> [#uses=1]
  %2 = tail call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %1, <1 x i64> %0, i8 9) ; <<1 x i64>> [#uses=1]
  %3 = extractelement <1 x i64> %2, i32 0         ; <i64> [#uses=1]
  %retval12 = bitcast i64 %3 to double            ; <double> [#uses=1]
  ret double %retval12
}

; 64-bit palignr with immediate 8 (exactly the operand width).
define double @align5(<2 x i32> %a, <2 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <2 x i32> %b to <1 x i64>          ; <<1 x i64>> [#uses=1]
  %1 = bitcast <2 x i32> %a to <1 x i64>          ; <<1 x i64>> [#uses=1]
  %2 = tail call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %1, <1 x i64> %0, i8 8) ; <<1 x i64>> [#uses=1]
  %3 = extractelement <1 x i64> %2, i32 0         ; <i64> [#uses=1]
  %retval12 = bitcast i64 %3 to double            ; <double> [#uses=1]
  ret double %retval12
}

; 128-bit palignr with an out-of-range immediate (32 >= 2*16 bytes shifts everything out).
define <4 x i32> @align4(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <4 x i32> %b to <2 x i64>          ; <<2 x i64>> [#uses=1]
  %1 = bitcast <4 x i32> %a to <2 x i64>          ; <<2 x i64>> [#uses=1]
  %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 32) ; <<2 x i64>> [#uses=1]
  %3 = bitcast <2 x i64> %2 to <4 x i32>          ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %3
}

; 128-bit palignr intrinsic; expected to be removed by opt (per the CHECK-NOT).
declare <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64>, <2 x i64>, i8) nounwind readnone

; 128-bit palignr with immediate 17 (straddles the 16-byte operand boundary).
define <4 x i32> @align3(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <4 x i32> %b to <2 x i64>          ; <<2 x i64>> [#uses=1]
  %1 = bitcast <4 x i32> %a to <2 x i64>          ; <<2 x i64>> [#uses=1]
  %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 17) ; <<2 x i64>> [#uses=1]
  %3 = bitcast <2 x i64> %2 to <4 x i32>          ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %3
}

; 128-bit palignr with immediate 16 (exactly the operand width).
define <4 x i32> @align2(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
entry:
  %0 = bitcast <4 x i32> %b to <2 x i64>          ; <<2 x i64>> [#uses=1]
  %1 = bitcast <4 x i32> %a to <2 x i64>          ; <<2 x i64>> [#uses=1]
  %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 16) ; <<2 x i64>> [#uses=1]
  %3 = bitcast <2 x i64> %2 to <4 x i32>          ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %3
}
