; Test the MSA intrinsics that are encoded with the 3RF instruction format and
; use the result as a third operand and perform fixed-point operations.

; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s

@llvm_mips_madd_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_madd_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_madd_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
@llvm_mips_madd_q_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

; Load three <8 x i16> operands, apply the madd.q.h fixed-point
; multiply-add intrinsic, and store the result for the CHECK lines below.
define void @llvm_mips_madd_q_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG2
  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG3
  %3 = tail call <8 x i16> @llvm.mips.madd.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
  store <8 x i16> %3, <8 x i16>* @llvm_mips_madd_q_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.madd.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_madd_q_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: ld.h
; CHECK: madd_q.h
; CHECK: st.h
; CHECK: .size llvm_mips_madd_q_h_test
;
@llvm_mips_madd_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_madd_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_madd_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
@llvm_mips_madd_q_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

; Load three <4 x i32> operands, apply the madd.q.w fixed-point
; multiply-add intrinsic, and store the result for the CHECK lines below.
define void @llvm_mips_madd_q_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG2
  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG3
  %3 = tail call <4 x i32> @llvm.mips.madd.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
  store <4 x i32> %3, <4 x i32>* @llvm_mips_madd_q_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.madd.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_madd_q_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: ld.w
; CHECK: madd_q.w
; CHECK: st.w
; CHECK: .size llvm_mips_madd_q_w_test
;
@llvm_mips_maddr_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_maddr_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_maddr_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
@llvm_mips_maddr_q_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

; Load three <8 x i16> operands, apply the maddr.q.h (rounding) fixed-point
; multiply-add intrinsic, and store the result for the CHECK lines below.
define void @llvm_mips_maddr_q_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG2
  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG3
  %3 = tail call <8 x i16> @llvm.mips.maddr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
  store <8 x i16> %3, <8 x i16>* @llvm_mips_maddr_q_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.maddr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_maddr_q_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: ld.h
; CHECK: maddr_q.h
; CHECK: st.h
; CHECK: .size llvm_mips_maddr_q_h_test
;
@llvm_mips_maddr_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_maddr_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_maddr_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
@llvm_mips_maddr_q_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

; Load three <4 x i32> operands, apply the maddr.q.w (rounding) fixed-point
; multiply-add intrinsic, and store the result for the CHECK lines below.
define void @llvm_mips_maddr_q_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG2
  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG3
  %3 = tail call <4 x i32> @llvm.mips.maddr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
  store <4 x i32> %3, <4 x i32>* @llvm_mips_maddr_q_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.maddr.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_maddr_q_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: ld.w
; CHECK: maddr_q.w
; CHECK: st.w
; CHECK: .size llvm_mips_maddr_q_w_test
;
@llvm_mips_msub_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_msub_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_msub_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
@llvm_mips_msub_q_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

; Load three <8 x i16> operands, apply the msub.q.h fixed-point
; multiply-subtract intrinsic, and store the result for the CHECK lines below.
define void @llvm_mips_msub_q_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG2
  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG3
  %3 = tail call <8 x i16> @llvm.mips.msub.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
  store <8 x i16> %3, <8 x i16>* @llvm_mips_msub_q_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.msub.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_msub_q_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: ld.h
; CHECK: msub_q.h
; CHECK: st.h
; CHECK: .size llvm_mips_msub_q_h_test
;
@llvm_mips_msub_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_msub_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_msub_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
@llvm_mips_msub_q_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

; Load three <4 x i32> operands, apply the msub.q.w fixed-point
; multiply-subtract intrinsic, and store the result for the CHECK lines below.
define void @llvm_mips_msub_q_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG2
  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG3
  %3 = tail call <4 x i32> @llvm.mips.msub.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
  store <4 x i32> %3, <4 x i32>* @llvm_mips_msub_q_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.msub.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_msub_q_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: ld.w
; CHECK: msub_q.w
; CHECK: st.w
; CHECK: .size llvm_mips_msub_q_w_test
;
@llvm_mips_msubr_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_msubr_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_msubr_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
@llvm_mips_msubr_q_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

; Load three <8 x i16> operands, apply the msubr.q.h (rounding) fixed-point
; multiply-subtract intrinsic, and store the result for the CHECK lines below.
define void @llvm_mips_msubr_q_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG2
  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG3
  %3 = tail call <8 x i16> @llvm.mips.msubr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
  store <8 x i16> %3, <8 x i16>* @llvm_mips_msubr_q_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.msubr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind

; CHECK: llvm_mips_msubr_q_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: ld.h
; CHECK: msubr_q.h
; CHECK: st.h
; CHECK: .size llvm_mips_msubr_q_h_test
;
@llvm_mips_msubr_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_msubr_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_msubr_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
@llvm_mips_msubr_q_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

; Load three <4 x i32> operands, apply the msubr.q.w (rounding) fixed-point
; multiply-subtract intrinsic, and store the result for the CHECK lines below.
define void @llvm_mips_msubr_q_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG2
  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG3
  %3 = tail call <4 x i32> @llvm.mips.msubr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
  store <4 x i32> %3, <4 x i32>* @llvm_mips_msubr_q_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.msubr.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind

; CHECK: llvm_mips_msubr_q_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: ld.w
; CHECK: msubr_q.w
; CHECK: st.w
; CHECK: .size llvm_mips_msubr_q_w_test
;