; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
3
; Global operand/result slots shared by the test functions below:
; vd*/vf* hold <2 x double>/<4 x float> inputs (a, b) and results (r);
; vbll*/vbi* hold the <2 x i64>/<4 x i32> comparison results.
@vda = common global <2 x double> zeroinitializer, align 16
@vdb = common global <2 x double> zeroinitializer, align 16
@vdr = common global <2 x double> zeroinitializer, align 16
@vfa = common global <4 x float> zeroinitializer, align 16
@vfb = common global <4 x float> zeroinitializer, align 16
@vfr = common global <4 x float> zeroinitializer, align 16
@vbllr = common global <2 x i64> zeroinitializer, align 16
@vbir = common global <4 x i32> zeroinitializer, align 16
@vblla = common global <2 x i64> zeroinitializer, align 16
@vbllb = common global <2 x i64> zeroinitializer, align 16
@vbia = common global <4 x i32> zeroinitializer, align 16
@vbib = common global <4 x i32> zeroinitializer, align 16

; Function Attrs: nounwind
; Verifies that the llvm.ppc.vsx.xvdivdp intrinsic selects the xvdivdp instruction.
define void @test1() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %1)
  store <2 x double> %2, <2 x double>* @vdr, align 16
  ret void
; CHECK-LABEL: @test1
; CHECK: xvdivdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind
; Verifies that the llvm.ppc.vsx.xvdivsp intrinsic selects the xvdivsp instruction.
define void @test2() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %1)
  store <4 x float> %2, <4 x float>* @vfr, align 16
  ret void
; CHECK-LABEL: @test2
; CHECK: xvdivsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind
; Verifies that llvm.ceil.v2f64 is lowered to the xvrdpip instruction.
; NOTE(review): %0 duplicates the load of @vda and is unused by the ceil call;
; kept byte-for-byte to preserve the original test input.
define void @test3() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vda, align 16
  %2 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %1)
  store <2 x double> %2, <2 x double>* @vdr, align 16
  ret void
; CHECK-LABEL: @test3
; CHECK: xvrdpip {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind
; Verifies that llvm.ceil.v4f32 is lowered to the xvrspip instruction.
; NOTE(review): %0 duplicates the load of @vfa and is unused by the ceil call;
; kept byte-for-byte to preserve the original test input.
define void @test4() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfa, align 16
  %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %1)
  store <4 x float> %2, <4 x float>* @vfr, align 16
  ret void
; CHECK-LABEL: @test4
; CHECK: xvrspip {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind
; Verifies that the llvm.ppc.vsx.xvcmpeqdp intrinsic selects the xvcmpeqdp instruction.
define void @test5() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, <2 x i64>* @vbllr, align 16
  ret void
; CHECK-LABEL: @test5
; CHECK: xvcmpeqdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind
; Verifies that the llvm.ppc.vsx.xvcmpeqsp intrinsic selects the xvcmpeqsp instruction.
define void @test6() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, <4 x i32>* @vbir, align 16
  ret void
; CHECK-LABEL: @test6
; CHECK: xvcmpeqsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind
; Verifies that the llvm.ppc.vsx.xvcmpgedp intrinsic selects the xvcmpgedp instruction.
define void @test7() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, <2 x i64>* @vbllr, align 16
  ret void
; CHECK-LABEL: @test7
; CHECK: xvcmpgedp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind
; Verifies that the llvm.ppc.vsx.xvcmpgesp intrinsic selects the xvcmpgesp instruction.
define void @test8() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, <4 x i32>* @vbir, align 16
  ret void
; CHECK-LABEL: @test8
; CHECK: xvcmpgesp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind
; Verifies that the llvm.ppc.vsx.xvcmpgtdp intrinsic selects the xvcmpgtdp instruction.
define void @test9() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, <2 x i64>* @vbllr, align 16
  ret void
; CHECK-LABEL: @test9
; CHECK: xvcmpgtdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind
; Verifies that the llvm.ppc.vsx.xvcmpgtsp intrinsic selects the xvcmpgtsp instruction.
define void @test10() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, <4 x i32>* @vbir, align 16
  ret void
; CHECK-LABEL: @test10
; CHECK: xvcmpgtsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind
; Verifies that the llvm.ppc.vsx.xvresp intrinsic selects the xvresp instruction
; (argument round-tripped through a stack slot rather than used directly).
define <4 x float> @emit_xvresp(<4 x float> %a) {
entry:
  %a.addr = alloca <4 x float>, align 16
  store <4 x float> %a, <4 x float>* %a.addr, align 16
  %0 = load <4 x float>, <4 x float>* %a.addr, align 16
  %1 = call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float> %0)
  ret <4 x float> %1
; CHECK-LABEL: @emit_xvresp
; CHECK: xvresp {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind
; Verifies that the llvm.ppc.vsx.xvredp intrinsic selects the xvredp instruction
; (argument round-tripped through a stack slot rather than used directly).
define <2 x double> @emit_xvredp(<2 x double> %a) {
entry:
  %a.addr = alloca <2 x double>, align 16
  store <2 x double> %a, <2 x double>* %a.addr, align 16
  %0 = load <2 x double>, <2 x double>* %a.addr, align 16
  %1 = call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double> %0)
  ret <2 x double> %1
; CHECK-LABEL: @emit_xvredp
; CHECK: xvredp {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind readnone
; Verifies xvcvdpsxws selection; the tail call lets the result reuse the
; incoming argument register (both operands are register 34).
define <4 x i32> @emit_xvcvdpsxws(<2 x double> %a) {
entry:
  %0 = tail call <4 x i32> @llvm.ppc.vsx.xvcvdpsxws(<2 x double> %a)
  ret <4 x i32> %0
; CHECK-LABEL: @emit_xvcvdpsxws
; CHECK: xvcvdpsxws 34, 34
}

; Function Attrs: nounwind readnone
; Verifies xvcvdpuxws selection with the result reusing the argument register.
define <4 x i32> @emit_xvcvdpuxws(<2 x double> %a) {
entry:
  %0 = tail call <4 x i32> @llvm.ppc.vsx.xvcvdpuxws(<2 x double> %a)
  ret <4 x i32> %0
; CHECK-LABEL: @emit_xvcvdpuxws
; CHECK: xvcvdpuxws 34, 34
}

; Function Attrs: nounwind readnone
; Verifies xvcvsxwdp selection with the result reusing the argument register.
define <2 x double> @emit_xvcvsxwdp(<4 x i32> %a) {
entry:
  %0 = tail call <2 x double> @llvm.ppc.vsx.xvcvsxwdp(<4 x i32> %a)
  ret <2 x double> %0
; CHECK-LABEL: @emit_xvcvsxwdp
; CHECK: xvcvsxwdp 34, 34
}

; Function Attrs: nounwind readnone
; Verifies xvcvuxwdp selection with the result reusing the argument register.
define <2 x double> @emit_xvcvuxwdp(<4 x i32> %a) {
entry:
  %0 = tail call <2 x double> @llvm.ppc.vsx.xvcvuxwdp(<4 x i32> %a)
  ret <2 x double> %0
; CHECK-LABEL: @emit_xvcvuxwdp
; CHECK: xvcvuxwdp 34, 34
}

; Function Attrs: nounwind readnone
; Verifies xvcvspdp selection with the result reusing the argument register.
define <2 x double> @emit_xvcvspdp(<4 x float> %a) {
entry:
  %0 = tail call <2 x double> @llvm.ppc.vsx.xvcvspdp(<4 x float> %a)
  ret <2 x double> %0
; CHECK-LABEL: @emit_xvcvspdp
; CHECK: xvcvspdp 34, 34
}

; Function Attrs: nounwind readnone
; Verifies xvcvsxdsp selection with the result reusing the argument register.
define <4 x float> @emit_xvcvsxdsp(<2 x i64> %a) {
entry:
  %0 = tail call <4 x float> @llvm.ppc.vsx.xvcvsxdsp(<2 x i64> %a)
  ret <4 x float> %0
; CHECK-LABEL: @emit_xvcvsxdsp
; CHECK: xvcvsxdsp 34, 34
}

; Function Attrs: nounwind readnone
; Verifies xvcvuxdsp selection with the result reusing the argument register.
define <4 x float> @emit_xvcvuxdsp(<2 x i64> %a) {
entry:
  %0 = tail call <4 x float> @llvm.ppc.vsx.xvcvuxdsp(<2 x i64> %a)
  ret <4 x float> %0
; CHECK-LABEL: @emit_xvcvuxdsp
; CHECK: xvcvuxdsp 34, 34
}

; Function Attrs: nounwind readnone
; Verifies xvcvdpsp selection with the result reusing the argument register.
define <4 x float> @emit_xvcvdpsp(<2 x double> %a) {
entry:
  %0 = tail call <4 x float> @llvm.ppc.vsx.xvcvdpsp(<2 x double> %a)
  ret <4 x float> %0
; CHECK-LABEL: @emit_xvcvdpsp
; CHECK: xvcvdpsp 34, 34
}

; Function Attrs: nounwind readnone

; Intrinsic declarations for the tests above.
; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>)

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>)

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ceil.v2f64(<2 x double>)

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ceil.v4f32(<4 x float>)

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float>, <4 x float>)
; NOTE(review): attribute group #1 is referenced below but not defined in this
; chunk — confirm its definition exists in the full file.
declare <4 x float> @llvm.ppc.vsx.xvcvdpsp(<2 x double>) #1
declare <4 x i32> @llvm.ppc.vsx.xvcvdpsxws(<2 x double>) #1
declare <4 x i32> @llvm.ppc.vsx.xvcvdpuxws(<2 x double>) #1
declare <2 x double> @llvm.ppc.vsx.xvcvsxwdp(<4 x i32>) #1
declare <2 x double> @llvm.ppc.vsx.xvcvuxwdp(<4 x i32>) #1
declare <2 x double> @llvm.ppc.vsx.xvcvspdp(<4 x float>) #1
declare <4 x float> @llvm.ppc.vsx.xvcvsxdsp(<2 x i64>) #1
declare <4 x float> @llvm.ppc.vsx.xvcvuxdsp(<2 x i64>) #1