; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux \
; RUN:   -mcpu=pwr8 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr  | FileCheck \
; RUN:   --check-prefix=P8 %s
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux \
; RUN:   -mcpu=pwr9 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr | FileCheck \
; RUN:   --check-prefix=P9 %s

; FIXME: Constrained fpext would fail if VSX feature disabled. Add no-vsx

; Tests lowering of constrained floating-point rounding/conversion intrinsics
; (ceil, floor, nearbyint, fpext, fptrunc, round, trunc) on pwr8 and pwr9.

declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata)
declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)

declare float @llvm.experimental.constrained.floor.f32(float, metadata)
declare double @llvm.experimental.constrained.floor.f64(double, metadata)
declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)

declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)

declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float>, metadata)
declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float>, metadata)

declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double>, metadata, metadata)
declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata)

declare float @llvm.experimental.constrained.round.f32(float, metadata)
declare double @llvm.experimental.constrained.round.f64(double, metadata)
declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)

declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)

define float @ceil_f32(float %f1) {
; P8-LABEL: ceil_f32:
; P8:       # %bb.0:
; P8-NEXT:    xsrdpip f1, f1
; P8-NEXT:    blr
;
; P9-LABEL: ceil_f32:
; P9:       # %bb.0:
; P9-NEXT:    xsrdpip f1, f1
; P9-NEXT:    blr
  %res = call float @llvm.experimental.constrained.ceil.f32(
                        float %f1,
                        metadata !"fpexcept.strict")
  ret float %res
}

define double @ceil_f64(double %f1) {
; P8-LABEL: ceil_f64:
; P8:       # %bb.0:
; P8-NEXT:    xsrdpip f1, f1
; P8-NEXT:    blr
;
; P9-LABEL: ceil_f64:
; P9:       # %bb.0:
; P9-NEXT:    xsrdpip f1, f1
; P9-NEXT:    blr
  %res = call double @llvm.experimental.constrained.ceil.f64(
                        double %f1,
                        metadata !"fpexcept.strict")
  ret double %res
}

define <4 x float> @ceil_v4f32(<4 x float> %vf1) {
; P8-LABEL: ceil_v4f32:
; P8:       # %bb.0:
; P8-NEXT:    xvrspip v2, v2
; P8-NEXT:    blr
;
; P9-LABEL: ceil_v4f32:
; P9:       # %bb.0:
; P9-NEXT:    xvrspip v2, v2
; P9-NEXT:    blr
  %res = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(
                        <4 x float> %vf1,
                        metadata !"fpexcept.strict")
  ret <4 x float> %res
}

define <2 x double> @ceil_v2f64(<2 x double> %vf1) {
; P8-LABEL: ceil_v2f64:
; P8:       # %bb.0:
; P8-NEXT:    xvrdpip v2, v2
; P8-NEXT:    blr
;
; P9-LABEL: ceil_v2f64:
; P9:       # %bb.0:
; P9-NEXT:    xvrdpip v2, v2
; P9-NEXT:    blr
  %res = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(
                        <2 x double> %vf1,
                        metadata !"fpexcept.strict")
  ret <2 x double> %res
}

define float @floor_f32(float %f1) {
; P8-LABEL: floor_f32:
; P8:       # %bb.0:
; P8-NEXT:    xsrdpim f1, f1
; P8-NEXT:    blr
;
; P9-LABEL: floor_f32:
; P9:       # %bb.0:
; P9-NEXT:    xsrdpim f1, f1
; P9-NEXT:    blr
  %res = call float @llvm.experimental.constrained.floor.f32(
                        float %f1,
                        metadata !"fpexcept.strict")
  ret float %res
}

define double @floor_f64(double %f1) {
; P8-LABEL: floor_f64:
; P8:       # %bb.0:
; P8-NEXT:    xsrdpim f1, f1
; P8-NEXT:    blr
;
; P9-LABEL: floor_f64:
; P9:       # %bb.0:
; P9-NEXT:    xsrdpim f1, f1
; P9-NEXT:    blr
  %res = call double @llvm.experimental.constrained.floor.f64(
                        double %f1,
                        metadata !"fpexcept.strict")
  ret double %res;
}

define <4 x float> @floor_v4f32(<4 x float> %vf1) {
; P8-LABEL: floor_v4f32:
; P8:       # %bb.0:
; P8-NEXT:    xvrspim v2, v2
; P8-NEXT:    blr
;
; P9-LABEL: floor_v4f32:
; P9:       # %bb.0:
; P9-NEXT:    xvrspim v2, v2
; P9-NEXT:    blr
  %res = call <4 x float> @llvm.experimental.constrained.floor.v4f32(
                        <4 x float> %vf1,
                        metadata !"fpexcept.strict")
  ret <4 x float> %res;
}

define <2 x double> @floor_v2f64(<2 x double> %vf1) {
; P8-LABEL: floor_v2f64:
; P8:       # %bb.0:
; P8-NEXT:    xvrdpim v2, v2
; P8-NEXT:    blr
;
; P9-LABEL: floor_v2f64:
; P9:       # %bb.0:
; P9-NEXT:    xvrdpim v2, v2
; P9-NEXT:    blr
  %res = call <2 x double> @llvm.experimental.constrained.floor.v2f64(
                        <2 x double> %vf1,
                        metadata !"fpexcept.strict")
  ret <2 x double> %res;
}

define double @nearbyint_f64(double %f1, double %f2) {
; P8-LABEL: nearbyint_f64:
; P8:       # %bb.0:
; P8-NEXT:    mflr r0
; P8-NEXT:    std r0, 16(r1)
; P8-NEXT:    stdu r1, -112(r1)
; P8-NEXT:    .cfi_def_cfa_offset 112
; P8-NEXT:    .cfi_offset lr, 16
; P8-NEXT:    bl nearbyint
; P8-NEXT:    nop
; P8-NEXT:    addi r1, r1, 112
; P8-NEXT:    ld r0, 16(r1)
; P8-NEXT:    mtlr r0
; P8-NEXT:    blr
;
; P9-LABEL: nearbyint_f64:
; P9:       # %bb.0:
; P9-NEXT:    mflr r0
; P9-NEXT:    std r0, 16(r1)
; P9-NEXT:    stdu r1, -32(r1)
; P9-NEXT:    .cfi_def_cfa_offset 32
; P9-NEXT:    .cfi_offset lr, 16
; P9-NEXT:    bl nearbyint
; P9-NEXT:    nop
; P9-NEXT:    addi r1, r1, 32
; P9-NEXT:    ld r0, 16(r1)
; P9-NEXT:    mtlr r0
; P9-NEXT:    blr
  %res = call double @llvm.experimental.constrained.nearbyint.f64(
                        double %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}

define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) {
; P8-LABEL: nearbyint_v4f32:
; P8:       # %bb.0:
; P8-NEXT:    mflr r0
; P8-NEXT:    std r0, 16(r1)
; P8-NEXT:    stdu r1, -176(r1)
; P8-NEXT:    .cfi_def_cfa_offset 176
; P8-NEXT:    .cfi_offset lr, 16
; P8-NEXT:    .cfi_offset v30, -32
; P8-NEXT:    .cfi_offset v31, -16
; P8-NEXT:    xxsldwi vs0, v2, v2, 3
; P8-NEXT:    li r3, 144
; P8-NEXT:    stxvd2x v30, r1, r3 # 16-byte Folded Spill
; P8-NEXT:    li r3, 160
; P8-NEXT:    stxvd2x v31, r1, r3 # 16-byte Folded Spill
; P8-NEXT:    vmr v31, v2
; P8-NEXT:    xscvspdpn f1, vs0
; P8-NEXT:    bl nearbyintf
; P8-NEXT:    nop
; P8-NEXT:    xxsldwi vs0, v31, v31, 1
; P8-NEXT:    # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT:    li r3, 128
; P8-NEXT:    stxvd2x vs1, r1, r3 # 16-byte Folded Spill
; P8-NEXT:    xscvspdpn f1, vs0
; P8-NEXT:    bl nearbyintf
; P8-NEXT:    nop
; P8-NEXT:    li r3, 128
; P8-NEXT:    # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT:    lxvd2x vs0, r1, r3 # 16-byte Folded Reload
; P8-NEXT:    xxmrghd vs0, vs1, vs0
; P8-NEXT:    xscvspdpn f1, v31
; P8-NEXT:    xvcvdpsp v30, vs0
; P8-NEXT:    bl nearbyintf
; P8-NEXT:    nop
; P8-NEXT:    xxswapd vs0, v31
; P8-NEXT:    # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT:    li r3, 128
; P8-NEXT:    stxvd2x vs1, r1, r3 # 16-byte Folded Spill
; P8-NEXT:    xscvspdpn f1, vs0
; P8-NEXT:    bl nearbyintf
; P8-NEXT:    nop
; P8-NEXT:    li r3, 128
; P8-NEXT:    # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT:    lxvd2x vs0, r1, r3 # 16-byte Folded Reload
; P8-NEXT:    li r3, 160
; P8-NEXT:    lxvd2x v31, r1, r3 # 16-byte Folded Reload
; P8-NEXT:    li r3, 144
; P8-NEXT:    xxmrghd vs0, vs0, vs1
; P8-NEXT:    xvcvdpsp v2, vs0
; P8-NEXT:    vmrgew v2, v2, v30
; P8-NEXT:    lxvd2x v30, r1, r3 # 16-byte Folded Reload
; P8-NEXT:    addi r1, r1, 176
; P8-NEXT:    ld r0, 16(r1)
; P8-NEXT:    mtlr r0
; P8-NEXT:    blr
;
; P9-LABEL: nearbyint_v4f32:
; P9:       # %bb.0:
; P9-NEXT:    mflr r0
; P9-NEXT:    std r0, 16(r1)
; P9-NEXT:    stdu r1, -80(r1)
; P9-NEXT:    .cfi_def_cfa_offset 80
; P9-NEXT:    .cfi_offset lr, 16
; P9-NEXT:    .cfi_offset v30, -32
; P9-NEXT:    .cfi_offset v31, -16
; P9-NEXT:    xxsldwi vs0, v2, v2, 3
; P9-NEXT:    stxv v30, 48(r1) # 16-byte Folded Spill
; P9-NEXT:    xscvspdpn f1, vs0
; P9-NEXT:    stxv v31, 64(r1) # 16-byte Folded Spill
; P9-NEXT:    vmr v31, v2
; P9-NEXT:    bl nearbyintf
; P9-NEXT:    nop
; P9-NEXT:    xxsldwi vs0, v31, v31, 1
; P9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
; P9-NEXT:    stxv vs1, 32(r1) # 16-byte Folded Spill
; P9-NEXT:    xscvspdpn f1, vs0
; P9-NEXT:    bl nearbyintf
; P9-NEXT:    nop
; P9-NEXT:    lxv vs0, 32(r1) # 16-byte Folded Reload
; P9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
; P9-NEXT:    xxmrghd vs0, vs1, vs0
; P9-NEXT:    xscvspdpn f1, v31
; P9-NEXT:    xvcvdpsp v30, vs0
; P9-NEXT:    bl nearbyintf
; P9-NEXT:    nop
; P9-NEXT:    xxswapd vs0, v31
; P9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
; P9-NEXT:    stxv vs1, 32(r1) # 16-byte Folded Spill
; P9-NEXT:    xscvspdpn f1, vs0
; P9-NEXT:    bl nearbyintf
; P9-NEXT:    nop
; P9-NEXT:    lxv vs0, 32(r1) # 16-byte Folded Reload
; P9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
; P9-NEXT:    lxv v31, 64(r1) # 16-byte Folded Reload
; P9-NEXT:    xxmrghd vs0, vs0, vs1
; P9-NEXT:    xvcvdpsp v2, vs0
; P9-NEXT:    vmrgew v2, v2, v30
; P9-NEXT:    lxv v30, 48(r1) # 16-byte Folded Reload
; P9-NEXT:    addi r1, r1, 80
; P9-NEXT:    ld r0, 16(r1)
; P9-NEXT:    mtlr r0
; P9-NEXT:    blr
  %res = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(
                        <4 x float> %vf1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret <4 x float> %res
}

define <2 x double> @nearbyint_v2f64(<2 x double> %vf1, <2 x double> %vf2) {
; P8-LABEL: nearbyint_v2f64:
; P8:       # %bb.0:
; P8-NEXT:    mflr r0
; P8-NEXT:    std r0, 16(r1)
; P8-NEXT:    stdu r1, -160(r1)
; P8-NEXT:    .cfi_def_cfa_offset 160
; P8-NEXT:    .cfi_offset lr, 16
; P8-NEXT:    .cfi_offset v31, -16
; P8-NEXT:    li r3, 144
; P8-NEXT:    stxvd2x v31, r1, r3 # 16-byte Folded Spill
; P8-NEXT:    vmr v31, v2
; P8-NEXT:    xxlor f1, v31, v31
; P8-NEXT:    bl nearbyint
; P8-NEXT:    nop
; P8-NEXT:    # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT:    li r3, 128
; P8-NEXT:    stxvd2x vs1, r1, r3 # 16-byte Folded Spill
; P8-NEXT:    xxswapd vs1, v31
; P8-NEXT:    # kill: def $f1 killed $f1 killed $vsl1
; P8-NEXT:    bl nearbyint
; P8-NEXT:    nop
; P8-NEXT:    li r3, 128
; P8-NEXT:    # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT:    lxvd2x vs0, r1, r3 # 16-byte Folded Reload
; P8-NEXT:    li r3, 144
; P8-NEXT:    lxvd2x v31, r1, r3 # 16-byte Folded Reload
; P8-NEXT:    xxmrghd v2, vs0, vs1
; P8-NEXT:    addi r1, r1, 160
; P8-NEXT:    ld r0, 16(r1)
; P8-NEXT:    mtlr r0
; P8-NEXT:    blr
;
; P9-LABEL: nearbyint_v2f64:
; P9:       # %bb.0:
; P9-NEXT:    mflr r0
; P9-NEXT:    std r0, 16(r1)
; P9-NEXT:    stdu r1, -64(r1)
; P9-NEXT:    .cfi_def_cfa_offset 64
; P9-NEXT:    .cfi_offset lr, 16
; P9-NEXT:    .cfi_offset v31, -16
; P9-NEXT:    stxv v31, 48(r1) # 16-byte Folded Spill
; P9-NEXT:    vmr v31, v2
; P9-NEXT:    xscpsgndp f1, v31, v31
; P9-NEXT:    bl nearbyint
; P9-NEXT:    nop
; P9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
; P9-NEXT:    stxv vs1, 32(r1) # 16-byte Folded Spill
; P9-NEXT:    xxswapd vs1, v31
; P9-NEXT:    # kill: def $f1 killed $f1 killed $vsl1
; P9-NEXT:    bl nearbyint
; P9-NEXT:    nop
; P9-NEXT:    lxv vs0, 32(r1) # 16-byte Folded Reload
; P9-NEXT:    lxv v31, 48(r1) # 16-byte Folded Reload
; P9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
; P9-NEXT:    xxmrghd v2, vs0, vs1
; P9-NEXT:    addi r1, r1, 64
; P9-NEXT:    ld r0, 16(r1)
; P9-NEXT:    mtlr r0
; P9-NEXT:    blr
  %res = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
                        <2 x double> %vf1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret <2 x double> %res
}

define <4 x double> @fpext_v4f64_v4f32(<4 x float> %vf1) {
; P8-LABEL: fpext_v4f64_v4f32:
; P8:       # %bb.0:
; P8-NEXT:    xxsldwi vs0, v2, v2, 1
; P8-NEXT:    xxsldwi vs1, v2, v2, 3
; P8-NEXT:    xxswapd vs3, v2
; P8-NEXT:    xscvspdpn f2, v2
; P8-NEXT:    xscvspdpn f0, vs0
; P8-NEXT:    xscvspdpn f1, vs1
; P8-NEXT:    xscvspdpn f3, vs3
; P8-NEXT:    xxmrghd v2, vs2, vs0
; P8-NEXT:    xxmrghd v3, vs3, vs1
; P8-NEXT:    blr
;
; P9-LABEL: fpext_v4f64_v4f32:
; P9:       # %bb.0:
; P9-NEXT:    xxsldwi vs0, v2, v2, 3
; P9-NEXT:    xxswapd vs1, v2
; P9-NEXT:    xscvspdpn f0, vs0
; P9-NEXT:    xscvspdpn f1, vs1
; P9-NEXT:    xxsldwi vs2, v2, v2, 1
; P9-NEXT:    xscvspdpn f2, vs2
; P9-NEXT:    xxmrghd vs0, vs1, vs0
; P9-NEXT:    xscvspdpn f1, v2
; P9-NEXT:    xxmrghd v3, vs1, vs2
; P9-NEXT:    xxlor v2, vs0, vs0
; P9-NEXT:    blr
  %res = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(
                        <4 x float> %vf1,
                        metadata !"fpexcept.strict")
  ret <4 x double> %res
}

define <2 x double> @fpext_v2f64_v2f32(<2 x float> %vf1) {
; P8-LABEL: fpext_v2f64_v2f32:
; P8:       # %bb.0:
; P8-NEXT:    xxsldwi vs0, v2, v2, 1
; P8-NEXT:    xscvspdpn f1, v2
; P8-NEXT:    xscvspdpn f0, vs0
; P8-NEXT:    xxmrghd v2, vs1, vs0
; P8-NEXT:    blr
;
; P9-LABEL: fpext_v2f64_v2f32:
; P9:       # %bb.0:
; P9-NEXT:    xxsldwi vs0, v2, v2, 3
; P9-NEXT:    xxswapd vs1, v2
; P9-NEXT:    xscvspdpn f0, vs0
; P9-NEXT:    xscvspdpn f1, vs1
; P9-NEXT:    xxmrghd v2, vs1, vs0
; P9-NEXT:    blr
  %res = call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(
                        <2 x float> %vf1,
                        metadata !"fpexcept.strict")
  ret <2 x double> %res
}

define float @fptrunc_f32_f64(double %f1) {
; P8-LABEL: fptrunc_f32_f64:
; P8:       # %bb.0:
; P8-NEXT:    xsrsp f1, f1
; P8-NEXT:    blr
;
; P9-LABEL: fptrunc_f32_f64:
; P9:       # %bb.0:
; P9-NEXT:    xsrsp f1, f1
; P9-NEXT:    blr
  %res = call float @llvm.experimental.constrained.fptrunc.f32.f64(
                        double %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret float %res;
}

define <4 x float> @fptrunc_v4f32_v4f64(<4 x double> %vf1) {
; P8-LABEL: fptrunc_v4f32_v4f64:
; P8:       # %bb.0:
; P8-NEXT:    xxmrgld vs0, v2, v3
; P8-NEXT:    xxmrghd vs1, v2, v3
; P8-NEXT:    xvcvdpsp v2, vs0
; P8-NEXT:    xvcvdpsp v3, vs1
; P8-NEXT:    vmrgew v2, v3, v2
; P8-NEXT:    blr
;
; P9-LABEL: fptrunc_v4f32_v4f64:
; P9:       # %bb.0:
; P9-NEXT:    xxmrgld vs0, v3, v2
; P9-NEXT:    xvcvdpsp v4, vs0
; P9-NEXT:    xxmrghd vs0, v3, v2
; P9-NEXT:    xvcvdpsp v2, vs0
; P9-NEXT:    vmrgew v2, v2, v4
; P9-NEXT:    blr
  %res = call <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(
                        <4 x double> %vf1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret <4 x float> %res
}

define <2 x float> @fptrunc_v2f32_v2f64(<2 x double> %vf1) {
; P8-LABEL: fptrunc_v2f32_v2f64:
; P8:       # %bb.0:
; P8-NEXT:    xxswapd vs0, v2
; P8-NEXT:    xsrsp f1, v2
; P8-NEXT:    xsrsp f0, f0
; P8-NEXT:    xscvdpspn v2, f1
; P8-NEXT:    xscvdpspn v3, f0
; P8-NEXT:    vmrghw v2, v2, v3
; P8-NEXT:    blr
;
; P9-LABEL: fptrunc_v2f32_v2f64:
; P9:       # %bb.0:
; P9-NEXT:    xsrsp f0, v2
; P9-NEXT:    xscvdpspn vs0, f0
; P9-NEXT:    xxsldwi v3, vs0, vs0, 3
; P9-NEXT:    xxswapd vs0, v2
; P9-NEXT:    xsrsp f0, f0
; P9-NEXT:    xscvdpspn vs0, f0
; P9-NEXT:    xxsldwi v2, vs0, vs0, 3
; P9-NEXT:    vmrghw v2, v3, v2
; P9-NEXT:    blr
  %res = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(
                        <2 x double> %vf1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret <2 x float> %res
}

define float @round_f32(float %f1) {
; P8-LABEL: round_f32:
; P8:       # %bb.0:
; P8-NEXT:    xsrdpi f1, f1
; P8-NEXT:    blr
;
; P9-LABEL: round_f32:
; P9:       # %bb.0:
; P9-NEXT:    xsrdpi f1, f1
; P9-NEXT:    blr
  %res = call float @llvm.experimental.constrained.round.f32(
                        float %f1,
                        metadata !"fpexcept.strict")
  ret float %res
}

define double @round_f64(double %f1) {
; P8-LABEL: round_f64:
; P8:       # %bb.0:
; P8-NEXT:    xsrdpi f1, f1
; P8-NEXT:    blr
;
; P9-LABEL: round_f64:
; P9:       # %bb.0:
; P9-NEXT:    xsrdpi f1, f1
; P9-NEXT:    blr
  %res = call double @llvm.experimental.constrained.round.f64(
                        double %f1,
                        metadata !"fpexcept.strict")
  ret double %res
}

define <4 x float> @round_v4f32(<4 x float> %vf1) {
; P8-LABEL: round_v4f32:
; P8:       # %bb.0:
; P8-NEXT:    xvrspi v2, v2
; P8-NEXT:    blr
;
; P9-LABEL: round_v4f32:
; P9:       # %bb.0:
; P9-NEXT:    xvrspi v2, v2
; P9-NEXT:    blr
  %res = call <4 x float> @llvm.experimental.constrained.round.v4f32(
                        <4 x float> %vf1,
                        metadata !"fpexcept.strict")
  ret <4 x float> %res
}

define <2 x double> @round_v2f64(<2 x double> %vf1) {
; P8-LABEL: round_v2f64:
; P8:       # %bb.0:
; P8-NEXT:    xvrdpi v2, v2
; P8-NEXT:    blr
;
; P9-LABEL: round_v2f64:
; P9:       # %bb.0:
; P9-NEXT:    xvrdpi v2, v2
; P9-NEXT:    blr
  %res = call <2 x double> @llvm.experimental.constrained.round.v2f64(
                        <2 x double> %vf1,
                        metadata !"fpexcept.strict")
  ret <2 x double> %res
}

define float @trunc_f32(float %f1) {
; P8-LABEL: trunc_f32:
; P8:       # %bb.0:
; P8-NEXT:    xsrdpiz f1, f1
; P8-NEXT:    blr
;
; P9-LABEL: trunc_f32:
; P9:       # %bb.0:
; P9-NEXT:    xsrdpiz f1, f1
; P9-NEXT:    blr
  %res = call float @llvm.experimental.constrained.trunc.f32(
                        float %f1,
                        metadata !"fpexcept.strict")
  ret float %res
}

define double @trunc_f64(double %f1) {
; P8-LABEL: trunc_f64:
; P8:       # %bb.0:
; P8-NEXT:    xsrdpiz f1, f1
; P8-NEXT:    blr
;
; P9-LABEL: trunc_f64:
; P9:       # %bb.0:
; P9-NEXT:    xsrdpiz f1, f1
; P9-NEXT:    blr
  %res = call double @llvm.experimental.constrained.trunc.f64(
                        double %f1,
                        metadata !"fpexcept.strict")
  ret double %res
}

define <4 x float> @trunc_v4f32(<4 x float> %vf1) {
; P8-LABEL: trunc_v4f32:
; P8:       # %bb.0:
; P8-NEXT:    xvrspiz v2, v2
; P8-NEXT:    blr
;
; P9-LABEL: trunc_v4f32:
; P9:       # %bb.0:
; P9-NEXT:    xvrspiz v2, v2
; P9-NEXT:    blr
  %res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(
                        <4 x float> %vf1,
                        metadata !"fpexcept.strict")
  ret <4 x float> %res
}

define <2 x double> @trunc_v2f64(<2 x double> %vf1) {
; P8-LABEL: trunc_v2f64:
; P8:       # %bb.0:
; P8-NEXT:    xvrdpiz v2, v2
; P8-NEXT:    blr
;
; P9-LABEL: trunc_v2f64:
; P9:       # %bb.0:
; P9-NEXT:    xvrdpiz v2, v2
; P9-NEXT:    blr
  %res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
                        <2 x double> %vf1,
                        metadata !"fpexcept.strict")
  ret <2 x double> %res
}