; (HTML code-viewer chrome left over from scraping — commented out so the file parses)
; • Home
;   • Line#
;   • Scopes#
;   • Navigate#
;   • Raw
;   • Download
; RUN: llc -mcpu=pwr7 -mattr=+vsx -O0 -fast-isel=0 -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"

; Test inputs: vf is loaded volatile in @test1 so repeated loads are not CSE'd.
@vf = global <4 x float> <float -1.500000e+00, float 2.500000e+00, float -3.500000e+00, float 4.500000e+00>, align 16
@vd = global <2 x double> <double 3.500000e+00, double -7.500000e+00>, align 16
@d = global double 2.340000e+01, align 8
; Result slots written by @test1 (one per min/max intrinsic under test).
@vf1 = common global <4 x float> zeroinitializer, align 16
@vd1 = common global <2 x double> zeroinitializer, align 16
@vf2 = common global <4 x float> zeroinitializer, align 16
@vf3 = common global <4 x float> zeroinitializer, align 16
@vd2 = common global <2 x double> zeroinitializer, align 16
@vf4 = common global <4 x float> zeroinitializer, align 16
@d1 = common global double 0.000000e+00, align 8
@d2 = common global double 0.000000e+00, align 8
16
; Function Attrs: nounwind
; Checks that each llvm.ppc.vsx min/max intrinsic is selected to the matching
; VSX instruction (xvmaxsp/xvmaxdp/xvminsp/xvmindp/xsmaxdp/xsmindp).
define void @test1() #0 {
; CHECK-LABEL: @test1
entry:
  %0 = load volatile <4 x float>, <4 x float>* @vf, align 16
  %1 = load volatile <4 x float>, <4 x float>* @vf, align 16
  %2 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %0, <4 x float> %1)
; CHECK: xvmaxsp
  store <4 x float> %2, <4 x float>* @vf1, align 16
  %3 = load <2 x double>, <2 x double>* @vd, align 16
  %4 = tail call <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %3, <2 x double> %3)
; CHECK: xvmaxdp
  store <2 x double> %4, <2 x double>* @vd1, align 16
  %5 = load volatile <4 x float>, <4 x float>* @vf, align 16
  %6 = load volatile <4 x float>, <4 x float>* @vf, align 16
  %7 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %5, <4 x float> %6)
; CHECK: xvmaxsp
  store <4 x float> %7, <4 x float>* @vf2, align 16
  %8 = load volatile <4 x float>, <4 x float>* @vf, align 16
  %9 = load volatile <4 x float>, <4 x float>* @vf, align 16
  %10 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %8, <4 x float> %9)
; CHECK: xvminsp
  store <4 x float> %10, <4 x float>* @vf3, align 16
  %11 = load <2 x double>, <2 x double>* @vd, align 16
  %12 = tail call <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %11, <2 x double> %11)
; CHECK: xvmindp
  store <2 x double> %12, <2 x double>* @vd2, align 16
  %13 = load volatile <4 x float>, <4 x float>* @vf, align 16
  %14 = load volatile <4 x float>, <4 x float>* @vf, align 16
  %15 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %13, <4 x float> %14)
; CHECK: xvminsp
  store <4 x float> %15, <4 x float>* @vf4, align 16
  %16 = load double, double* @d, align 8
  %17 = tail call double @llvm.ppc.vsx.xsmaxdp(double %16, double %16)
; CHECK: xsmaxdp
  store double %17, double* @d1, align 8
  %18 = tail call double @llvm.ppc.vsx.xsmindp(double %16, double %16)
; CHECK: xsmindp
  store double %18, double* @d2, align 8
  ret void
}
58
; Declarations of the PPC VSX min/max intrinsics exercised by @test1.

; Function Attrs: nounwind readnone
declare double @llvm.ppc.vsx.xsmaxdp(double, double)

; Function Attrs: nounwind readnone
declare double @llvm.ppc.vsx.xsmindp(double, double)

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double>, <2 x double>)
76
; Generated from C source:

; % clang -O1 -maltivec -mvsx -S -emit-llvm vsx-minmax.c
;
;volatile vector float vf = { -1.5, 2.5, -3.5, 4.5 };
;vector double vd = { 3.5, -7.5 };
;double d = 23.4;
;
;vector float vf1, vf2, vf3, vf4;
;vector double vd1, vd2;
;double d1, d2;
;
;void test1() {
;  vf1 = vec_max(vf, vf);
;  vd1 = vec_max(vd, vd);
;  vf2 = vec_vmaxfp(vf, vf);
;  vf3 = vec_min(vf, vf);
;  vd2 = vec_min(vd, vd);
;  vf4 = vec_vminfp(vf, vf);
;  d1 = __builtin_vsx_xsmaxdp(d, d);
;  d2 = __builtin_vsx_xsmindp(d, d);
;}
99