; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=sse2 < %s | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=avx < %s | FileCheck %s --check-prefix=CHECK --check-prefix=AVX

declare float @fminf(float, float)
declare double @fmin(double, double)
declare x86_fp80 @fminl(x86_fp80, x86_fp80)
declare float @llvm.minnum.f32(float, float)
declare double @llvm.minnum.f64(double, double)
declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80)

declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>)
declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>)
declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>)
declare <4 x double> @llvm.minnum.v4f64(<4 x double>, <4 x double>)
declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>)

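; The minnum semantics require that when exactly one operand is NaN, the other
; (non-NaN) operand is returned. The x86 min instructions instead return the
; second source operand whenever an input is NaN, so the backend emits min with
; the operands commuted (yielding x when y is NaN) followed by a cmpunord/select
; on x to return y when x is NaN; on SSE the select is expanded to and/andn/or.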
; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.

; CHECK-LABEL: @test_fminf
; SSE:         movaps %xmm0, %xmm2
; SSE-NEXT:    cmpunordss %xmm2, %xmm2
; SSE-NEXT:    movaps %xmm2, %xmm3
; SSE-NEXT:    andps %xmm1, %xmm3
; SSE-NEXT:    minss %xmm0, %xmm1
; SSE-NEXT:    andnps %xmm1, %xmm2
; SSE-NEXT:    orps %xmm3, %xmm2
; SSE-NEXT:    movaps %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX:         vminss %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
define float @test_fminf(float %x, float %y) {
  %z = call float @fminf(float %x, float %y) readnone
  ret float %z
}

; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.

; CHECK-LABEL: @test_fmin
; SSE:         movapd %xmm0, %xmm2
; SSE-NEXT:    cmpunordsd %xmm2, %xmm2
; SSE-NEXT:    movapd %xmm2, %xmm3
; SSE-NEXT:    andpd %xmm1, %xmm3
; SSE-NEXT:    minsd %xmm0, %xmm1
; SSE-NEXT:    andnpd %xmm1, %xmm2
; SSE-NEXT:    orpd %xmm3, %xmm2
; SSE-NEXT:    movapd %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX:         vminsd %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
define double @test_fmin(double %x, double %y) {
  %z = call double @fmin(double %x, double %y) readnone
  ret double %z
}

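; There is no SSE/AVX min instruction for the 80-bit x86_fp80 type, so here and
; in @test_intrinsic_fminl below the operation remains a call to the fminl libm
; function.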
; CHECK-LABEL: @test_fminl
; CHECK: callq fminl
define x86_fp80 @test_fminl(x86_fp80 %x, x86_fp80 %y) {
  %z = call x86_fp80 @fminl(x86_fp80 %x, x86_fp80 %y) readnone
  ret x86_fp80 %z
}

; CHECK-LABEL: @test_intrinsic_fminf
; SSE:         movaps %xmm0, %xmm2
; SSE-NEXT:    cmpunordss %xmm2, %xmm2
; SSE-NEXT:    movaps %xmm2, %xmm3
; SSE-NEXT:    andps %xmm1, %xmm3
; SSE-NEXT:    minss %xmm0, %xmm1
; SSE-NEXT:    andnps %xmm1, %xmm2
; SSE-NEXT:    orps %xmm3, %xmm2
; SSE-NEXT:    movaps %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX:         vminss %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
define float @test_intrinsic_fminf(float %x, float %y) {
  %z = call float @llvm.minnum.f32(float %x, float %y) readnone
  ret float %z
}

; CHECK-LABEL: @test_intrinsic_fmin
; SSE:         movapd %xmm0, %xmm2
; SSE-NEXT:    cmpunordsd %xmm2, %xmm2
; SSE-NEXT:    movapd %xmm2, %xmm3
; SSE-NEXT:    andpd %xmm1, %xmm3
; SSE-NEXT:    minsd %xmm0, %xmm1
; SSE-NEXT:    andnpd %xmm1, %xmm2
; SSE-NEXT:    orpd %xmm3, %xmm2
; SSE-NEXT:    movapd %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX:         vminsd %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
define double @test_intrinsic_fmin(double %x, double %y) {
  %z = call double @llvm.minnum.f64(double %x, double %y) readnone
  ret double %z
}

; CHECK-LABEL: @test_intrinsic_fminl
; CHECK: callq fminl
define x86_fp80 @test_intrinsic_fminl(x86_fp80 %x, x86_fp80 %y) {
  %z = call x86_fp80 @llvm.minnum.f80(x86_fp80 %x, x86_fp80 %y) readnone
  ret x86_fp80 %z
}

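; The vector tests use the same min + cmpunord + blend idiom as the scalar
; tests, but the SSE sequences below need only a single initial copy; this is
; what the scalar FIXMEs above refer to.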
; CHECK-LABEL: @test_intrinsic_fmin_v2f32
; SSE:         movaps %xmm1, %xmm2
; SSE-NEXT:    minps %xmm0, %xmm2
; SSE-NEXT:    cmpunordps %xmm0, %xmm0
; SSE-NEXT:    andps %xmm0, %xmm1
; SSE-NEXT:    andnps %xmm2, %xmm0
; SSE-NEXT:    orps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX:         vminps %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
define <2 x float> @test_intrinsic_fmin_v2f32(<2 x float> %x, <2 x float> %y) {
  %z = call <2 x float> @llvm.minnum.v2f32(<2 x float> %x, <2 x float> %y) readnone
  ret <2 x float> %z
}

; CHECK-LABEL: @test_intrinsic_fmin_v4f32
; SSE:         movaps %xmm1, %xmm2
; SSE-NEXT:    minps %xmm0, %xmm2
; SSE-NEXT:    cmpunordps %xmm0, %xmm0
; SSE-NEXT:    andps %xmm0, %xmm1
; SSE-NEXT:    andnps %xmm2, %xmm0
; SSE-NEXT:    orps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX:         vminps %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
define <4 x float> @test_intrinsic_fmin_v4f32(<4 x float> %x, <4 x float> %y) {
  %z = call <4 x float> @llvm.minnum.v4f32(<4 x float> %x, <4 x float> %y) readnone
  ret <4 x float> %z
}

; CHECK-LABEL: @test_intrinsic_fmin_v2f64
; SSE:         movapd %xmm1, %xmm2
; SSE-NEXT:    minpd %xmm0, %xmm2
; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
; SSE-NEXT:    andpd %xmm0, %xmm1
; SSE-NEXT:    andnpd %xmm2, %xmm0
; SSE-NEXT:    orpd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX:         vminpd %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordpd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
define <2 x double> @test_intrinsic_fmin_v2f64(<2 x double> %x, <2 x double> %y) {
  %z = call <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double> %y) readnone
  ret <2 x double> %z
}

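; v4f64 is wider than a 128-bit XMM register, so the SSE run splits the
; operation into two independent 128-bit sequences, while AVX uses single
; 256-bit YMM instructions.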
; CHECK-LABEL: @test_intrinsic_fmin_v4f64
; SSE:         movapd %xmm2, %xmm4
; SSE-NEXT:    minpd %xmm0, %xmm4
; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
; SSE-NEXT:    andpd %xmm0, %xmm2
; SSE-NEXT:    andnpd %xmm4, %xmm0
; SSE-NEXT:    orpd %xmm2, %xmm0
; SSE-NEXT:    movapd %xmm3, %xmm2
; SSE-NEXT:    minpd %xmm1, %xmm2
; SSE-NEXT:    cmpunordpd %xmm1, %xmm1
; SSE-NEXT:    andpd %xmm1, %xmm3
; SSE-NEXT:    andnpd %xmm2, %xmm1
; SSE-NEXT:    orpd %xmm3, %xmm1
; SSE-NEXT:    retq
;
; AVX:         vminpd %ymm0, %ymm1, %ymm2
; AVX-NEXT:    vcmpunordpd %ymm0, %ymm0, %ymm0
; AVX-NEXT:    vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
; AVX-NEXT:    retq
define <4 x double> @test_intrinsic_fmin_v4f64(<4 x double> %x, <4 x double> %y) {
  %z = call <4 x double> @llvm.minnum.v4f64(<4 x double> %x, <4 x double> %y) readnone
  ret <4 x double> %z
}

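; v8f64 splits further: four 128-bit sequences on SSE and two 256-bit YMM
; sequences on AVX.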
; CHECK-LABEL: @test_intrinsic_fmin_v8f64
; SSE:         movapd %xmm4, %xmm8
; SSE-NEXT:    minpd %xmm0, %xmm8
; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
; SSE-NEXT:    andpd %xmm0, %xmm4
; SSE-NEXT:    andnpd %xmm8, %xmm0
; SSE-NEXT:    orpd %xmm4, %xmm0
; SSE-NEXT:    movapd %xmm5, %xmm4
; SSE-NEXT:    minpd %xmm1, %xmm4
; SSE-NEXT:    cmpunordpd %xmm1, %xmm1
; SSE-NEXT:    andpd %xmm1, %xmm5
; SSE-NEXT:    andnpd %xmm4, %xmm1
; SSE-NEXT:    orpd %xmm5, %xmm1
; SSE-NEXT:    movapd %xmm6, %xmm4
; SSE-NEXT:    minpd %xmm2, %xmm4
; SSE-NEXT:    cmpunordpd %xmm2, %xmm2
; SSE-NEXT:    andpd %xmm2, %xmm6
; SSE-NEXT:    andnpd %xmm4, %xmm2
; SSE-NEXT:    orpd %xmm6, %xmm2
; SSE-NEXT:    movapd %xmm7, %xmm4
; SSE-NEXT:    minpd %xmm3, %xmm4
; SSE-NEXT:    cmpunordpd %xmm3, %xmm3
; SSE-NEXT:    andpd %xmm3, %xmm7
; SSE-NEXT:    andnpd %xmm4, %xmm3
; SSE-NEXT:    orpd %xmm7, %xmm3
; SSE-NEXT:    retq
;
; AVX:         vminpd %ymm0, %ymm2, %ymm4
; AVX-NEXT:    vcmpunordpd %ymm0, %ymm0, %ymm0
; AVX-NEXT:    vblendvpd %ymm0, %ymm2, %ymm4, %ymm0
; AVX-NEXT:    vminpd %ymm1, %ymm3, %ymm2
; AVX-NEXT:    vcmpunordpd %ymm1, %ymm1, %ymm1
; AVX-NEXT:    vblendvpd %ymm1, %ymm3, %ymm2, %ymm1
; AVX-NEXT:    retq
define <8 x double> @test_intrinsic_fmin_v8f64(<8 x double> %x, <8 x double> %y) {
  %z = call <8 x double> @llvm.minnum.v8f64(<8 x double> %x, <8 x double> %y) readnone
  ret <8 x double> %z
}