; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512f | FileCheck %s --check-prefix=CHECK_UNSAFE --check-prefix=AVX512F_UNSAFE
; RUN: llc < %s -mtriple=x86_64 -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512

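; These tests cover lowering of fast-math min/max patterns (fcmp fast + select)
; with AVX-512. Under -enable-unsafe-fp-math the load folds directly into a
; single vmax*/vmin* instruction with a memory operand (CHECK_UNSAFE); the
; default lowering keeps a separate vmovap*/vmovs* load and preserves the
; operand order of the non-commutative x86 min/max instructions (CHECK).
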
define <16 x float> @test_max_v16f32(<16 x float>* %a_ptr, <16 x float> %b) {
; CHECK_UNSAFE-LABEL: test_max_v16f32:
; CHECK_UNSAFE:       # %bb.0:
; CHECK_UNSAFE-NEXT:    vmaxps (%rdi), %zmm0, %zmm0
; CHECK_UNSAFE-NEXT:    retq
;
; CHECK-LABEL: test_max_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps (%rdi), %zmm1
; CHECK-NEXT:    vmaxps %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %a = load <16 x float>, <16 x float>* %a_ptr
  %tmp = fcmp fast ogt <16 x float> %a, %b
  %tmp4 = select <16 x i1> %tmp, <16 x float> %a, <16 x float> %b
  ret <16 x float> %tmp4
}

define <16 x float> @test_min_v16f32(<16 x float>* %a_ptr, <16 x float> %b) {
; CHECK_UNSAFE-LABEL: test_min_v16f32:
; CHECK_UNSAFE:       # %bb.0:
; CHECK_UNSAFE-NEXT:    vminps (%rdi), %zmm0, %zmm0
; CHECK_UNSAFE-NEXT:    retq
;
; CHECK-LABEL: test_min_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps (%rdi), %zmm1
; CHECK-NEXT:    vminps %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %a = load <16 x float>, <16 x float>* %a_ptr
  %tmp = fcmp fast olt <16 x float> %a, %b
  %tmp4 = select <16 x i1> %tmp, <16 x float> %a, <16 x float> %b
  ret <16 x float> %tmp4
}

define <8 x double> @test_max_v8f64(<8 x double>* %a_ptr, <8 x double> %b) {
; CHECK_UNSAFE-LABEL: test_max_v8f64:
; CHECK_UNSAFE:       # %bb.0:
; CHECK_UNSAFE-NEXT:    vmaxpd (%rdi), %zmm0, %zmm0
; CHECK_UNSAFE-NEXT:    retq
;
; CHECK-LABEL: test_max_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovapd (%rdi), %zmm1
; CHECK-NEXT:    vmaxpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %a = load <8 x double>, <8 x double>* %a_ptr
  %tmp = fcmp fast ogt <8 x double> %a, %b
  %tmp4 = select <8 x i1> %tmp, <8 x double> %a, <8 x double> %b
  ret <8 x double> %tmp4
}

define <8 x double> @test_min_v8f64(<8 x double>* %a_ptr, <8 x double> %b) {
; CHECK_UNSAFE-LABEL: test_min_v8f64:
; CHECK_UNSAFE:       # %bb.0:
; CHECK_UNSAFE-NEXT:    vminpd (%rdi), %zmm0, %zmm0
; CHECK_UNSAFE-NEXT:    retq
;
; CHECK-LABEL: test_min_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovapd (%rdi), %zmm1
; CHECK-NEXT:    vminpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %a = load <8 x double>, <8 x double>* %a_ptr
  %tmp = fcmp fast olt <8 x double> %a, %b
  %tmp4 = select <8 x i1> %tmp, <8 x double> %a, <8 x double> %b
  ret <8 x double> %tmp4
}

define float @test_min_f32(float %a, float* %ptr) {
; CHECK_UNSAFE-LABEL: test_min_f32:
; CHECK_UNSAFE:       # %bb.0: # %entry
; CHECK_UNSAFE-NEXT:    vminss (%rdi), %xmm0, %xmm0
; CHECK_UNSAFE-NEXT:    retq
;
; CHECK-LABEL: test_min_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    vminss %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
entry:
  %0 = load float, float* %ptr
  %1 = fcmp fast olt float %0, %a
  %2 = select i1 %1, float %0, float %a
  ret float %2
}

define double @test_max_f64(double %a, double* %ptr) {
; CHECK_UNSAFE-LABEL: test_max_f64:
; CHECK_UNSAFE:       # %bb.0: # %entry
; CHECK_UNSAFE-NEXT:    vmaxsd (%rdi), %xmm0, %xmm0
; CHECK_UNSAFE-NEXT:    retq
;
; CHECK-LABEL: test_max_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    vmaxsd %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
entry:
  %0 = load double, double* %ptr
  %1 = fcmp fast ogt double %0, %a
  %2 = select i1 %1, double %0, double %a
  ret double %2
}

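; The CHECK/CHECK_UNSAFE assertions above come from the autogeneration script
; named in the NOTE line. Assuming an LLVM checkout with a built llc (the paths
; and the --llc-binary option below are illustrative and may differ by LLVM
; version), they could be regenerated with something like:
;   llvm/utils/update_llc_test_checks.py --llc-binary=<path/to/llc> <path/to/this/test>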