; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X64
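; and_masks: two fcmp olt results are ANDed together and the combined <8 x i1>
; mask is zero-extended to <8 x i32>. The checks below expect this to stay in
; YMM registers as two vcmpltps feeding a vandps, with the zext realized as an
; AND against a constant-pool vector of ones rather than being scalarized.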
define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
; X32-LABEL: and_masks:
; X32:       ## BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    vmovups (%edx), %ymm0
; X32-NEXT:    vmovups (%ecx), %ymm1
; X32-NEXT:    vcmpltps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vmovups (%eax), %ymm2
; X32-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
; X32-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X32-NEXT:    vandps LCPI0_0, %ymm0, %ymm0
; X32-NEXT:    vmovaps %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: and_masks:
; X64:       ## BB#0:
; X64-NEXT:    vmovups (%rdi), %ymm0
; X64-NEXT:    vmovups (%rsi), %ymm1
; X64-NEXT:    vcmpltps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vmovups (%rdx), %ymm2
; X64-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
; X64-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X64-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vmovaps %ymm0, (%rax)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %v0 = load <8 x float>, <8 x float>* %a, align 16
  %v1 = load <8 x float>, <8 x float>* %b, align 16
  %m0 = fcmp olt <8 x float> %v1, %v0
  %v2 = load <8 x float>, <8 x float>* %c, align 16
  %m1 = fcmp olt <8 x float> %v2, %v0
  %mand = and <8 x i1> %m1, %m0
  %r = zext <8 x i1> %mand to <8 x i32>
  store <8 x i32> %r, <8 x i32>* undef, align 32
  ret void
}
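; neg_masks: a single fcmp olt mask is negated by XORing with an all-ones
; <8 x i1> vector and then zero-extended to <8 x i32>. The checks below expect
; one vcmpltps followed by a vxorps and vandps against a splat of 1
; ([1,1,1,1,1,1,1,1]), again without scalarizing the i1 mask.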
define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
; X32-LABEL: neg_masks:
; X32:       ## BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    vmovups (%ecx), %ymm0
; X32-NEXT:    vcmpltps (%eax), %ymm0, %ymm0
; X32-NEXT:    vmovaps {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
; X32-NEXT:    vxorps %ymm1, %ymm0, %ymm0
; X32-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X32-NEXT:    vmovaps %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: neg_masks:
; X64:       ## BB#0:
; X64-NEXT:    vmovups (%rsi), %ymm0
; X64-NEXT:    vcmpltps (%rdi), %ymm0, %ymm0
; X64-NEXT:    vmovaps {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
; X64-NEXT:    vxorps %ymm1, %ymm0, %ymm0
; X64-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X64-NEXT:    vmovaps %ymm0, (%rax)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %v0 = load <8 x float>, <8 x float>* %a, align 16
  %v1 = load <8 x float>, <8 x float>* %b, align 16
  %m0 = fcmp olt <8 x float> %v1, %v0
  %mand = xor <8 x i1> %m0, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
  %r = zext <8 x i1> %mand to <8 x i32>
  store <8 x i32> %r, <8 x i32>* undef, align 32
  ret void
}