; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s

; add (sext i1 X), 1 -> zext (not i1 X)

; Scalar form of the fold: sext(i1)+1 lowers to xor $1 (logical not of the
; zero-extended bool) followed by movzbl, avoiding a sign-extend.
define i32 @sext_inc(i1 zeroext %x) nounwind {
; CHECK-LABEL: sext_inc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorb $1, %dil
; CHECK-NEXT:    movzbl %dil, %eax
; CHECK-NEXT:    retq
  %ext = sext i1 %x to i32
  %add = add i32 %ext, 1
  ret i32 %add
}

; add (sext i1 X), 1 -> zext (not i1 X)

; Vector form of the fold: the +1 of a sign-extended i1 vector becomes a
; single andn against a splat of 1 (zext of the inverted mask).
define <4 x i32> @sext_inc_vec(<4 x i1> %x) nounwind {
; CHECK-LABEL: sext_inc_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; CHECK-NEXT:    vandnps %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %ext = sext <4 x i1> %x to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}
; The i1 mask comes from a signed-greater-than compare; the sext+add still
; folds to pcmpgtd + andn with a splat of 1.
define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: cmpgt_sext_inc_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; CHECK-NEXT:    vpandn %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %cmp = icmp sgt <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}
; icmp ne has no direct x86 vector instruction; the inverted-compare + not +
; add chain collapses to pcmpeqd followed by a logical shift that isolates
; the (inverted) mask bit as 0/1.
define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: cmpne_sext_inc_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpsrld $31, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %cmp = icmp ne <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}
; 256-bit (AVX2 ymm) variant of the compare + sext + inc fold, using the
; 64-bit element compare vpcmpgtq.
define <4 x i64> @cmpgt_sext_inc_vec256(<4 x i64> %x, <4 x i64> %y) nounwind {
; CHECK-LABEL: cmpgt_sext_inc_vec256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
; CHECK-NEXT:    vpandn %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    retq
  %cmp = icmp sgt <4 x i64> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i64>
  %add = add <4 x i64> %ext, <i64 1, i64 1, i64 1, i64 1>
  ret <4 x i64> %add
}
; The fold must still apply when the i1 operand is itself the result of
; boolean logic (and of two compares): scalar lowering uses sete/orb/movzbl
; with no sign-extension.
define i32 @bool_logic_and_math(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; CHECK-LABEL: bool_logic_and_math:
; CHECK:       # %bb.0:
; CHECK-NEXT:    cmpl %esi, %edi
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    cmpl %ecx, %edx
; CHECK-NEXT:    sete %cl
; CHECK-NEXT:    orb %al, %cl
; CHECK-NEXT:    movzbl %cl, %eax
; CHECK-NEXT:    retq
  %cmp1 = icmp ne i32 %a, %b
  %cmp2 = icmp ne i32 %c, %d
  %and = and i1 %cmp1, %cmp2
  %ext = sext i1 %and to i32
  %add = add i32 %ext, 1
  ret i32 %add
}
; Vector variant of the boolean-logic case: two eq-compares combined with
; andn/xor of an all-ones mask, then the sext+inc fold finishes with a
; final andn against a splat of 1.
define <4 x i32> @bool_logic_and_math_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) nounwind {
; CHECK-LABEL: bool_logic_and_math_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm1
; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; CHECK-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; CHECK-NEXT:    vpandn %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; CHECK-NEXT:    vpandn %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %cmp1 = icmp ne <4 x i32> %a, %b
  %cmp2 = icmp ne <4 x i32> %c, %d
  %and = and <4 x i1> %cmp1, %cmp2
  %ext = sext <4 x i1> %and to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}