; (removed: HTML code-viewer navigation artifacts — "Home / Line# / Scopes# / Navigate / Raw / Download" — not part of the test file)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s

; cmp with single-use load, should not form branch.
; (Single-use load feeding an fcmp folds into ucomisd + cmov, no jump emitted.)
define i32 @test1(double %a, double* nocapture %b, i32 %x, i32 %y)  {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ucomisd (%rdi), %xmm0
; CHECK-NEXT:    cmovbel %edx, %esi
; CHECK-NEXT:    movl %esi, %eax
; CHECK-NEXT:    retq
  %load = load double, double* %b, align 8
  %cmp = fcmp olt double %load, %a
  %cond = select i1 %cmp, i32 %x, i32 %y
  ret i32 %cond
}

; Sanity check: no load.
; (Register-register fcmp still selects via ucomisd + cmov.)
define i32 @test2(double %a, double %b, i32 %x, i32 %y)  {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ucomisd %xmm1, %xmm0
; CHECK-NEXT:    cmovbel %esi, %edi
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    retq
  %cmp = fcmp ogt double %a, %b
  %cond = select i1 %cmp, i32 %x, i32 %y
  ret i32 %cond
}

; Multiple uses of the load.
; (Load is reused by the add, so it stays in a register; cmp uses the reg copy.)
define i32 @test4(i32 %a, i32* nocapture %b, i32 %x, i32 %y)  {
; CHECK-LABEL: test4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl (%rsi), %eax
; CHECK-NEXT:    cmpl %edi, %eax
; CHECK-NEXT:    cmovael %ecx, %edx
; CHECK-NEXT:    addl %edx, %eax
; CHECK-NEXT:    retq
  %load = load i32, i32* %b, align 4
  %cmp = icmp ult i32 %load, %a
  %cond = select i1 %cmp, i32 %x, i32 %y
  %add = add i32 %cond, %load
  ret i32 %add
}

; Multiple uses of the cmp.
; (One flags-setting cmp feeds two cmovs with different condition codes.)
define i32 @test5(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
; CHECK-LABEL: test5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    cmpl %edi, (%rsi)
; CHECK-NEXT:    cmoval %edi, %ecx
; CHECK-NEXT:    cmovael %edx, %ecx
; CHECK-NEXT:    movl %ecx, %eax
; CHECK-NEXT:    retq
  %load = load i32, i32* %b, align 4
  %cmp = icmp ult i32 %load, %a
  %cmp1 = icmp ugt i32 %load, %a
  %cond = select i1 %cmp1, i32 %a, i32 %y
  %cond5 = select i1 %cmp, i32 %cond, i32 %x
  ret i32 %cond5
}

; Zero-extended select.
; (The zext of the 32-bit select result is free: writing a 32-bit register
; implicitly zeroes the upper 32 bits, so only the cmov + 64-bit store remain.)
define void @test6(i32 %a, i32 %x, i32* %y.ptr, i64* %z.ptr) {
; CHECK-LABEL: test6:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    # kill: def $esi killed $esi def $rsi
; CHECK-NEXT:    testl %edi, %edi
; CHECK-NEXT:    cmovnsl (%rdx), %esi
; CHECK-NEXT:    movq %rsi, (%rcx)
; CHECK-NEXT:    retq
entry:
  %y = load i32, i32* %y.ptr
  %cmp = icmp slt i32 %a, 0
  %z = select i1 %cmp, i32 %x, i32 %y
  %z.ext = zext i32 %z to i64
  store i64 %z.ext, i64* %z.ptr
  ret void
}

; If a select is not obviously predictable, don't turn it into a branch.
; (!prof !0 = 1:99 weights — not skewed enough; expect cmov, no jump.)
define i32 @weighted_select1(i32 %a, i32 %b) {
; CHECK-LABEL: weighted_select1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    testl %edi, %edi
; CHECK-NEXT:    cmovnel %edi, %esi
; CHECK-NEXT:    movl %esi, %eax
; CHECK-NEXT:    retq
  %cmp = icmp ne i32 %a, 0
  %sel = select i1 %cmp, i32 %a, i32 %b, !prof !0
  ret i32 %sel
}

; If a select is obviously predictable, turn it into a branch.
; (!prof !1 = 1:100 weights — skewed enough; expect a conditional jump.)
define i32 @weighted_select2(i32 %a, i32 %b) {
; CHECK-LABEL: weighted_select2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    testl %edi, %edi
; CHECK-NEXT:    jne .LBB6_2
; CHECK-NEXT:  # %bb.1: # %select.false
; CHECK-NEXT:    movl %esi, %edi
; CHECK-NEXT:  .LBB6_2: # %select.end
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    retq
  %cmp = icmp ne i32 %a, 0
  %sel = select i1 %cmp, i32 %a, i32 %b, !prof !1
  ret i32 %sel
}

; Note the reversed profile weights: it doesn't matter if it's
; obviously true or obviously false.
; Either one should become a branch rather than conditional move.
; TODO: But likely true vs. likely false should affect basic block placement?
define i32 @weighted_select3(i32 %a, i32 %b) {
; CHECK-LABEL: weighted_select3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    testl %edi, %edi
; CHECK-NEXT:    je .LBB7_1
; CHECK-NEXT:  # %bb.2: # %select.end
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    retq
; CHECK-NEXT:  .LBB7_1: # %select.false
; CHECK-NEXT:    movl %esi, %edi
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    retq
  %cmp = icmp ne i32 %a, 0
  %sel = select i1 %cmp, i32 %a, i32 %b, !prof !2
  ret i32 %sel
}

; Weightlessness is no reason to die.
; (!prof !3 = 0:0 weights — degenerate metadata must not crash the backend;
; falls back to the default cmov lowering.)
define i32 @unweighted_select(i32 %a, i32 %b) {
; CHECK-LABEL: unweighted_select:
; CHECK:       # %bb.0:
; CHECK-NEXT:    testl %edi, %edi
; CHECK-NEXT:    cmovnel %edi, %esi
; CHECK-NEXT:    movl %esi, %eax
; CHECK-NEXT:    retq
  %cmp = icmp ne i32 %a, 0
  %sel = select i1 %cmp, i32 %a, i32 %b, !prof !3
  ret i32 %sel
}

!0 = !{!"branch_weights", i32 1, i32 99}
!1 = !{!"branch_weights", i32 1, i32 100}
!2 = !{!"branch_weights", i32 100, i32 1}
!3 = !{!"branch_weights", i32 0, i32 0}