# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -debugify-and-strip-all-safe -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="opt_brcond_by_inverting_cond" -global-isel -verify-machineinstrs %s -o - | FileCheck %s

# Need asserts for the only-enable-rule to work.

# REQUIRES: asserts

# Checks that the combiner rewrites `G_BRCOND %c, %if.then; G_BR %if.end` by
# inverting the condition (G_XOR with true) and swapping the branch targets,
# so the conditional branch goes to %if.end and the fallthrough to %if.then.

--- |
  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
  target triple = "arm64-apple-ios5.0.0"

  define i32 @foo(i32 %a, i32 %b) {
  entry:
    %cmp = icmp sgt i32 %a, 0
    br i1 %cmp, label %if.then, label %if.end

  if.then:
    %add = add nsw i32 %b, %a
    %add1 = add nsw i32 %a, %b
    br label %return

  if.end:
    %mul = mul nsw i32 %b, %b
    %add2 = add nuw nsw i32 %mul, 2
    br label %return

  return:
    %retval.0 = phi i32 [ %add1, %if.then ], [ %add2, %if.end ]
    ret i32 %retval.0
  }


...
---
name:            foo
tracksRegLiveness: true
body:             |
  ; CHECK-LABEL: name: foo
  ; CHECK: bb.0.entry:
  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
  ; CHECK:   liveins: $w0, $w1
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; CHECK:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
  ; CHECK:   [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
  ; CHECK:   [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP]], [[C2]]
  ; CHECK:   G_BRCOND [[XOR]](s1), %bb.2
  ; CHECK:   G_BR %bb.1
  ; CHECK: bb.1.if.then:
  ; CHECK:   successors: %bb.3(0x80000000)
  ; CHECK:   [[ADD:%[0-9]+]]:_(s32) = nsw G_ADD [[COPY1]], [[COPY]]
  ; CHECK:   [[ADD1:%[0-9]+]]:_(s32) = nsw G_ADD [[ADD]], [[COPY1]]
  ; CHECK:   G_BR %bb.3
  ; CHECK: bb.2.if.end:
  ; CHECK:   successors: %bb.3(0x80000000)
  ; CHECK:   [[MUL:%[0-9]+]]:_(s32) = nsw G_MUL [[COPY1]], [[COPY1]]
  ; CHECK:   [[ADD2:%[0-9]+]]:_(s32) = nuw nsw G_ADD [[MUL]], [[C1]]
  ; CHECK: bb.3.return:
  ; CHECK:   [[PHI:%[0-9]+]]:_(s32) = G_PHI [[ADD1]](s32), %bb.1, [[ADD2]](s32), %bb.2
  ; CHECK:   $w0 = COPY [[PHI]](s32)
  ; CHECK:   RET_ReallyLR implicit $w0
  bb.1.entry:
    liveins: $w0, $w1

    %0:_(s32) = COPY $w0
    %1:_(s32) = COPY $w1
    %2:_(s32) = G_CONSTANT i32 0
    %5:_(s32) = G_CONSTANT i32 2
    %3:_(s1) = G_ICMP intpred(sgt), %0(s32), %2
    G_BRCOND %3(s1), %bb.2
    G_BR %bb.3

  bb.2.if.then:
    %7:_(s32) = nsw G_ADD %1, %0
    %8:_(s32) = nsw G_ADD %7, %1
    G_BR %bb.4

  bb.3.if.end:
    %4:_(s32) = nsw G_MUL %1, %1
    %6:_(s32) = nuw nsw G_ADD %4, %5

  bb.4.return:
    %10:_(s32) = G_PHI %8(s32), %bb.2, %6(s32), %bb.3
    $w0 = COPY %10(s32)
    RET_ReallyLR implicit $w0

...