# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s

...
---
name:            fold
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; This should not have a UBFMXri, since ADDWrr implicitly gives us the
    ; zext.
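    ; On AArch64, any instruction that writes a W register zeroes bits 63:32
    ; of the corresponding X register, so the ADDWrr result is already
    ; zero-extended and a SUBREG_TO_REG alone is enough.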

    ; CHECK-LABEL: name: fold
    ; CHECK: liveins: $w0, $w1
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY1]], [[COPY]]
    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ADDWrr]], %subreg.sub_32
    ; CHECK: $x0 = COPY [[SUBREG_TO_REG]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_ADD %1, %0
    %3:gpr(s64) = G_ZEXT %2(s32)
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_s16
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; We should have a UBFMXri here, because we only do this for zero extends
    ; from 32 bits to 64 bits.
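    ; The source here is only 16 bits wide, so nothing guarantees that the
    ; bits above bit 15 are zero; the UBFMXri (keeping bits 0..15) masks
    ; them off explicitly.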

    ; CHECK-LABEL: name: dont_fold_s16
    ; CHECK: liveins: $w0, $w1
    ; CHECK: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[DEF]], %subreg.sub_32
    ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 15
    ; CHECK: $x0 = COPY [[UBFMXri]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s16) = G_IMPLICIT_DEF
    %3:gpr(s64) = G_ZEXT %0(s16)
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_copy
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0

    ; We should have a UBFMXri here, because isDef32 disallows copies.
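    ; A COPY carries no information about the instruction that produced the
    ; value, so the upper 32 bits cannot be assumed to be zero.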

    ; CHECK-LABEL: name: dont_fold_copy
    ; CHECK: liveins: $w0
    ; CHECK: %copy:gpr32 = COPY $w0
    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, %copy, %subreg.sub_32
    ; CHECK: %zext:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 31
    ; CHECK: $x0 = COPY %zext
    ; CHECK: RET_ReallyLR implicit $x0
    %copy:gpr(s32) = COPY $w0
    %zext:gpr(s64) = G_ZEXT %copy(s32)
    $x0 = COPY %zext(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_bitcast
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0

    ; We should have a UBFMXri here, because isDef32 disallows bitcasts.
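    ; The bitcasts are selected as plain copies, so, as in dont_fold_copy, the
    ; defining instruction of the zext's source is not visible and the upper
    ; 32 bits must be cleared explicitly.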

    ; CHECK-LABEL: name: dont_fold_bitcast
    ; CHECK: liveins: $w0
    ; CHECK: %copy:gpr32all = COPY $w0
    ; CHECK: %bitcast1:gpr32 = COPY %copy
    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, %bitcast1, %subreg.sub_32
    ; CHECK: %zext:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 31
    ; CHECK: $x0 = COPY %zext
    ; CHECK: RET_ReallyLR implicit $x0
    %copy:gpr(s32) = COPY $w0
    %bitcast0:gpr(<4 x s8>) = G_BITCAST %copy(s32)
    %bitcast1:gpr(s32) = G_BITCAST %bitcast0
    %zext:gpr(s64) = G_ZEXT %bitcast1(s32)
    $x0 = COPY %zext(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_trunc
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0

    ; We should have a UBFMXri here, because isDef32 disallows truncs.
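    ; The G_TRUNC is selected as a sub_32 subregister copy of the original
    ; 64-bit value, so no W-register write ever clears the upper bits.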

    ; CHECK-LABEL: name: dont_fold_trunc
    ; CHECK: liveins: $x0
    ; CHECK: %copy:gpr64sp = COPY $x0
    ; CHECK: %trunc:gpr32common = COPY %copy.sub_32
    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, %trunc, %subreg.sub_32
    ; CHECK: %zext:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 31
    ; CHECK: $x0 = COPY %zext
    ; CHECK: RET_ReallyLR implicit $x0
    %copy:gpr(s64) = COPY $x0
    %trunc:gpr(s32) = G_TRUNC %copy(s64)
    %zext:gpr(s64) = G_ZEXT %trunc(s32)
    $x0 = COPY %zext(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_fold_phi
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  ; CHECK-LABEL: name: dont_fold_phi
  ; CHECK: bb.0:
  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
  ; CHECK:   liveins: $w0, $w1, $w2
  ; CHECK:   %copy1:gpr32all = COPY $w0
  ; CHECK:   %copy2:gpr32all = COPY $w1
  ; CHECK:   %cond_wide:gpr32 = COPY $w2
  ; CHECK:   TBNZW %cond_wide, 0, %bb.1
  ; CHECK:   B %bb.2
  ; CHECK: bb.1:
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK: bb.2:
  ; CHECK:   %phi:gpr32 = PHI %copy1, %bb.0, %copy2, %bb.1
  ; CHECK:   [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, %phi, %subreg.sub_32
  ; CHECK:   [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 31
  ; CHECK:   $x0 = COPY [[UBFMXri]]
  ; CHECK:   RET_ReallyLR implicit $x0
  ; We should have a UBFMXri here, because isDef32 disallows phis.
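  ; The PHI merges values from different predecessors, and isDef32 does not
  ; inspect the incoming definitions, so the upper 32 bits must be cleared
  ; explicitly.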

  bb.0:
    liveins: $w0, $w1, $w2

    %copy1:gpr(s32) = COPY $w0
    %copy2:gpr(s32) = COPY $w1
    %cond_wide:gpr(s32) = COPY $w2
    %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
    G_BRCOND %cond(s1), %bb.1
    G_BR %bb.2

  bb.1:

  bb.2:
    %phi:gpr(s32) = G_PHI %copy1(s32), %bb.0, %copy2(s32), %bb.1
    %5:gpr(s64) = G_ZEXT %phi(s32)
    $x0 = COPY %5(s64)
    RET_ReallyLR implicit $x0

...
---
name:            dont_look_through_copy
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0, $w1

    ; Make sure we don't walk past the copy.
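    ; Even though the copied value is defined by an ADDWrr, the zext's source
    ; is the COPY itself, so the fold does not apply and a UBFMXri is emitted.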

    ; CHECK-LABEL: name: dont_look_through_copy
    ; CHECK: liveins: $w0, $w1
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY1]], [[COPY]]
    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ADDWrr]], %subreg.sub_32
    ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 31
    ; CHECK: $x0 = COPY [[UBFMXri]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %2:gpr(s32) = G_ADD %1, %0
    %3:gpr(s32) = COPY %2(s32)
    %4:gpr(s64) = G_ZEXT %3(s32)
    $x0 = COPY %4(s64)
    RET_ReallyLR implicit $x0
