# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s

--- |
  define void @ldrxrox_breg_oreg(i64* %addr) { ret void }
  define void @ldrdrox_breg_oreg(i64* %addr) { ret void }
  define void @more_than_one_use(i64* %addr) { ret void }
  define void @ldrxrox_shl(i64* %addr) { ret void }
  define void @ldrdrox_shl(i64* %addr) { ret void }
  define void @ldrxrox_mul_rhs(i64* %addr) { ret void }
  define void @ldrdrox_mul_rhs(i64* %addr) { ret void }
  define void @ldrxrox_mul_lhs(i64* %addr) { ret void }
  define void @ldrdrox_mul_lhs(i64* %addr) { ret void }
  define void @mul_not_pow_2(i64* %addr) { ret void }
  define void @mul_wrong_pow_2(i64* %addr) { ret void }
  define void @more_than_one_use_shl_1(i64* %addr) { ret void }
  define void @more_than_one_use_shl_2(i64* %addr) { ret void }
  define void @more_than_one_use_shl_lsl_fast(i64* %addr) #1 { ret void }
  define void @more_than_one_use_shl_lsl_slow(i64* %addr) { ret void }
  define void @more_than_one_use_shl_minsize(i64* %addr) #0 { ret void }
  define void @ldrwrox(i64* %addr) { ret void }
  define void @ldrsrox(i64* %addr) { ret void }
  define void @ldrhrox(i64* %addr) { ret void }
  define void @ldbbrox(i64* %addr) { ret void }
  define void @ldrqrox(i64* %addr) { ret void }
  attributes #0 = { optsize minsize }
  attributes #1 = { "target-features"="+lsl-fast" }
...

---
name:            ldrxrox_breg_oreg
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1

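    ; Show that a G_PTR_ADD of a base pointer and an offset register feeding a
    ; G_LOAD selects to the register-register addressing mode (LDRXroX).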
    ; CHECK-LABEL: name: ldrxrox_breg_oreg
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load 8 from %ir.addr)
    ; CHECK: $x0 = COPY [[LDRXroX]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_PTR_ADD %0, %1
    %4:gpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
    $x0 = COPY %4(s64)
    RET_ReallyLR implicit $x0
...

---
name:            ldrdrox_breg_oreg
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $d0, $x1
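    ; Same as above, but loading into an FPR, which selects LDRDroX.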
    ; CHECK-LABEL: name: ldrdrox_breg_oreg
    ; CHECK: liveins: $d0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load 8 from %ir.addr)
    ; CHECK: $d0 = COPY [[LDRDroX]]
    ; CHECK: RET_ReallyLR implicit $d0
    %0:gpr(p0) = COPY $d0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_PTR_ADD %0, %1
    %4:fpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
    $d0 = COPY %4(s64)
    RET_ReallyLR implicit $d0
...
---
name:            more_than_one_use
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1
    ; This shouldn't be folded, since we reuse the result of the G_PTR_ADD outside
    ; the G_LOAD
    ; CHECK-LABEL: name: more_than_one_use
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
    ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load 8 from %ir.addr)
    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
    ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
    ; CHECK: $x0 = COPY [[ADDXrr1]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_PTR_ADD %0, %1
    %4:gpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
    %5:gpr(s64) = G_PTRTOINT %2
    %6:gpr(s64) = G_ADD %5, %4
    $x0 = COPY %6(s64)
    RET_ReallyLR implicit $x0

...
---
name:            ldrxrox_shl
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1, $x2
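    ; Show that a shift-left of the offset by 3 folds into the scaled
    ; register-register addressing mode (the shift bit is set on the load).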
    ; CHECK-LABEL: name: ldrxrox_shl
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: $x2 = COPY [[LDRXroX]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $x2 = COPY %5(s64)
    RET_ReallyLR implicit $x2

...
---
name:            ldrdrox_shl
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1, $d2
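    ; Same as above, but with an FPR destination (LDRDroX).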
    ; CHECK-LABEL: name: ldrdrox_shl
    ; CHECK: liveins: $x0, $x1, $d2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: $d2 = COPY [[LDRDroX]]
    ; CHECK: RET_ReallyLR implicit $d2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $d2 = COPY %5(s64)
    RET_ReallyLR implicit $d2

...
---
name:            ldrxrox_mul_rhs
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1, $x2
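    ; Show that a multiply by a power of 2 (8 = 1 << 3) on the right-hand side
    ; is treated like a shift and folds into the scaled addressing mode.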
    ; CHECK-LABEL: name: ldrxrox_mul_rhs
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: $x2 = COPY [[LDRXroX]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 8
    %2:gpr(s64) = G_MUL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $x2 = COPY %5(s64)
    RET_ReallyLR implicit $x2

...
---
name:            ldrdrox_mul_rhs
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1, $d2
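    ; Same as above, but with an FPR destination.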
    ; CHECK-LABEL: name: ldrdrox_mul_rhs
    ; CHECK: liveins: $x0, $x1, $d2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: $d2 = COPY [[LDRDroX]]
    ; CHECK: RET_ReallyLR implicit $d2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 8
    %2:gpr(s64) = G_MUL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $d2 = COPY %5(s64)
    RET_ReallyLR implicit $d2

...
---
name:            ldrxrox_mul_lhs
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1, $x2
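    ; Show that the power-of-2 constant may also appear on the left-hand side
    ; of the multiply.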
    ; CHECK-LABEL: name: ldrxrox_mul_lhs
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: $x2 = COPY [[LDRXroX]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 8
    %2:gpr(s64) = G_MUL %1, %0(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $x2 = COPY %5(s64)
    RET_ReallyLR implicit $x2

...
---
name:            ldrdrox_mul_lhs
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1, $d2
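    ; Same as above, but with an FPR destination.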
    ; CHECK-LABEL: name: ldrdrox_mul_lhs
    ; CHECK: liveins: $x0, $x1, $d2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: $d2 = COPY [[LDRDroX]]
    ; CHECK: RET_ReallyLR implicit $d2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 8
    %2:gpr(s64) = G_MUL %1, %0(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $d2 = COPY %5(s64)
    RET_ReallyLR implicit $d2

...
---
name:            mul_not_pow_2
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    ; Show that we don't get a shifted load from a mul when we don't have a
    ; power of 2. (The bit isn't set on the load.)
    liveins: $x0, $x1, $d2
    ; CHECK-LABEL: name: mul_not_pow_2
    ; CHECK: liveins: $x0, $x1, $d2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 7
    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
    ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load 8 from %ir.addr)
    ; CHECK: $d2 = COPY [[LDRDroX]]
    ; CHECK: RET_ReallyLR implicit $d2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 7
    %2:gpr(s64) = G_MUL %1, %0(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $d2 = COPY %5(s64)
    RET_ReallyLR implicit $d2

...
---
name:            mul_wrong_pow_2
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    ; Show that we don't get a shifted load from a mul when we don't have
    ; the right power of 2. (The bit isn't set on the load.)
    liveins: $x0, $x1, $d2
    ; CHECK-LABEL: name: mul_wrong_pow_2
    ; CHECK: liveins: $x0, $x1, $d2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16
    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
    ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load 8 from %ir.addr)
    ; CHECK: $d2 = COPY [[LDRDroX]]
    ; CHECK: RET_ReallyLR implicit $d2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 16
    %2:gpr(s64) = G_MUL %1, %0(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $d2 = COPY %5(s64)
    RET_ReallyLR implicit $d2

...
---
name:            more_than_one_use_shl_1
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    ; Show that we can still fall back to the register-register addressing
    ; mode when we fail to pull in the shift.
    liveins: $x0, $x1, $x2
    ; CHECK-LABEL: name: more_than_one_use_shl_1
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load 8 from %ir.addr)
    ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
    ; CHECK: $x2 = COPY [[ADDXrr]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %6:gpr(s64) = G_ADD %2, %1
    %7:gpr(s64) = G_ADD %5, %6
    $x2 = COPY %7(s64)
    RET_ReallyLR implicit $x2

...
---
name:            more_than_one_use_shl_2
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    ; Show that when the GEP is used outside a memory op, we don't do any
    ; folding at all.
    liveins: $x0, $x1, $x2
    ; CHECK-LABEL: name: more_than_one_use_shl_2
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
    ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load 8 from %ir.addr)
    ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
    ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
    ; CHECK: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
    ; CHECK: $x2 = COPY [[ADDXrr2]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %6:gpr(s64) = G_ADD %2, %1
    %7:gpr(s64) = G_ADD %5, %6
    %8:gpr(s64) = G_PTRTOINT %4
    %9:gpr(s64) = G_ADD %8, %7
    $x2 = COPY %9(s64)
    RET_ReallyLR implicit $x2

...
---
name:            more_than_one_use_shl_lsl_fast
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    ; Show that when we have a fastpath for shift-left (+lsl-fast), we perform
    ; the folding even when the shift has more than one use.
    liveins: $x0, $x1, $x2
    ; CHECK-LABEL: name: more_than_one_use_shl_lsl_fast
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
    ; CHECK: $x2 = COPY [[ADDXrr]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %6:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %7:gpr(s64) = G_ADD %5, %6
    $x2 = COPY %7(s64)
    RET_ReallyLR implicit $x2

...
---
name:            more_than_one_use_shl_lsl_slow
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    ; Show that we don't fold into multiple memory ops when we don't have a
    ; fastpath for shift-left.
    liveins: $x0, $x1, $x2
    ; CHECK-LABEL: name: more_than_one_use_shl_lsl_slow
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64common = ADDXrs [[COPY1]], [[COPY]], 3
    ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load 8 from %ir.addr)
    ; CHECK: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load 8 from %ir.addr)
    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
    ; CHECK: $x2 = COPY [[ADDXrr]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %6:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %7:gpr(s64) = G_ADD %5, %6
    $x2 = COPY %7(s64)
    RET_ReallyLR implicit $x2

...
---
name:            more_than_one_use_shl_minsize
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    ; Show that when we're optimizing for size, we'll do the folding no matter
    ; what.
    liveins: $x0, $x1, $x2
    ; CHECK-LABEL: name: more_than_one_use_shl_minsize
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
    ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY2]], [[COPY]], 3
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
    ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrs]], [[ADDXrr]]
    ; CHECK: $x2 = COPY [[ADDXrr1]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_PTR_ADD %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %6:gpr(s64) = G_ADD %2, %1
    %7:gpr(s64) = G_ADD %5, %6
    %8:gpr(s64) = G_PTRTOINT %4
    %9:gpr(s64) = G_ADD %8, %7
    $x2 = COPY %9(s64)
    RET_ReallyLR implicit $x2
...
---
name:            ldrwrox
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1
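    ; Show that a 32-bit GPR load also uses the register-register addressing
    ; mode (LDRWroX).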
    ; CHECK-LABEL: name: ldrwrox
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load 4 from %ir.addr)
    ; CHECK: $w2 = COPY [[LDRWroX]]
    ; CHECK: RET_ReallyLR implicit $w2
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_PTR_ADD %0, %1
    %4:gpr(s32) = G_LOAD %2(p0) :: (load 4 from %ir.addr)
    $w2 = COPY %4(s32)
    RET_ReallyLR implicit $w2
...
---
name:            ldrsrox
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $d0, $x1
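    ; Show that a 32-bit FPR load selects LDRSroX.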
    ; CHECK-LABEL: name: ldrsrox
    ; CHECK: liveins: $d0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load 4 from %ir.addr)
    ; CHECK: $s2 = COPY [[LDRSroX]]
    ; CHECK: RET_ReallyLR implicit $s2
    %0:gpr(p0) = COPY $d0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_PTR_ADD %0, %1
    %4:fpr(s32) = G_LOAD %2(p0) :: (load 4 from %ir.addr)
    $s2 = COPY %4(s32)
    RET_ReallyLR implicit $s2
...
---
name:            ldrhrox
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1
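    ; Show that a 16-bit FPR load selects LDRHroX.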
    ; CHECK-LABEL: name: ldrhrox
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load 2 from %ir.addr)
    ; CHECK: $h2 = COPY [[LDRHroX]]
    ; CHECK: RET_ReallyLR implicit $h2
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_PTR_ADD %0, %1
    %4:fpr(s16) = G_LOAD %2(p0) :: (load 2 from %ir.addr)
    $h2 = COPY %4(s16)
    RET_ReallyLR implicit $h2
...
---
name:            ldbbrox
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $x0, $x1
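    ; Show that an 8-bit load selects LDRBBroX.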
    ; CHECK-LABEL: name: ldbbrox
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load 1 from %ir.addr)
    ; CHECK: $w2 = COPY [[LDRBBroX]]
    ; CHECK: RET_ReallyLR implicit $w2
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_PTR_ADD %0, %1
    %4:gpr(s32) = G_LOAD %2(p0) :: (load 1 from %ir.addr)
    $w2 = COPY %4(s32)
    RET_ReallyLR implicit $w2
...
---
name:            ldrqrox
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body:             |
  bb.0:
    liveins: $d0, $x1
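    ; Show that a 128-bit vector load selects LDRQroX.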
    ; CHECK-LABEL: name: ldrqrox
    ; CHECK: liveins: $d0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load 16 from %ir.addr)
    ; CHECK: $q0 = COPY [[LDRQroX]]
    ; CHECK: RET_ReallyLR implicit $q0
    %0:gpr(p0) = COPY $d0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_PTR_ADD %0, %1
    %4:fpr(<2 x s64>) = G_LOAD %2(p0) :: (load 16 from %ir.addr)
    $q0 = COPY %4(<2 x s64>)
    RET_ReallyLR implicit $q0