# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu                                  -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=SSE
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx                      -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f                  -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512F
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512VL

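# This file exercises GlobalISel instruction selection for unordered atomic
# loads and stores on x86-64. Naturally aligned loads and stores of up to
# 8 bytes are already atomic on x86, so an unordered atomic access is expected
# to select to the same plain MOV instruction as a non-atomic one; the four
# RUN lines only vary the vector feature set (SSE, AVX, AVX512F, AVX512VL) to
# cover the different FP register classes checked below.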
--- |
  define i8 @test_load_i8(i8* %p1) {
    %r = load atomic i8, i8* %p1 unordered, align 1
    ret i8 %r
  }

  define i16 @test_load_i16(i16* %p1) {
    %r = load atomic i16, i16* %p1 unordered, align 2
    ret i16 %r
  }

  define i32 @test_load_i32(i32* %p1) {
    %r = load atomic i32, i32* %p1 unordered, align 4
    ret i32 %r
  }

  define i64 @test_load_i64(i64* %p1) {
    %r = load atomic i64, i64* %p1 unordered, align 8
    ret i64 %r
  }

  define float @test_load_float(float* %p1) {
    %r = load atomic float, float* %p1 unordered, align 4
    ret float %r
  }

  define float @test_load_float_vecreg(float* %p1) {
    %r = load atomic float, float* %p1 unordered, align 8
    ret float %r
  }

  define double @test_load_double(double* %p1) {
    %r = load atomic double, double* %p1 unordered, align 8
    ret double %r
  }

  define double @test_load_double_vecreg(double* %p1) {
    %r = load atomic double, double* %p1 unordered, align 8
    ret double %r
  }

  define i32* @test_store_i32(i32 %val, i32* %p1) {
    store atomic i32 %val, i32* %p1 unordered, align 4
    ret i32* %p1
  }

  define i64* @test_store_i64(i64 %val, i64* %p1) {
    store atomic i64 %val, i64* %p1 unordered, align 8
    ret i64* %p1
  }

  define float* @test_store_float(float %val, float* %p1) {
    store atomic float %val, float* %p1 unordered, align 4
    ret float* %p1
  }

  define float* @test_store_float_vec(float %val, float* %p1) {
    store atomic float %val, float* %p1 unordered, align 4
    ret float* %p1
  }

  define double* @test_store_double(double %val, double* %p1) {
    store atomic double %val, double* %p1 unordered, align 8
    ret double* %p1
  }

  define double* @test_store_double_vec(double %val, double* %p1) {
    store atomic double %val, double* %p1 unordered, align 8
    ret double* %p1
  }

  define i32* @test_load_ptr(i32** %ptr1) {
    %p = load atomic i32*, i32** %ptr1 unordered, align 8
    ret i32* %p
  }

  define void @test_store_ptr(i32** %ptr1, i32* %a) {
    store atomic i32* %a, i32** %ptr1 unordered, align 8
    ret void
  }

  define i32 @test_gep_folding(i32* %arr, i32 %val) {
    %arrayidx = getelementptr i32, i32* %arr, i32 5
    store atomic i32 %val, i32* %arrayidx unordered, align 8
    %r = load atomic i32, i32* %arrayidx unordered, align 8
    ret i32 %r
  }

  define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) #0 {
    %arrayidx = getelementptr i32, i32* %arr, i64 57179869180
    store atomic i32 %val, i32* %arrayidx unordered, align 8
    %r = load atomic i32, i32* %arrayidx unordered, align 8
    ret i32 %r
  }
...
---
name:            test_load_i8
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi

    ; SSE-LABEL: name: test_load_i8
    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 1 from %ir.p1)
    ; SSE: $al = COPY [[MOV8rm]]
    ; SSE: RET 0, implicit $al
    ; AVX-LABEL: name: test_load_i8
    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 1 from %ir.p1)
    ; AVX: $al = COPY [[MOV8rm]]
    ; AVX: RET 0, implicit $al
    ; AVX512F-LABEL: name: test_load_i8
    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 1 from %ir.p1)
    ; AVX512F: $al = COPY [[MOV8rm]]
    ; AVX512F: RET 0, implicit $al
    ; AVX512VL-LABEL: name: test_load_i8
    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 1 from %ir.p1)
    ; AVX512VL: $al = COPY [[MOV8rm]]
    ; AVX512VL: RET 0, implicit $al
    %0(p0) = COPY $rdi
    %1(s8) = G_LOAD %0(p0) :: (load unordered 1 from %ir.p1)
    $al = COPY %1(s8)
    RET 0, implicit $al

...
---
name:            test_load_i16
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi

    ; SSE-LABEL: name: test_load_i16
    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 2 from %ir.p1)
    ; SSE: $ax = COPY [[MOV16rm]]
    ; SSE: RET 0, implicit $ax
    ; AVX-LABEL: name: test_load_i16
    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 2 from %ir.p1)
    ; AVX: $ax = COPY [[MOV16rm]]
    ; AVX: RET 0, implicit $ax
    ; AVX512F-LABEL: name: test_load_i16
    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 2 from %ir.p1)
    ; AVX512F: $ax = COPY [[MOV16rm]]
    ; AVX512F: RET 0, implicit $ax
    ; AVX512VL-LABEL: name: test_load_i16
    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 2 from %ir.p1)
    ; AVX512VL: $ax = COPY [[MOV16rm]]
    ; AVX512VL: RET 0, implicit $ax
    %0(p0) = COPY $rdi
    %1(s16) = G_LOAD %0(p0) :: (load unordered 2 from %ir.p1)
    $ax = COPY %1(s16)
    RET 0, implicit $ax

...
---
name:            test_load_i32
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi

    ; SSE-LABEL: name: test_load_i32
    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.p1)
    ; SSE: $eax = COPY [[MOV32rm]]
    ; SSE: RET 0, implicit $eax
    ; AVX-LABEL: name: test_load_i32
    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.p1)
    ; AVX: $eax = COPY [[MOV32rm]]
    ; AVX: RET 0, implicit $eax
    ; AVX512F-LABEL: name: test_load_i32
    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.p1)
    ; AVX512F: $eax = COPY [[MOV32rm]]
    ; AVX512F: RET 0, implicit $eax
    ; AVX512VL-LABEL: name: test_load_i32
    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.p1)
    ; AVX512VL: $eax = COPY [[MOV32rm]]
    ; AVX512VL: RET 0, implicit $eax
    %0(p0) = COPY $rdi
    %1(s32) = G_LOAD %0(p0) :: (load unordered 4 from %ir.p1)
    $eax = COPY %1(s32)
    RET 0, implicit $eax

...
---
name:            test_load_i64
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi

    ; SSE-LABEL: name: test_load_i64
    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.p1)
    ; SSE: $rax = COPY [[MOV64rm]]
    ; SSE: RET 0, implicit $rax
    ; AVX-LABEL: name: test_load_i64
    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.p1)
    ; AVX: $rax = COPY [[MOV64rm]]
    ; AVX: RET 0, implicit $rax
    ; AVX512F-LABEL: name: test_load_i64
    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.p1)
    ; AVX512F: $rax = COPY [[MOV64rm]]
    ; AVX512F: RET 0, implicit $rax
    ; AVX512VL-LABEL: name: test_load_i64
    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.p1)
    ; AVX512VL: $rax = COPY [[MOV64rm]]
    ; AVX512VL: RET 0, implicit $rax
    %0(p0) = COPY $rdi
    %1(s64) = G_LOAD %0(p0) :: (load unordered 8 from %ir.p1)
    $rax = COPY %1(s64)
    RET 0, implicit $rax

...
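# For the FP load tests below, the register bank selector has left the load
# itself on the gpr bank, so selection is expected to produce an integer
# MOV32rm/MOV64rm followed by cross-bank copies into fr32/fr64 (fr32x/fr64x
# under AVX512F) and a widening copy into vr128/vr128x for the $xmm0 return.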
---
name:            test_load_float
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: gpr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: vecr, preferred-register: '' }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi

    ; SSE-LABEL: name: test_load_float
    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.p1)
    ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
    ; SSE: $xmm0 = COPY [[COPY2]]
    ; SSE: RET 0, implicit $xmm0
    ; AVX-LABEL: name: test_load_float
    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.p1)
    ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
    ; AVX: $xmm0 = COPY [[COPY2]]
    ; AVX: RET 0, implicit $xmm0
    ; AVX512F-LABEL: name: test_load_float
    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.p1)
    ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
    ; AVX512F: $xmm0 = COPY [[COPY2]]
    ; AVX512F: RET 0, implicit $xmm0
    ; AVX512VL-LABEL: name: test_load_float
    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.p1)
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
    ; AVX512VL: $xmm0 = COPY [[COPY2]]
    ; AVX512VL: RET 0, implicit $xmm0
    %0:gpr(p0) = COPY $rdi
    %1:gpr(s32) = G_LOAD %0(p0) :: (load unordered 4 from %ir.p1)
    %3:vecr(s32) = COPY %1(s32)
    %2:vecr(s128) = G_ANYEXT %3(s32)
    $xmm0 = COPY %2(s128)
    RET 0, implicit $xmm0

...
---
name:            test_load_float_vecreg
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: gpr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: vecr, preferred-register: '' }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi

    ; SSE-LABEL: name: test_load_float_vecreg
    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.p1)
    ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
    ; SSE: $xmm0 = COPY [[COPY2]]
    ; SSE: RET 0, implicit $xmm0
    ; AVX-LABEL: name: test_load_float_vecreg
    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.p1)
    ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
    ; AVX: $xmm0 = COPY [[COPY2]]
    ; AVX: RET 0, implicit $xmm0
    ; AVX512F-LABEL: name: test_load_float_vecreg
    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.p1)
    ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
    ; AVX512F: $xmm0 = COPY [[COPY2]]
    ; AVX512F: RET 0, implicit $xmm0
    ; AVX512VL-LABEL: name: test_load_float_vecreg
    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.p1)
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
    ; AVX512VL: $xmm0 = COPY [[COPY2]]
    ; AVX512VL: RET 0, implicit $xmm0
    %0:gpr(p0) = COPY $rdi
    %1:gpr(s32) = G_LOAD %0(p0) :: (load unordered 4 from %ir.p1)
    %3:vecr(s32) = COPY %1(s32)
    %2:vecr(s128) = G_ANYEXT %3(s32)
    $xmm0 = COPY %2(s128)
    RET 0, implicit $xmm0

...
---
name:            test_load_double
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: gpr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: vecr, preferred-register: '' }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi

    ; SSE-LABEL: name: test_load_double
    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.p1)
    ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
    ; SSE: $xmm0 = COPY [[COPY2]]
    ; SSE: RET 0, implicit $xmm0
    ; AVX-LABEL: name: test_load_double
    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.p1)
    ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
    ; AVX: $xmm0 = COPY [[COPY2]]
    ; AVX: RET 0, implicit $xmm0
    ; AVX512F-LABEL: name: test_load_double
    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.p1)
    ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
    ; AVX512F: $xmm0 = COPY [[COPY2]]
    ; AVX512F: RET 0, implicit $xmm0
    ; AVX512VL-LABEL: name: test_load_double
    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.p1)
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
    ; AVX512VL: $xmm0 = COPY [[COPY2]]
    ; AVX512VL: RET 0, implicit $xmm0
    %0:gpr(p0) = COPY $rdi
    %1:gpr(s64) = G_LOAD %0(p0) :: (load unordered 8 from %ir.p1)
    %3:vecr(s64) = COPY %1(s64)
    %2:vecr(s128) = G_ANYEXT %3(s64)
    $xmm0 = COPY %2(s128)
    RET 0, implicit $xmm0

...
---
name:            test_load_double_vecreg
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: gpr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: vecr, preferred-register: '' }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi

    ; SSE-LABEL: name: test_load_double_vecreg
    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.p1)
    ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
    ; SSE: $xmm0 = COPY [[COPY2]]
    ; SSE: RET 0, implicit $xmm0
    ; AVX-LABEL: name: test_load_double_vecreg
    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.p1)
    ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
    ; AVX: $xmm0 = COPY [[COPY2]]
    ; AVX: RET 0, implicit $xmm0
    ; AVX512F-LABEL: name: test_load_double_vecreg
    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.p1)
    ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
    ; AVX512F: $xmm0 = COPY [[COPY2]]
    ; AVX512F: RET 0, implicit $xmm0
    ; AVX512VL-LABEL: name: test_load_double_vecreg
    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.p1)
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
    ; AVX512VL: $xmm0 = COPY [[COPY2]]
    ; AVX512VL: RET 0, implicit $xmm0
    %0:gpr(p0) = COPY $rdi
    %1:gpr(s64) = G_LOAD %0(p0) :: (load unordered 8 from %ir.p1)
    %3:vecr(s64) = COPY %1(s64)
    %2:vecr(s128) = G_ANYEXT %3(s64)
    $xmm0 = COPY %2(s128)
    RET 0, implicit $xmm0

...
---
name:            test_store_i32
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $edi, $rsi

    ; SSE-LABEL: name: test_store_i32
    ; SSE: [[COPY:%[0-9]+]]:gr32 = COPY $edi
    ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
    ; SSE: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered 4 into %ir.p1)
    ; SSE: $rax = COPY [[COPY1]]
    ; SSE: RET 0, implicit $rax
    ; AVX-LABEL: name: test_store_i32
    ; AVX: [[COPY:%[0-9]+]]:gr32 = COPY $edi
    ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
    ; AVX: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered 4 into %ir.p1)
    ; AVX: $rax = COPY [[COPY1]]
    ; AVX: RET 0, implicit $rax
    ; AVX512F-LABEL: name: test_store_i32
    ; AVX512F: [[COPY:%[0-9]+]]:gr32 = COPY $edi
    ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
    ; AVX512F: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered 4 into %ir.p1)
    ; AVX512F: $rax = COPY [[COPY1]]
    ; AVX512F: RET 0, implicit $rax
    ; AVX512VL-LABEL: name: test_store_i32
    ; AVX512VL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
    ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
    ; AVX512VL: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered 4 into %ir.p1)
    ; AVX512VL: $rax = COPY [[COPY1]]
    ; AVX512VL: RET 0, implicit $rax
    %0(s32) = COPY $edi
    %1(p0) = COPY $rsi
    G_STORE %0(s32), %1(p0) :: (store unordered 4 into %ir.p1)
    $rax = COPY %1(p0)
    RET 0, implicit $rax

...
---
name:            test_store_i64
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi, $rsi

    ; SSE-LABEL: name: test_store_i64
    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
    ; SSE: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered 8 into %ir.p1)
    ; SSE: $rax = COPY [[COPY1]]
    ; SSE: RET 0, implicit $rax
    ; AVX-LABEL: name: test_store_i64
    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
    ; AVX: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered 8 into %ir.p1)
    ; AVX: $rax = COPY [[COPY1]]
    ; AVX: RET 0, implicit $rax
    ; AVX512F-LABEL: name: test_store_i64
    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
    ; AVX512F: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered 8 into %ir.p1)
    ; AVX512F: $rax = COPY [[COPY1]]
    ; AVX512F: RET 0, implicit $rax
    ; AVX512VL-LABEL: name: test_store_i64
    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
    ; AVX512VL: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered 8 into %ir.p1)
    ; AVX512VL: $rax = COPY [[COPY1]]
    ; AVX512VL: RET 0, implicit $rax
    %0(s64) = COPY $rdi
    %1(p0) = COPY $rsi
    G_STORE %0(s64), %1(p0) :: (store unordered 8 into %ir.p1)
    $rax = COPY %1(p0)
    RET 0, implicit $rax

...
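# The FP store tests mirror the loads: the value arrives in $xmm0 as an s128,
# is narrowed with G_TRUNC, copied back to the gpr bank, and should then
# select to a plain integer MOV32mr/MOV64mr store.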
---
name:            test_store_float
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: gpr, preferred-register: '' }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi, $xmm0

    ; SSE-LABEL: name: test_store_float
    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
    ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
    ; SSE: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 4 into %ir.p1)
    ; SSE: $rax = COPY [[COPY2]]
    ; SSE: RET 0, implicit $rax
    ; AVX-LABEL: name: test_store_float
    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
    ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
    ; AVX: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 4 into %ir.p1)
    ; AVX: $rax = COPY [[COPY2]]
    ; AVX: RET 0, implicit $rax
    ; AVX512F-LABEL: name: test_store_float
    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
    ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
    ; AVX512F: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 4 into %ir.p1)
    ; AVX512F: $rax = COPY [[COPY2]]
    ; AVX512F: RET 0, implicit $rax
    ; AVX512VL-LABEL: name: test_store_float
    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
    ; AVX512VL: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 4 into %ir.p1)
    ; AVX512VL: $rax = COPY [[COPY2]]
    ; AVX512VL: RET 0, implicit $rax
    %2:vecr(s128) = COPY $xmm0
    %0:vecr(s32) = G_TRUNC %2(s128)
    %1:gpr(p0) = COPY $rdi
    %3:gpr(s32) = COPY %0(s32)
    G_STORE %3(s32), %1(p0) :: (store unordered 4 into %ir.p1)
    $rax = COPY %1(p0)
    RET 0, implicit $rax

...
---
name:            test_store_float_vec
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: gpr, preferred-register: '' }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi, $xmm0

    ; SSE-LABEL: name: test_store_float_vec
    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
    ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
    ; SSE: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 4 into %ir.p1)
    ; SSE: $rax = COPY [[COPY2]]
    ; SSE: RET 0, implicit $rax
    ; AVX-LABEL: name: test_store_float_vec
    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
    ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
    ; AVX: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 4 into %ir.p1)
    ; AVX: $rax = COPY [[COPY2]]
    ; AVX: RET 0, implicit $rax
    ; AVX512F-LABEL: name: test_store_float_vec
    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
    ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
    ; AVX512F: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 4 into %ir.p1)
    ; AVX512F: $rax = COPY [[COPY2]]
    ; AVX512F: RET 0, implicit $rax
    ; AVX512VL-LABEL: name: test_store_float_vec
    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
    ; AVX512VL: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 4 into %ir.p1)
    ; AVX512VL: $rax = COPY [[COPY2]]
    ; AVX512VL: RET 0, implicit $rax
    %2:vecr(s128) = COPY $xmm0
    %0:vecr(s32) = G_TRUNC %2(s128)
    %1:gpr(p0) = COPY $rdi
    %3:gpr(s32) = COPY %0(s32)
    G_STORE %3(s32), %1(p0) :: (store unordered 4 into %ir.p1)
    $rax = COPY %1(p0)
    RET 0, implicit $rax

...
---
name:            test_store_double
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: gpr, preferred-register: '' }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi, $xmm0

    ; SSE-LABEL: name: test_store_double
    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
    ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
    ; SSE: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 8 into %ir.p1)
    ; SSE: $rax = COPY [[COPY2]]
    ; SSE: RET 0, implicit $rax
    ; AVX-LABEL: name: test_store_double
    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
    ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
    ; AVX: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 8 into %ir.p1)
    ; AVX: $rax = COPY [[COPY2]]
    ; AVX: RET 0, implicit $rax
    ; AVX512F-LABEL: name: test_store_double
    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
    ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
    ; AVX512F: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 8 into %ir.p1)
    ; AVX512F: $rax = COPY [[COPY2]]
    ; AVX512F: RET 0, implicit $rax
    ; AVX512VL-LABEL: name: test_store_double
    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
    ; AVX512VL: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 8 into %ir.p1)
    ; AVX512VL: $rax = COPY [[COPY2]]
    ; AVX512VL: RET 0, implicit $rax
    %2:vecr(s128) = COPY $xmm0
    %0:vecr(s64) = G_TRUNC %2(s128)
    %1:gpr(p0) = COPY $rdi
    %3:gpr(s64) = COPY %0(s64)
    G_STORE %3(s64), %1(p0) :: (store unordered 8 into %ir.p1)
    $rax = COPY %1(p0)
    RET 0, implicit $rax

...
---
name:            test_store_double_vec
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: gpr, preferred-register: '' }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi, $xmm0

    ; SSE-LABEL: name: test_store_double_vec
    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
    ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
    ; SSE: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 8 into %ir.p1)
    ; SSE: $rax = COPY [[COPY2]]
    ; SSE: RET 0, implicit $rax
    ; AVX-LABEL: name: test_store_double_vec
    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
    ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
    ; AVX: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 8 into %ir.p1)
    ; AVX: $rax = COPY [[COPY2]]
    ; AVX: RET 0, implicit $rax
    ; AVX512F-LABEL: name: test_store_double_vec
    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
    ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
    ; AVX512F: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 8 into %ir.p1)
    ; AVX512F: $rax = COPY [[COPY2]]
    ; AVX512F: RET 0, implicit $rax
    ; AVX512VL-LABEL: name: test_store_double_vec
    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
    ; AVX512VL: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered 8 into %ir.p1)
    ; AVX512VL: $rax = COPY [[COPY2]]
    ; AVX512VL: RET 0, implicit $rax
    %2:vecr(s128) = COPY $xmm0
    %0:vecr(s64) = G_TRUNC %2(s128)
    %1:gpr(p0) = COPY $rdi
    %3:gpr(s64) = COPY %0(s64)
    G_STORE %3(s64), %1(p0) :: (store unordered 8 into %ir.p1)
    $rax = COPY %1(p0)
    RET 0, implicit $rax

...
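# Pointer (p0) loads and stores are expected to select exactly like their
# 64-bit integer counterparts, i.e. to MOV64rm/MOV64mr.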
---
name:            test_load_ptr
alignment:       16
legalized:       true
regBankSelected: true
selected:        false
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi

    ; SSE-LABEL: name: test_load_ptr
    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.ptr1)
    ; SSE: $rax = COPY [[MOV64rm]]
    ; SSE: RET 0, implicit $rax
    ; AVX-LABEL: name: test_load_ptr
    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.ptr1)
    ; AVX: $rax = COPY [[MOV64rm]]
    ; AVX: RET 0, implicit $rax
    ; AVX512F-LABEL: name: test_load_ptr
    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.ptr1)
    ; AVX512F: $rax = COPY [[MOV64rm]]
    ; AVX512F: RET 0, implicit $rax
    ; AVX512VL-LABEL: name: test_load_ptr
    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered 8 from %ir.ptr1)
    ; AVX512VL: $rax = COPY [[MOV64rm]]
    ; AVX512VL: RET 0, implicit $rax
    %0(p0) = COPY $rdi
    %1(p0) = G_LOAD %0(p0) :: (load unordered 8 from %ir.ptr1)
    $rax = COPY %1(p0)
    RET 0, implicit $rax

...
---
name:            test_store_ptr
alignment:       16
legalized:       true
regBankSelected: true
selected:        false
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi, $rsi

    ; SSE-LABEL: name: test_store_ptr
    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
    ; SSE: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered 8 into %ir.ptr1)
    ; SSE: RET 0
    ; AVX-LABEL: name: test_store_ptr
    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
    ; AVX: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered 8 into %ir.ptr1)
    ; AVX: RET 0
    ; AVX512F-LABEL: name: test_store_ptr
    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
    ; AVX512F: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered 8 into %ir.ptr1)
    ; AVX512F: RET 0
    ; AVX512VL-LABEL: name: test_store_ptr
    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
    ; AVX512VL: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered 8 into %ir.ptr1)
    ; AVX512VL: RET 0
    %0(p0) = COPY $rdi
    %1(p0) = COPY $rsi
    G_STORE %1(p0), %0(p0) :: (store unordered 8 into %ir.ptr1)
    RET 0

...
840name:            test_gep_folding
841alignment:       16
842legalized:       true
843regBankSelected: true
844registers:
845  - { id: 0, class: gpr }
846  - { id: 1, class: gpr }
847  - { id: 2, class: gpr }
848  - { id: 3, class: gpr }
849  - { id: 4, class: gpr }
850body:             |
851  bb.1 (%ir-block.0):
852    liveins: $esi, $rdi
853
854    ; SSE-LABEL: name: test_gep_folding
855    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
856    ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
857    ; SSE: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store unordered 4 into %ir.arrayidx)
858    ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load unordered 4 from %ir.arrayidx)
859    ; SSE: $eax = COPY [[MOV32rm]]
860    ; SSE: RET 0, implicit $eax
861    ; AVX-LABEL: name: test_gep_folding
862    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
863    ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
864    ; AVX: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store unordered 4 into %ir.arrayidx)
865    ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load unordered 4 from %ir.arrayidx)
866    ; AVX: $eax = COPY [[MOV32rm]]
867    ; AVX: RET 0, implicit $eax
868    ; AVX512F-LABEL: name: test_gep_folding
869    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
870    ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
871    ; AVX512F: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store unordered 4 into %ir.arrayidx)
872    ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load unordered 4 from %ir.arrayidx)
873    ; AVX512F: $eax = COPY [[MOV32rm]]
874    ; AVX512F: RET 0, implicit $eax
875    ; AVX512VL-LABEL: name: test_gep_folding
876    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
877    ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
878    ; AVX512VL: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store unordered 4 into %ir.arrayidx)
879    ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load unordered 4 from %ir.arrayidx)
880    ; AVX512VL: $eax = COPY [[MOV32rm]]
881    ; AVX512VL: RET 0, implicit $eax
882    %0(p0) = COPY $rdi
883    %1(s32) = COPY $esi
884    %2(s64) = G_CONSTANT i64 20
885    %3(p0) = G_PTR_ADD %0, %2(s64)
886    G_STORE %1(s32), %3(p0) :: (store unordered 4 into %ir.arrayidx)
887    %4(s32) = G_LOAD %3(p0) :: (load unordered 4 from %ir.arrayidx)
888    $eax = COPY %4(s32)
889    RET 0, implicit $eax
890
891...
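# Here the byte offset (57179869180 * 4 = 228719476720) does not fit in the
# signed 32-bit displacement field, so it has to be materialized with MOV64ri
# and added via LEA64r before the memory access.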
---
name:            test_gep_folding_largeGepIndex
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
  - { id: 2, class: gpr }
  - { id: 3, class: gpr }
  - { id: 4, class: gpr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $esi, $rdi

    ; SSE-LABEL: name: test_gep_folding_largeGepIndex
    ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
    ; SSE: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
    ; SSE: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
    ; SSE: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered 4 into %ir.arrayidx)
    ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.arrayidx)
    ; SSE: $eax = COPY [[MOV32rm]]
    ; SSE: RET 0, implicit $eax
    ; AVX-LABEL: name: test_gep_folding_largeGepIndex
    ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
    ; AVX: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
    ; AVX: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
    ; AVX: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered 4 into %ir.arrayidx)
    ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.arrayidx)
    ; AVX: $eax = COPY [[MOV32rm]]
    ; AVX: RET 0, implicit $eax
    ; AVX512F-LABEL: name: test_gep_folding_largeGepIndex
    ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
    ; AVX512F: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
    ; AVX512F: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
    ; AVX512F: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered 4 into %ir.arrayidx)
    ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.arrayidx)
    ; AVX512F: $eax = COPY [[MOV32rm]]
    ; AVX512F: RET 0, implicit $eax
    ; AVX512VL-LABEL: name: test_gep_folding_largeGepIndex
    ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
    ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
    ; AVX512VL: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
    ; AVX512VL: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
    ; AVX512VL: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered 4 into %ir.arrayidx)
    ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load unordered 4 from %ir.arrayidx)
    ; AVX512VL: $eax = COPY [[MOV32rm]]
    ; AVX512VL: RET 0, implicit $eax
    %0(p0) = COPY $rdi
    %1(s32) = COPY $esi
    %2(s64) = G_CONSTANT i64 228719476720
    %3(p0) = G_PTR_ADD %0, %2(s64)
    G_STORE %1(s32), %3(p0) :: (store unordered 4 into %ir.arrayidx)
    %4(s32) = G_LOAD %3(p0) :: (load unordered 4 from %ir.arrayidx)
    $eax = COPY %4(s32)
    RET 0, implicit $eax

...