; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -O0 -stop-after=irtranslator -global-isel -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s
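; Hint: if the IR below is modified, the CHECK lines can be regenerated by
; rerunning utils/update_mir_test_checks.py (named in the NOTE above) on this file.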

target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-linux-gnu"

define i32 @args_i32(i32 %w0, i32 %w1, i32 %w2, i32 %w3,
  ; CHECK-LABEL: name: args_i32
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $w0, $w1, $w2, $w3, $w4, $w5, $w6, $w7
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
  ; CHECK:   [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
  ; CHECK:   [[COPY3:%[0-9]+]]:_(s32) = COPY $w3
  ; CHECK:   [[COPY4:%[0-9]+]]:_(s32) = COPY $w4
  ; CHECK:   [[COPY5:%[0-9]+]]:_(s32) = COPY $w5
  ; CHECK:   [[COPY6:%[0-9]+]]:_(s32) = COPY $w6
  ; CHECK:   [[COPY7:%[0-9]+]]:_(s32) = COPY $w7
  ; CHECK:   $w0 = COPY [[COPY]](s32)
  ; CHECK:   RET_ReallyLR implicit $w0
                     i32 %w4, i32 %w5, i32 %w6, i32 %w7) {
  ret i32 %w0
}

define i64 @args_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3,
  ; CHECK-LABEL: name: args_i64
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
  ; CHECK:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
  ; CHECK:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
  ; CHECK:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
  ; CHECK:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x5
  ; CHECK:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
  ; CHECK:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
  ; CHECK:   $x0 = COPY [[COPY]](s64)
  ; CHECK:   RET_ReallyLR implicit $x0
                     i64 %x4, i64 %x5, i64 %x6, i64 %x7) {
  ret i64 %x0
}

define i8* @args_ptrs(i8* %x0, i16* %x1, <2 x i8>* %x2, {i8, i16, i32}* %x3,
  ; CHECK-LABEL: name: args_ptrs
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
  ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
  ; CHECK:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x3
  ; CHECK:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x4
  ; CHECK:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x5
  ; CHECK:   [[COPY6:%[0-9]+]]:_(p0) = COPY $x6
  ; CHECK:   [[COPY7:%[0-9]+]]:_(p0) = COPY $x7
  ; CHECK:   $x0 = COPY [[COPY]](p0)
  ; CHECK:   RET_ReallyLR implicit $x0
                      [3 x float]* %x4, double* %x5, i8* %x6, i8* %x7) {
  ret i8* %x0
}

define [1 x double] @args_arr([1 x double] %d0) {
  ; CHECK-LABEL: name: args_arr
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $d0
  ; CHECK:   [[COPY:%[0-9]+]]:_(s64) = COPY $d0
  ; CHECK:   $d0 = COPY [[COPY]](s64)
  ; CHECK:   RET_ReallyLR implicit $d0
  ret [1 x double] %d0
}

declare void @varargs(i32, double, i64, ...)
define void @test_varargs() {
  ; CHECK-LABEL: name: test_varargs
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
  ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
  ; CHECK:   [[C3:%[0-9]+]]:_(s8) = G_CONSTANT i8 3
  ; CHECK:   [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
  ; CHECK:   [[C7:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
  ; CHECK:   $w0 = COPY [[C]](s32)
  ; CHECK:   $d0 = COPY [[C1]](s64)
  ; CHECK:   $x1 = COPY [[C2]](s64)
  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C3]](s8)
  ; CHECK:   $w2 = COPY [[ANYEXT]](s32)
  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C4]](s16)
  ; CHECK:   $w3 = COPY [[ANYEXT1]](s32)
  ; CHECK:   $w4 = COPY [[C5]](s32)
  ; CHECK:   $s1 = COPY [[C6]](s32)
  ; CHECK:   $d2 = COPY [[C7]](s64)
  ; CHECK:   BL @varargs, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0, implicit $d0, implicit $x1, implicit $w2, implicit $w3, implicit $w4, implicit $s1, implicit $d2
  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
  ; CHECK:   RET_ReallyLR
  call void(i32, double, i64, ...) @varargs(i32 42, double 1.0, i64 12, i8 3, i16 1, i32 4, float 1.0, double 2.0)
  ret void
}

; signext/zeroext parameters on the stack: not part of any real ABI as far as I
; know, but ELF currently allocates 8 bytes for a signext parameter on the
; stack. The ADJCALLSTACK ops should reflect this, even if the difference is
; theoretical.
declare void @stack_ext_needed([8 x i64], i8 signext %in)
define void @test_stack_ext_needed() {
  ; CHECK-LABEL: name: test_stack_ext_needed
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
  ; CHECK:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 42
  ; CHECK:   ADJCALLSTACKDOWN 8, 0, implicit-def $sp, implicit $sp
  ; CHECK:   $x0 = COPY [[DEF]](s64)
  ; CHECK:   $x1 = COPY [[DEF]](s64)
  ; CHECK:   $x2 = COPY [[DEF]](s64)
  ; CHECK:   $x3 = COPY [[DEF]](s64)
  ; CHECK:   $x4 = COPY [[DEF]](s64)
  ; CHECK:   $x5 = COPY [[DEF]](s64)
  ; CHECK:   $x6 = COPY [[DEF]](s64)
  ; CHECK:   $x7 = COPY [[DEF]](s64)
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $sp
  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
  ; CHECK:   G_STORE [[C]](s8), [[PTR_ADD]](p0) :: (store 1 into stack)
  ; CHECK:   BL @stack_ext_needed, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7
  ; CHECK:   ADJCALLSTACKUP 8, 0, implicit-def $sp, implicit $sp
  ; CHECK:   RET_ReallyLR
  call void @stack_ext_needed([8 x i64] undef, i8 signext 42)
  ret void
}

; Check that we can lower incoming i128 types into constituent s64 gprs.
define void @callee_s128(i128 %a, i128 %b, i128 *%ptr) {
  ; CHECK-LABEL: name: callee_s128
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $x0, $x1, $x2, $x3, $x4
  ; CHECK:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK:   [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64)
  ; CHECK:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
  ; CHECK:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
  ; CHECK:   [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY2]](s64), [[COPY3]](s64)
  ; CHECK:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x4
  ; CHECK:   G_STORE [[MV1]](s128), [[COPY4]](p0) :: (store 16 into %ir.ptr)
  ; CHECK:   RET_ReallyLR
  store i128 %b, i128 *%ptr
  ret void
}

; Check that we can lower outgoing s128 arguments into constituent s64 gprs.
define void @caller_s128(i128 *%ptr) {
  ; CHECK-LABEL: name: caller_s128
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $x0
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.ptr)
  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
  ; CHECK:   [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](s128)
  ; CHECK:   [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](s128)
  ; CHECK:   $x0 = COPY [[UV]](s64)
  ; CHECK:   $x1 = COPY [[UV1]](s64)
  ; CHECK:   $x2 = COPY [[UV2]](s64)
  ; CHECK:   $x3 = COPY [[UV3]](s64)
  ; CHECK:   $x4 = COPY [[COPY]](p0)
  ; CHECK:   BL @callee_s128, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4
  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
  ; CHECK:   RET_ReallyLR
  %v = load i128, i128 *%ptr
  call void @callee_s128(i128 %v, i128 %v, i128 *%ptr)
  ret void
}

declare i64 @i8i16callee(i64 %a1, i64 %a2, i64 %a3, i8 signext %a4, i16 signext %a5, i64 %a6, i64 %a7, i64 %a8, i8 signext %b1, i16 signext %b2, i8 signext %b3, i8 signext %b4) nounwind readnone noinline

define i32 @i8i16caller() nounwind readnone {
  ; CHECK-LABEL: name: i8i16caller
  ; CHECK: bb.1.entry:
  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
  ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
  ; CHECK:   [[C3:%[0-9]+]]:_(s8) = G_CONSTANT i8 3
  ; CHECK:   [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
  ; CHECK:   [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
  ; CHECK:   [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
  ; CHECK:   [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
  ; CHECK:   [[C8:%[0-9]+]]:_(s8) = G_CONSTANT i8 97
  ; CHECK:   [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 98
  ; CHECK:   [[C10:%[0-9]+]]:_(s8) = G_CONSTANT i8 99
  ; CHECK:   [[C11:%[0-9]+]]:_(s8) = G_CONSTANT i8 100
  ; CHECK:   ADJCALLSTACKDOWN 32, 0, implicit-def $sp, implicit $sp
  ; CHECK:   $x0 = COPY [[C]](s64)
  ; CHECK:   $x1 = COPY [[C1]](s64)
  ; CHECK:   $x2 = COPY [[C2]](s64)
  ; CHECK:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[C3]](s8)
  ; CHECK:   $w3 = COPY [[SEXT]](s32)
  ; CHECK:   [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[C4]](s16)
  ; CHECK:   $w4 = COPY [[SEXT1]](s32)
  ; CHECK:   $x5 = COPY [[C5]](s64)
  ; CHECK:   $x6 = COPY [[C6]](s64)
  ; CHECK:   $x7 = COPY [[C7]](s64)
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $sp
  ; CHECK:   [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C12]](s64)
  ; CHECK:   G_STORE [[C8]](s8), [[PTR_ADD]](p0) :: (store 1 into stack)
  ; CHECK:   [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C13]](s64)
  ; CHECK:   G_STORE [[C9]](s16), [[PTR_ADD1]](p0) :: (store 2 into stack + 8, align 1)
  ; CHECK:   [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
  ; CHECK:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C14]](s64)
  ; CHECK:   G_STORE [[C10]](s8), [[PTR_ADD2]](p0) :: (store 1 into stack + 16)
  ; CHECK:   [[C15:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
  ; CHECK:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C15]](s64)
  ; CHECK:   G_STORE [[C11]](s8), [[PTR_ADD3]](p0) :: (store 1 into stack + 24)
  ; CHECK:   BL @i8i16callee, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $w3, implicit $w4, implicit $x5, implicit $x6, implicit $x7, implicit-def $x0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK:   ADJCALLSTACKUP 32, 0, implicit-def $sp, implicit $sp
  ; CHECK:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
  ; CHECK:   $w0 = COPY [[TRUNC]](s32)
  ; CHECK:   RET_ReallyLR implicit $w0
entry:
  %call = tail call i64 @i8i16callee(i64 0, i64 1, i64 2, i8 signext 3, i16 signext 4, i64 5, i64 6, i64 7, i8 97, i16 98, i8 99, i8 100)
  %conv = trunc i64 %call to i32
  ret i32 %conv
}