; Tests of sibling-call (tail-call) emission on x86-32 and x86-64.
; RUN: llc < %s -mtriple=i686-linux -mattr=+sse2 -asm-verbose=false | FileCheck %s -check-prefix=32
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2 -asm-verbose=false | FileCheck %s -check-prefix=64

define void @t1(i32 %x) nounwind ssp {
entry:
; 32: t1:
; 32: jmp {{_?}}foo

; 64: t1:
; 64: jmp {{_?}}foo
  tail call void @foo() nounwind
  ret void
}

declare void @foo()

define void @t2() nounwind ssp {
entry:
; 32: t2:
; 32: jmp {{_?}}foo2

; 64: t2:
; 64: jmp {{_?}}foo2
  %0 = tail call i32 @foo2() nounwind
  ret void
}

declare i32 @foo2()

define void @t3() nounwind ssp {
entry:
; 32: t3:
; 32: jmp {{_?}}foo3

; 64: t3:
; 64: jmp {{_?}}foo3
  %0 = tail call i32 @foo3() nounwind
  ret void
}

declare i32 @foo3()

define void @t4(void (i32)* nocapture %x) nounwind ssp {
entry:
; 32: t4:
; 32: calll *
; FIXME: gcc can generate a tailcall for this. But it's tricky.

; 64: t4:
; 64-NOT: call
; 64: jmpq *
  tail call void %x(i32 0) nounwind
  ret void
}

define void @t5(void ()* nocapture %x) nounwind ssp {
entry:
; 32: t5:
; 32-NOT: call
; 32: jmpl *4(%esp)

; 64: t5:
; 64-NOT: call
; 64: jmpq *%rdi
  tail call void %x() nounwind
  ret void
}

define i32 @t6(i32 %x) nounwind ssp {
entry:
; 32: t6:
; 32: calll {{_?}}t6
; 32: jmp {{_?}}bar

; 64: t6:
; 64: jmp {{_?}}t6
; 64: jmp {{_?}}bar
  %0 = icmp slt i32 %x, 10
  br i1 %0, label %bb, label %bb1

bb:
  %1 = add nsw i32 %x, -1
  %2 = tail call i32 @t6(i32 %1) nounwind ssp
  ret i32 %2

bb1:
  %3 = tail call i32 @bar(i32 %x) nounwind
  ret i32 %3
}

declare i32 @bar(i32)

define i32 @t7(i32 %a, i32 %b, i32 %c) nounwind ssp {
entry:
; 32: t7:
; 32: jmp {{_?}}bar2

; 64: t7:
; 64: jmp {{_?}}bar2
  %0 = tail call i32 @bar2(i32 %a, i32 %b, i32 %c) nounwind
  ret i32 %0
}

declare i32 @bar2(i32, i32, i32)

define signext i16 @t8() nounwind ssp {
entry:
; 32: t8:
; 32: calll {{_?}}bar3

; 64: t8:
; 64: callq {{_?}}bar3
  %0 = tail call signext i16 @bar3() nounwind      ; <i16> [#uses=1]
  ret i16 %0
}

declare signext i16 @bar3()

define signext i16 @t9(i32 (i32)* nocapture %x) nounwind ssp {
entry:
; 32: t9:
; 32: calll *

; 64: t9:
; 64: callq *
  %0 = bitcast i32 (i32)* %x to i16 (i32)*
  %1 = tail call signext i16 %0(i32 0) nounwind
  ret i16 %1
}

define void @t10() nounwind ssp {
entry:
; 32: t10:
; 32: calll

; 64: t10:
; 64: callq
  %0 = tail call i32 @foo4() noreturn nounwind
  unreachable
}

declare i32 @foo4()

define i32 @t11(i32 %x, i32 %y, i32 %z.0, i32 %z.1, i32 %z.2) nounwind ssp {
; In 32-bit mode, it's emitting a bunch of dead loads that are not being
; eliminated currently.

; 32: t11:
; 32-NOT: subl ${{[0-9]+}}, %esp
; 32: je
; 32-NOT: movl
; 32-NOT: addl ${{[0-9]+}}, %esp
; 32: jmp {{_?}}foo5

; 64: t11:
; 64-NOT: subq ${{[0-9]+}}, %esp
; 64-NOT: addq ${{[0-9]+}}, %esp
; 64: jmp {{_?}}foo5
entry:
  %0 = icmp eq i32 %x, 0
  br i1 %0, label %bb6, label %bb

bb:
  %1 = tail call i32 @foo5(i32 %x, i32 %y, i32 %z.0, i32 %z.1, i32 %z.2) nounwind
  ret i32 %1

bb6:
  ret i32 0
}

declare i32 @foo5(i32, i32, i32, i32, i32)

%struct.t = type { i32, i32, i32, i32, i32 }

define i32 @t12(i32 %x, i32 %y, %struct.t* byval align 4 %z) nounwind ssp {
; 32: t12:
; 32-NOT: subl ${{[0-9]+}}, %esp
; 32-NOT: addl ${{[0-9]+}}, %esp
; 32: jmp {{_?}}foo6

; 64: t12:
; 64-NOT: subq ${{[0-9]+}}, %esp
; 64-NOT: addq ${{[0-9]+}}, %esp
; 64: jmp {{_?}}foo6
entry:
  %0 = icmp eq i32 %x, 0
  br i1 %0, label %bb2, label %bb

bb:
  %1 = tail call i32 @foo6(i32 %x, i32 %y, %struct.t* byval align 4 %z) nounwind
  ret i32 %1

bb2:
  ret i32 0
}

declare i32 @foo6(i32, i32, %struct.t* byval align 4)

; rdar://r7717598
%struct.ns = type { i32, i32 }
%struct.cp = type { float, float, float, float, float }

define %struct.ns* @t13(%struct.cp* %yy) nounwind ssp {
; 32: t13:
; 32-NOT: jmp
; 32: calll
; 32: ret

; 64: t13:
; 64-NOT: jmp
; 64: callq
; 64: ret
entry:
  %0 = tail call fastcc %struct.ns* @foo7(%struct.cp* byval align 4 %yy, i8 signext 0) nounwind
  ret %struct.ns* %0
}

; rdar://6195379
; llvm can't do sibcall for this in 32-bit mode (yet).
declare fastcc %struct.ns* @foo7(%struct.cp* byval align 4, i8 signext) nounwind ssp

%struct.__block_descriptor = type { i64, i64 }
%struct.__block_descriptor_withcopydispose = type { i64, i64, i8*, i8* }
%struct.__block_literal_1 = type { i8*, i32, i32, i8*, %struct.__block_descriptor* }
%struct.__block_literal_2 = type { i8*, i32, i32, i8*, %struct.__block_descriptor_withcopydispose*, void ()* }

define void @t14(%struct.__block_literal_2* nocapture %.block_descriptor) nounwind ssp {
entry:
; 64: t14:
; 64: movq 32(%rdi)
; 64-NOT: movq 16(%rdi)
; 64: jmpq *16({{%rdi|%rax}})
  %0 = getelementptr inbounds %struct.__block_literal_2* %.block_descriptor, i64 0, i32 5 ; <void ()**> [#uses=1]
  %1 = load void ()** %0, align 8 ; <void ()*> [#uses=2]
  %2 = bitcast void ()* %1 to %struct.__block_literal_1* ; <%struct.__block_literal_1*> [#uses=1]
  %3 = getelementptr inbounds %struct.__block_literal_1* %2, i64 0, i32 3 ; <i8**> [#uses=1]
  %4 = load i8** %3, align 8 ; <i8*> [#uses=1]
  %5 = bitcast i8* %4 to void (i8*)* ; <void (i8*)*> [#uses=1]
  %6 = bitcast void ()* %1 to i8* ; <i8*> [#uses=1]
  tail call void %5(i8* %6) nounwind
  ret void
}

; rdar://7726868
%struct.foo = type { [4 x i32] }

define void @t15(%struct.foo* noalias sret %agg.result) nounwind {
; 32: t15:
; 32: calll {{_?}}f
; 32: ret $4

; 64: t15:
; 64: callq {{_?}}f
; 64: ret
  tail call fastcc void @f(%struct.foo* noalias sret %agg.result) nounwind
  ret void
}

declare void @f(%struct.foo* noalias sret) nounwind

define void @t16() nounwind ssp {
entry:
; 32: t16:
; 32: calll {{_?}}bar4
; 32: fstp

; 64: t16:
; 64: jmp {{_?}}bar4
  %0 = tail call double @bar4() nounwind
  ret void
}

declare double @bar4()

; rdar://6283267
define void @t17() nounwind ssp {
entry:
; 32: t17:
; 32: jmp {{_?}}bar5

; 64: t17:
; 64: xorb %al, %al
; 64: jmp {{_?}}bar5
  tail call void (...)* @bar5() nounwind
  ret void
}

declare void @bar5(...)

; rdar://7774847
define void @t18() nounwind ssp {
entry:
; 32: t18:
; 32: calll {{_?}}bar6
; 32: fstp %st(0)

; 64: t18:
; 64: xorb %al, %al
; 64: jmp {{_?}}bar6
  %0 = tail call double (...)* @bar6() nounwind
  ret void
}

declare double @bar6(...)

define void @t19() alignstack(32) nounwind {
entry:
; CHECK: t19:
; CHECK: andl $-32
; CHECK: calll {{_?}}foo
  tail call void @foo() nounwind
  ret void
}

; If caller / callee calling convention mismatch then check if the return
; values are returned in the same registers.
; rdar://7874780

define double @t20(double %x) nounwind {
entry:
; 32: t20:
; 32: calll {{_?}}foo20
; 32: fldl (%esp)

; 64: t20:
; 64: jmp {{_?}}foo20
  %0 = tail call fastcc double @foo20(double %x) nounwind
  ret double %0
}

declare fastcc double @foo20(double) nounwind