; RUN: llc -mtriple=x86_64-apple-macosx -mcpu=core2 < %s | FileCheck %s

declare i64 @testi()

define i64 @test_trivial() {
  %A = tail call i64 @testi()
  ret i64 %A
}
; CHECK-LABEL: test_trivial:
; CHECK: jmp _testi ## TAILCALL


define i64 @test_noop_bitcast() {
  %A = tail call i64 @testi()
  %B = bitcast i64 %A to i64
  ret i64 %B
}
; CHECK-LABEL: test_noop_bitcast:
; CHECK: jmp _testi ## TAILCALL


; Tail call shouldn't be blocked by no-op inttoptr.
define i8* @test_inttoptr() {
  %A = tail call i64 @testi()
  %B = inttoptr i64 %A to i8*
  ret i8* %B
}

; CHECK-LABEL: test_inttoptr:
; CHECK: jmp _testi ## TAILCALL


declare <4 x float> @testv()

define <4 x i32> @test_vectorbitcast() {
  %A = tail call <4 x float> @testv()
  %B = bitcast <4 x float> %A to <4 x i32>
  ret <4 x i32> %B
}
; CHECK-LABEL: test_vectorbitcast:
; CHECK: jmp _testv ## TAILCALL


declare { i64, i64 } @testp()

define {i64, i64} @test_pair_trivial() {
  %A = tail call { i64, i64} @testp()
  ret { i64, i64} %A
}
; CHECK-LABEL: test_pair_trivial:
; CHECK: jmp _testp ## TAILCALL

define {i64, i64} @test_pair_notail() {
  %A = tail call i64 @testi()

  %b = insertvalue {i64, i64} undef, i64 %A, 0
  %c = insertvalue {i64, i64} %b, i64 %A, 1

  ret { i64, i64} %c
}
; CHECK-LABEL: test_pair_notail:
; CHECK-NOT: jmp _testi

define {i64, i64} @test_pair_extract_trivial() {
  %A = tail call { i64, i64} @testp()
  %x = extractvalue { i64, i64} %A, 0
  %y = extractvalue { i64, i64} %A, 1

  %b = insertvalue {i64, i64} undef, i64 %x, 0
  %c = insertvalue {i64, i64} %b, i64 %y, 1

  ret { i64, i64} %c
}

; CHECK-LABEL: test_pair_extract_trivial:
; CHECK: jmp _testp ## TAILCALL

define {i64, i64} @test_pair_extract_notail() {
  %A = tail call { i64, i64} @testp()
  %x = extractvalue { i64, i64} %A, 0
  %y = extractvalue { i64, i64} %A, 1

  %b = insertvalue {i64, i64} undef, i64 %y, 0
  %c = insertvalue {i64, i64} %b, i64 %x, 1

  ret { i64, i64} %c
}

; CHECK-LABEL: test_pair_extract_notail:
; CHECK-NOT: jmp _testp

define {i8*, i64} @test_pair_extract_conv() {
  %A = tail call { i64, i64} @testp()
  %x = extractvalue { i64, i64} %A, 0
  %y = extractvalue { i64, i64} %A, 1

  %x1 = inttoptr i64 %x to i8*

  %b = insertvalue {i8*, i64} undef, i8* %x1, 0
  %c = insertvalue {i8*, i64} %b, i64 %y, 1

  ret { i8*, i64} %c
}

; CHECK-LABEL: test_pair_extract_conv:
; CHECK: jmp _testp ## TAILCALL

define {i64, i64} @test_pair_extract_multiple() {
  %A = tail call { i64, i64} @testp()
  %x = extractvalue { i64, i64} %A, 0
  %y = extractvalue { i64, i64} %A, 1

  %b = insertvalue {i64, i64} undef, i64 %x, 0
  %c = insertvalue {i64, i64} %b, i64 %y, 1

  %x1 = extractvalue { i64, i64} %b, 0
  %y1 = extractvalue { i64, i64} %c, 1

  %d = insertvalue {i64, i64} undef, i64 %x1, 0
  %e = insertvalue {i64, i64} %b, i64 %y1, 1

  ret { i64, i64} %e
}

; CHECK-LABEL: test_pair_extract_multiple:
; CHECK: jmp _testp ## TAILCALL

define {i64, i64} @test_pair_extract_undef() {
  %A = tail call { i64, i64} @testp()
  %x = extractvalue { i64, i64} %A, 0

  %b = insertvalue {i64, i64} undef, i64 %x, 0

  ret { i64, i64} %b
}

; CHECK-LABEL: test_pair_extract_undef:
; CHECK: jmp _testp ## TAILCALL

declare { i64, { i32, i32 } } @testn()

define {i64, {i32, i32}} @test_nest() {
  %A = tail call { i64, { i32, i32 } } @testn()
  %x = extractvalue { i64, { i32, i32}} %A, 0
  %y = extractvalue { i64, { i32, i32}} %A, 1
  %y1 = extractvalue { i32, i32} %y, 0
  %y2 = extractvalue { i32, i32} %y, 1

  %b = insertvalue {i64, {i32, i32}} undef, i64 %x, 0
  %c1 = insertvalue {i32, i32} undef, i32 %y1, 0
  %c2 = insertvalue {i32, i32} %c1, i32 %y2, 1
  %c = insertvalue {i64, {i32, i32}} %b, {i32, i32} %c2, 1

  ret { i64, { i32, i32}} %c
}

; CHECK-LABEL: test_nest:
; CHECK: jmp _testn ## TAILCALL

%struct.A = type { i32 }
%struct.B = type { %struct.A, i32 }

declare %struct.B* @testu()

define %struct.A* @test_upcast() {
entry:
  %A = tail call %struct.B* @testu()
  %x = getelementptr inbounds %struct.B, %struct.B* %A, i32 0, i32 0
  ret %struct.A* %x
}

; CHECK-LABEL: test_upcast:
; CHECK: jmp _testu ## TAILCALL

; PR13006
define { i64, i64 } @crash(i8* %this) {
  %c = tail call { i64, i64 } @testp()
  %mrv7 = insertvalue { i64, i64 } %c, i64 undef, 1
  ret { i64, i64 } %mrv7
}

; Check that we can fold an indexed load into a tail call instruction.
; CHECK: fold_indexed_load
; CHECK: leaq (%rsi,%rsi,4), %[[RAX:r..]]
; CHECK: jmpq *16(%{{r..}},%[[RAX]],8) ## TAILCALL
%struct.funcs = type { i32 (i8*, i32*, i32)*, i32 (i8*)*, i32 (i8*)*, i32 (i8*, i32)*, i32 }
@func_table = external global [0 x %struct.funcs]
define void @fold_indexed_load(i8* %mbstr, i64 %idxprom) nounwind uwtable ssp {
entry:
  %dsplen = getelementptr inbounds [0 x %struct.funcs], [0 x %struct.funcs]* @func_table, i64 0, i64 %idxprom, i32 2
  %x1 = load i32 (i8*)*, i32 (i8*)** %dsplen, align 8
  %call = tail call i32 %x1(i8* %mbstr) nounwind
  ret void
}

; <rdar://problem/12282281> Fold an indexed load into the tail call instruction.
; Calling a varargs function with 6 arguments requires 7 registers (%al is the
; vector count for varargs functions). This leaves %r11 as the only available
; scratch register.
;
; It is not possible to fold an indexed load into TCRETURNmi64 in that case.
;
; typedef int (*funcptr)(void*, ...);
; extern const funcptr funcs[];
; int f(int n) {
;   return funcs[n](0, 0, 0, 0, 0, 0);
; }
;
; CHECK-LABEL: rdar12282281
; CHECK: jmpq *%r11 ## TAILCALL
@funcs = external constant [0 x i32 (i8*, ...)*]

define i32 @rdar12282281(i32 %n) nounwind uwtable ssp {
entry:
  %idxprom = sext i32 %n to i64
  %arrayidx = getelementptr inbounds [0 x i32 (i8*, ...)*], [0 x i32 (i8*, ...)*]* @funcs, i64 0, i64 %idxprom
  %0 = load i32 (i8*, ...)*, i32 (i8*, ...)** %arrayidx, align 8
  %call = tail call i32 (i8*, ...) %0(i8* null, i32 0, i32 0, i32 0, i32 0, i32 0) nounwind
  ret i32 %call
}

define x86_fp80 @fp80_call(x86_fp80 %x) nounwind {
entry:
; CHECK-LABEL: fp80_call:
; CHECK: jmp _fp80_callee
  %call = tail call x86_fp80 @fp80_callee(x86_fp80 %x) nounwind
  ret x86_fp80 %call
}

declare x86_fp80 @fp80_callee(x86_fp80)

; rdar://12229511
define x86_fp80 @trunc_fp80(x86_fp80 %x) nounwind {
entry:
; CHECK-LABEL: trunc_fp80
; CHECK: callq _trunc
; CHECK-NOT: jmp _trunc
; CHECK: ret
  %conv = fptrunc x86_fp80 %x to double
  %call = tail call double @trunc(double %conv) nounwind readnone
  %conv1 = fpext double %call to x86_fp80
  ret x86_fp80 %conv1
}

declare double @trunc(double) nounwind readnone