• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
; RUN: llc < %s -mtriple=x86_64-linux  -tailcallopt  | FileCheck %s

; FIXME: Win64 does not support byval.

; Expect the entry point.
; CHECK: tailcaller:

; Expect 2 rep;movs because of tail call byval lowering.
; CHECK: rep;
; CHECK: rep;

; A sequence of copyto/copyfrom virtual registers is used to deal with byval
; lowering appearing after moving arguments to registers. The following two
; checks verify that the register allocator changes those sequences to direct
; moves to the argument register where it can (for registers that are not used
; in byval lowering - not rsi, not rdi, not rcx).
; Expect argument 4 to be moved directly to register edx.
; CHECK: movl $7, %edx

; Expect argument 6 to be moved directly to register r8.
; CHECK: movl $17, %r8d

; Expect a jmp to @tailcallee, not a call.
; CHECK: jmp tailcallee

; Expect the trailer.
; CHECK: .size tailcaller
; 24 x i64 = a 192-byte aggregate: large enough that the byval copies in both
; @tailcaller's frame and the outgoing tail-call arguments are lowered to
; rep;movs sequences (the two "rep;" checks above).
%struct.s = type { i64, i64, i64, i64, i64, i64, i64, i64,
                   i64, i64, i64, i64, i64, i64, i64, i64,
                   i64, i64, i64, i64, i64, i64, i64, i64 }

; Callee takes the struct by value plus five integer arguments, so some
; arguments land in registers that byval lowering does not itself need.
declare  fastcc i64 @tailcallee(%struct.s* byval %a, i64 %val, i64 %val2, i64 %val3, i64 %val4, i64 %val5)


; Loads the second i64 field of the byval struct, then forwards the struct and
; a mix of loaded/constant values to @tailcallee as a fastcc tail call, which
; -tailcallopt must emit as a jmp rather than a call (CHECK: jmp tailcallee).
define  fastcc i64 @tailcaller(i64 %b, %struct.s* byval %a) {
entry:
        ; &a->field1 (element 1 of the 24-element struct)
        %field1.ptr = getelementptr %struct.s* %a, i32 0, i32 1
        %field1 = load i64* %field1.ptr, align 8
        ; Arguments 4 (7) and 6 (17) should be moved straight into edx/r8d by
        ; the register allocator; rsi/rdi/rcx stay free for the byval rep;movs.
        %ret = tail call fastcc i64 @tailcallee(%struct.s* byval %a, i64 %field1, i64 %b, i64 7, i64 13, i64 17)
        ret i64 %ret
}