; (code-browser navigation chrome from the scraped copy, commented out so it
;  is not mistaken for test content: Home / Line# / Scopes# / Navigate / Raw / Download)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -verify-machineinstrs < %s -enable-tail-merge=0 -mtriple=x86_64-linux | FileCheck %s --check-prefix=LINUX
3; RUN: llc -verify-machineinstrs < %s -enable-tail-merge=0 -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefix=LINUX-X32
4; RUN: llc -verify-machineinstrs < %s -enable-tail-merge=0 -mtriple=x86_64-windows | FileCheck %s --check-prefix=WINDOWS
5; RUN: llc -verify-machineinstrs < %s -enable-tail-merge=0 -mtriple=i686-windows | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
6; RUN: llc -verify-machineinstrs < %s -enable-tail-merge=0 -mtriple=i686-windows -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE
7
8; Test that we actually spill and reload all arguments in the variadic argument
9; pack. Doing a normal call will clobber all argument registers, and we will
10; spill around it. A simple adjustment should not require any XMM spills.
11
12declare void @llvm.va_start(i8*) nounwind
; va_start intrinsic: each thunk below uses it to force the incoming
; variadic register arguments to be saved.
13
14declare void(i8*, ...)* @get_f(i8* %this)
; External helper that returns the variadic function pointer the thunks
; musttail-call with the forwarded argument pack.
15
16define void @f_thunk(i8* %this, ...) {
; Forwarding thunk: va_start saves the incoming argument pack, @get_f is
; called (clobbering all argument registers), then the returned pointer is
; musttail-called with %this and the forwarded varargs. The assertions
; below verify that the six SysV GPR argument registers, xmm0-xmm7, and
; %al are spilled before the call and reloaded after it on the Linux
; targets, while the Windows targets only need GPR saves.
; NOTE(review): all check lines are autogenerated by
; utils/update_llc_test_checks.py -- regenerate instead of hand-editing.
17  ; Use va_start so that we exercise the combination.
18; LINUX-LABEL: f_thunk:
19; LINUX:       # %bb.0:
20; LINUX-NEXT:    pushq %rbp
21; LINUX-NEXT:    .cfi_def_cfa_offset 16
22; LINUX-NEXT:    pushq %r15
23; LINUX-NEXT:    .cfi_def_cfa_offset 24
24; LINUX-NEXT:    pushq %r14
25; LINUX-NEXT:    .cfi_def_cfa_offset 32
26; LINUX-NEXT:    pushq %r13
27; LINUX-NEXT:    .cfi_def_cfa_offset 40
28; LINUX-NEXT:    pushq %r12
29; LINUX-NEXT:    .cfi_def_cfa_offset 48
30; LINUX-NEXT:    pushq %rbx
31; LINUX-NEXT:    .cfi_def_cfa_offset 56
32; LINUX-NEXT:    subq $360, %rsp # imm = 0x168
33; LINUX-NEXT:    .cfi_def_cfa_offset 416
34; LINUX-NEXT:    .cfi_offset %rbx, -56
35; LINUX-NEXT:    .cfi_offset %r12, -48
36; LINUX-NEXT:    .cfi_offset %r13, -40
37; LINUX-NEXT:    .cfi_offset %r14, -32
38; LINUX-NEXT:    .cfi_offset %r15, -24
39; LINUX-NEXT:    .cfi_offset %rbp, -16
40; LINUX-NEXT:    movq %r9, %r15
41; LINUX-NEXT:    movq %r8, %r12
42; LINUX-NEXT:    movq %rcx, %r13
43; LINUX-NEXT:    movq %rdx, %rbp
44; LINUX-NEXT:    movq %rsi, %rbx
45; LINUX-NEXT:    movq %rdi, %r14
46; LINUX-NEXT:    movb %al, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
47; LINUX-NEXT:    testb %al, %al
48; LINUX-NEXT:    je .LBB0_2
49; LINUX-NEXT:  # %bb.1:
50; LINUX-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
51; LINUX-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
52; LINUX-NEXT:    movaps %xmm2, {{[0-9]+}}(%rsp)
53; LINUX-NEXT:    movaps %xmm3, {{[0-9]+}}(%rsp)
54; LINUX-NEXT:    movaps %xmm4, {{[0-9]+}}(%rsp)
55; LINUX-NEXT:    movaps %xmm5, {{[0-9]+}}(%rsp)
56; LINUX-NEXT:    movaps %xmm6, {{[0-9]+}}(%rsp)
57; LINUX-NEXT:    movaps %xmm7, {{[0-9]+}}(%rsp)
58; LINUX-NEXT:  .LBB0_2:
59; LINUX-NEXT:    movq %rbx, {{[0-9]+}}(%rsp)
60; LINUX-NEXT:    movq %rbp, {{[0-9]+}}(%rsp)
61; LINUX-NEXT:    movq %r13, {{[0-9]+}}(%rsp)
62; LINUX-NEXT:    movq %r12, {{[0-9]+}}(%rsp)
63; LINUX-NEXT:    movq %r15, {{[0-9]+}}(%rsp)
64; LINUX-NEXT:    leaq {{[0-9]+}}(%rsp), %rax
65; LINUX-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
66; LINUX-NEXT:    leaq {{[0-9]+}}(%rsp), %rax
67; LINUX-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
68; LINUX-NEXT:    movabsq $206158430216, %rax # imm = 0x3000000008
69; LINUX-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
70; LINUX-NEXT:    movq %r14, %rdi
71; LINUX-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
72; LINUX-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
73; LINUX-NEXT:    movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
74; LINUX-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
75; LINUX-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
76; LINUX-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
77; LINUX-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
78; LINUX-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
79; LINUX-NEXT:    callq get_f
80; LINUX-NEXT:    movq %rax, %r11
81; LINUX-NEXT:    movq %r14, %rdi
82; LINUX-NEXT:    movq %rbx, %rsi
83; LINUX-NEXT:    movq %rbp, %rdx
84; LINUX-NEXT:    movq %r13, %rcx
85; LINUX-NEXT:    movq %r12, %r8
86; LINUX-NEXT:    movq %r15, %r9
87; LINUX-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
88; LINUX-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
89; LINUX-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
90; LINUX-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
91; LINUX-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
92; LINUX-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
93; LINUX-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
94; LINUX-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
95; LINUX-NEXT:    movb {{[-0-9]+}}(%r{{[sb]}}p), %al # 1-byte Reload
96; LINUX-NEXT:    addq $360, %rsp # imm = 0x168
97; LINUX-NEXT:    .cfi_def_cfa_offset 56
98; LINUX-NEXT:    popq %rbx
99; LINUX-NEXT:    .cfi_def_cfa_offset 48
100; LINUX-NEXT:    popq %r12
101; LINUX-NEXT:    .cfi_def_cfa_offset 40
102; LINUX-NEXT:    popq %r13
103; LINUX-NEXT:    .cfi_def_cfa_offset 32
104; LINUX-NEXT:    popq %r14
105; LINUX-NEXT:    .cfi_def_cfa_offset 24
106; LINUX-NEXT:    popq %r15
107; LINUX-NEXT:    .cfi_def_cfa_offset 16
108; LINUX-NEXT:    popq %rbp
109; LINUX-NEXT:    .cfi_def_cfa_offset 8
110; LINUX-NEXT:    jmpq *%r11 # TAILCALL
111;
112; LINUX-X32-LABEL: f_thunk:
113; LINUX-X32:       # %bb.0:
114; LINUX-X32-NEXT:    pushq %rbp
115; LINUX-X32-NEXT:    .cfi_def_cfa_offset 16
116; LINUX-X32-NEXT:    pushq %r15
117; LINUX-X32-NEXT:    .cfi_def_cfa_offset 24
118; LINUX-X32-NEXT:    pushq %r14
119; LINUX-X32-NEXT:    .cfi_def_cfa_offset 32
120; LINUX-X32-NEXT:    pushq %r13
121; LINUX-X32-NEXT:    .cfi_def_cfa_offset 40
122; LINUX-X32-NEXT:    pushq %r12
123; LINUX-X32-NEXT:    .cfi_def_cfa_offset 48
124; LINUX-X32-NEXT:    pushq %rbx
125; LINUX-X32-NEXT:    .cfi_def_cfa_offset 56
126; LINUX-X32-NEXT:    subl $344, %esp # imm = 0x158
127; LINUX-X32-NEXT:    .cfi_def_cfa_offset 400
128; LINUX-X32-NEXT:    .cfi_offset %rbx, -56
129; LINUX-X32-NEXT:    .cfi_offset %r12, -48
130; LINUX-X32-NEXT:    .cfi_offset %r13, -40
131; LINUX-X32-NEXT:    .cfi_offset %r14, -32
132; LINUX-X32-NEXT:    .cfi_offset %r15, -24
133; LINUX-X32-NEXT:    .cfi_offset %rbp, -16
134; LINUX-X32-NEXT:    movq %r9, %r15
135; LINUX-X32-NEXT:    movq %r8, %r12
136; LINUX-X32-NEXT:    movq %rcx, %r13
137; LINUX-X32-NEXT:    movq %rdx, %rbp
138; LINUX-X32-NEXT:    movq %rsi, %rbx
139; LINUX-X32-NEXT:    movq %rdi, %r14
140; LINUX-X32-NEXT:    movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
141; LINUX-X32-NEXT:    testb %al, %al
142; LINUX-X32-NEXT:    je .LBB0_2
143; LINUX-X32-NEXT:  # %bb.1:
144; LINUX-X32-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
145; LINUX-X32-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
146; LINUX-X32-NEXT:    movaps %xmm2, {{[0-9]+}}(%esp)
147; LINUX-X32-NEXT:    movaps %xmm3, {{[0-9]+}}(%esp)
148; LINUX-X32-NEXT:    movaps %xmm4, {{[0-9]+}}(%esp)
149; LINUX-X32-NEXT:    movaps %xmm5, {{[0-9]+}}(%esp)
150; LINUX-X32-NEXT:    movaps %xmm6, {{[0-9]+}}(%esp)
151; LINUX-X32-NEXT:    movaps %xmm7, {{[0-9]+}}(%esp)
152; LINUX-X32-NEXT:  .LBB0_2:
153; LINUX-X32-NEXT:    movq %rbx, {{[0-9]+}}(%esp)
154; LINUX-X32-NEXT:    movq %rbp, {{[0-9]+}}(%esp)
155; LINUX-X32-NEXT:    movq %r13, {{[0-9]+}}(%esp)
156; LINUX-X32-NEXT:    movq %r12, {{[0-9]+}}(%esp)
157; LINUX-X32-NEXT:    movq %r15, {{[0-9]+}}(%esp)
158; LINUX-X32-NEXT:    leal {{[0-9]+}}(%rsp), %eax
159; LINUX-X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
160; LINUX-X32-NEXT:    leal {{[0-9]+}}(%rsp), %eax
161; LINUX-X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
162; LINUX-X32-NEXT:    movabsq $206158430216, %rax # imm = 0x3000000008
163; LINUX-X32-NEXT:    movq %rax, {{[0-9]+}}(%esp)
164; LINUX-X32-NEXT:    movq %r14, %rdi
165; LINUX-X32-NEXT:    movaps %xmm7, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
166; LINUX-X32-NEXT:    movaps %xmm6, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
167; LINUX-X32-NEXT:    movaps %xmm5, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
168; LINUX-X32-NEXT:    movaps %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
169; LINUX-X32-NEXT:    movaps %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
170; LINUX-X32-NEXT:    movaps %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
171; LINUX-X32-NEXT:    movaps %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
172; LINUX-X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
173; LINUX-X32-NEXT:    callq get_f
174; LINUX-X32-NEXT:    movl %eax, %r11d
175; LINUX-X32-NEXT:    movq %r14, %rdi
176; LINUX-X32-NEXT:    movq %rbx, %rsi
177; LINUX-X32-NEXT:    movq %rbp, %rdx
178; LINUX-X32-NEXT:    movq %r13, %rcx
179; LINUX-X32-NEXT:    movq %r12, %r8
180; LINUX-X32-NEXT:    movq %r15, %r9
181; LINUX-X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
182; LINUX-X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
183; LINUX-X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
184; LINUX-X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm3 # 16-byte Reload
185; LINUX-X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm4 # 16-byte Reload
186; LINUX-X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm5 # 16-byte Reload
187; LINUX-X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm6 # 16-byte Reload
188; LINUX-X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm7 # 16-byte Reload
189; LINUX-X32-NEXT:    movb {{[-0-9]+}}(%e{{[sb]}}p), %al # 1-byte Reload
190; LINUX-X32-NEXT:    addl $344, %esp # imm = 0x158
191; LINUX-X32-NEXT:    .cfi_def_cfa_offset 56
192; LINUX-X32-NEXT:    popq %rbx
193; LINUX-X32-NEXT:    .cfi_def_cfa_offset 48
194; LINUX-X32-NEXT:    popq %r12
195; LINUX-X32-NEXT:    .cfi_def_cfa_offset 40
196; LINUX-X32-NEXT:    popq %r13
197; LINUX-X32-NEXT:    .cfi_def_cfa_offset 32
198; LINUX-X32-NEXT:    popq %r14
199; LINUX-X32-NEXT:    .cfi_def_cfa_offset 24
200; LINUX-X32-NEXT:    popq %r15
201; LINUX-X32-NEXT:    .cfi_def_cfa_offset 16
202; LINUX-X32-NEXT:    popq %rbp
203; LINUX-X32-NEXT:    .cfi_def_cfa_offset 8
204; LINUX-X32-NEXT:    jmpq *%r11 # TAILCALL
205;
206; WINDOWS-LABEL: f_thunk:
207; WINDOWS:       # %bb.0:
208; WINDOWS-NEXT:    pushq %r14
209; WINDOWS-NEXT:    .seh_pushreg %r14
210; WINDOWS-NEXT:    pushq %rsi
211; WINDOWS-NEXT:    .seh_pushreg %rsi
212; WINDOWS-NEXT:    pushq %rdi
213; WINDOWS-NEXT:    .seh_pushreg %rdi
214; WINDOWS-NEXT:    pushq %rbx
215; WINDOWS-NEXT:    .seh_pushreg %rbx
216; WINDOWS-NEXT:    subq $72, %rsp
217; WINDOWS-NEXT:    .seh_stackalloc 72
218; WINDOWS-NEXT:    .seh_endprologue
219; WINDOWS-NEXT:    movq %r9, %r14
220; WINDOWS-NEXT:    movq %r8, %rdi
221; WINDOWS-NEXT:    movq %rdx, %rbx
222; WINDOWS-NEXT:    movq %rcx, %rsi
223; WINDOWS-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
224; WINDOWS-NEXT:    movq %r8, {{[0-9]+}}(%rsp)
225; WINDOWS-NEXT:    movq %r9, {{[0-9]+}}(%rsp)
226; WINDOWS-NEXT:    leaq {{[0-9]+}}(%rsp), %rax
227; WINDOWS-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
228; WINDOWS-NEXT:    callq get_f
229; WINDOWS-NEXT:    movq %rsi, %rcx
230; WINDOWS-NEXT:    movq %rbx, %rdx
231; WINDOWS-NEXT:    movq %rdi, %r8
232; WINDOWS-NEXT:    movq %r14, %r9
233; WINDOWS-NEXT:    addq $72, %rsp
234; WINDOWS-NEXT:    popq %rbx
235; WINDOWS-NEXT:    popq %rdi
236; WINDOWS-NEXT:    popq %rsi
237; WINDOWS-NEXT:    popq %r14
238; WINDOWS-NEXT:    rex64 jmpq *%rax # TAILCALL
239; WINDOWS-NEXT:    .seh_endproc
240;
241; X86-NOSSE-LABEL: f_thunk:
242; X86-NOSSE:       # %bb.0:
243; X86-NOSSE-NEXT:    pushl %ebp
244; X86-NOSSE-NEXT:    movl %esp, %ebp
245; X86-NOSSE-NEXT:    pushl %esi
246; X86-NOSSE-NEXT:    andl $-16, %esp
247; X86-NOSSE-NEXT:    subl $32, %esp
248; X86-NOSSE-NEXT:    movl 8(%ebp), %esi
249; X86-NOSSE-NEXT:    leal 12(%ebp), %eax
250; X86-NOSSE-NEXT:    movl %eax, (%esp)
251; X86-NOSSE-NEXT:    pushl %esi
252; X86-NOSSE-NEXT:    calll _get_f
253; X86-NOSSE-NEXT:    addl $4, %esp
254; X86-NOSSE-NEXT:    movl %esi, 8(%ebp)
255; X86-NOSSE-NEXT:    leal -4(%ebp), %esp
256; X86-NOSSE-NEXT:    popl %esi
257; X86-NOSSE-NEXT:    popl %ebp
258; X86-NOSSE-NEXT:    jmpl *%eax # TAILCALL
259;
260; X86-SSE-LABEL: f_thunk:
261; X86-SSE:       # %bb.0:
262; X86-SSE-NEXT:    pushl %ebp
263; X86-SSE-NEXT:    movl %esp, %ebp
264; X86-SSE-NEXT:    pushl %esi
265; X86-SSE-NEXT:    andl $-16, %esp
266; X86-SSE-NEXT:    subl $80, %esp
267; X86-SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
268; X86-SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
269; X86-SSE-NEXT:    movaps %xmm0, (%esp) # 16-byte Spill
270; X86-SSE-NEXT:    movl 8(%ebp), %esi
271; X86-SSE-NEXT:    leal 12(%ebp), %eax
272; X86-SSE-NEXT:    movl %eax, {{[0-9]+}}(%esp)
273; X86-SSE-NEXT:    pushl %esi
274; X86-SSE-NEXT:    calll _get_f
275; X86-SSE-NEXT:    addl $4, %esp
276; X86-SSE-NEXT:    movl %esi, 8(%ebp)
277; X86-SSE-NEXT:    movaps (%esp), %xmm0 # 16-byte Reload
278; X86-SSE-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
279; X86-SSE-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
280; X86-SSE-NEXT:    leal -4(%ebp), %esp
281; X86-SSE-NEXT:    popl %esi
282; X86-SSE-NEXT:    popl %ebp
283; X86-SSE-NEXT:    jmpl *%eax # TAILCALL
; IR under test: save the va_list, fetch the real target, and forward
; everything with a mandatory tail call.
284  %ap = alloca [4 x i8*], align 16
285  %ap_i8 = bitcast [4 x i8*]* %ap to i8*
286  call void @llvm.va_start(i8* %ap_i8)
287
288  %fptr = call void(i8*, ...)*(i8*) @get_f(i8* %this)
289  musttail call void (i8*, ...) %fptr(i8* %this, ...)
290  ret void
291}
292
293; Save and restore 6 GPRs, 8 XMMs, and AL around the call.
294
295; No regparms on normal x86 conventions.
296
297; This thunk shouldn't require any spills and reloads, assuming the register
298; allocator knows what it's doing.
299
300define void @g_thunk(i8* %fptr_i8, ...) {
; Degenerate thunk: the first argument is itself the musttail-call target,
; so no argument register moves and no spills are needed. On the 64-bit
; targets this should lower to a bare indirect jump through the first
; argument register; on i686 the incoming stack slot is rewritten first.
; Check lines are autogenerated by update_llc_test_checks.py.
301; LINUX-LABEL: g_thunk:
302; LINUX:       # %bb.0:
303; LINUX-NEXT:    jmpq *%rdi # TAILCALL
304;
305; LINUX-X32-LABEL: g_thunk:
306; LINUX-X32:       # %bb.0:
307; LINUX-X32-NEXT:    jmpq *%rdi # TAILCALL
308;
309; WINDOWS-LABEL: g_thunk:
310; WINDOWS:       # %bb.0:
311; WINDOWS-NEXT:    rex64 jmpq *%rcx # TAILCALL
312;
313; X86-LABEL: g_thunk:
314; X86:       # %bb.0:
315; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
316; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
317; X86-NEXT:    jmpl *%eax # TAILCALL
; IR under test: forward the pack through the pointer we were given.
318  %fptr = bitcast i8* %fptr_i8 to void (i8*, ...)*
319  musttail call void (i8*, ...) %fptr(i8* %fptr_i8, ...)
320  ret void
321}
322
323; Do a simple multi-exit multi-bb test.
324
325%struct.Foo = type { i1, i8*, i8* }
; Foo: a selector flag followed by two function pointers; @h_thunk branches
; on the flag and tail-calls through field 1 or field 2.
326
327@g = external dso_local global i32
; Written (42) on @h_thunk's else path before its tail call.
328
329define void @h_thunk(%struct.Foo* %this, ...) {
; Multi-exit thunk: branches on this->0 and musttail-calls through either
; this->1 (then) or this->2 (else, after storing 42 to @g). The assertions
; verify both exits lower to indirect tail-call jumps with no spills.
; Check lines are autogenerated by update_llc_test_checks.py.
330; LINUX-LABEL: h_thunk:
331; LINUX:       # %bb.0:
332; LINUX-NEXT:    cmpb $1, (%rdi)
333; LINUX-NEXT:    jne .LBB2_2
334; LINUX-NEXT:  # %bb.1: # %then
335; LINUX-NEXT:    movq 8(%rdi), %r11
336; LINUX-NEXT:    jmpq *%r11 # TAILCALL
337; LINUX-NEXT:  .LBB2_2: # %else
338; LINUX-NEXT:    movq 16(%rdi), %r11
339; LINUX-NEXT:    movl $42, {{.*}}(%rip)
340; LINUX-NEXT:    jmpq *%r11 # TAILCALL
341;
342; LINUX-X32-LABEL: h_thunk:
343; LINUX-X32:       # %bb.0:
344; LINUX-X32-NEXT:    cmpb $1, (%edi)
345; LINUX-X32-NEXT:    jne .LBB2_2
346; LINUX-X32-NEXT:  # %bb.1: # %then
347; LINUX-X32-NEXT:    movl 4(%edi), %r11d
348; LINUX-X32-NEXT:    movl %edi, %edi
349; LINUX-X32-NEXT:    jmpq *%r11 # TAILCALL
350; LINUX-X32-NEXT:  .LBB2_2: # %else
351; LINUX-X32-NEXT:    movl 8(%edi), %r11d
352; LINUX-X32-NEXT:    movl $42, {{.*}}(%rip)
353; LINUX-X32-NEXT:    movl %edi, %edi
354; LINUX-X32-NEXT:    jmpq *%r11 # TAILCALL
355;
356; WINDOWS-LABEL: h_thunk:
357; WINDOWS:       # %bb.0:
358; WINDOWS-NEXT:    cmpb $1, (%rcx)
359; WINDOWS-NEXT:    jne .LBB2_2
360; WINDOWS-NEXT:  # %bb.1: # %then
361; WINDOWS-NEXT:    movq 8(%rcx), %rax
362; WINDOWS-NEXT:    rex64 jmpq *%rax # TAILCALL
363; WINDOWS-NEXT:  .LBB2_2: # %else
364; WINDOWS-NEXT:    movq 16(%rcx), %rax
365; WINDOWS-NEXT:    movl $42, {{.*}}(%rip)
366; WINDOWS-NEXT:    rex64 jmpq *%rax # TAILCALL
367;
368; X86-LABEL: h_thunk:
369; X86:       # %bb.0:
370; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
371; X86-NEXT:    cmpb $1, (%eax)
372; X86-NEXT:    jne LBB2_2
373; X86-NEXT:  # %bb.1: # %then
374; X86-NEXT:    movl 4(%eax), %ecx
375; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
376; X86-NEXT:    jmpl *%ecx # TAILCALL
377; X86-NEXT:  LBB2_2: # %else
378; X86-NEXT:    movl 8(%eax), %ecx
379; X86-NEXT:    movl $42, _g
380; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
381; X86-NEXT:    jmpl *%ecx # TAILCALL
; IR under test: two-way branch, each side ending in a musttail call.
382  %cond_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 0
383  %cond = load i1, i1* %cond_p
384  br i1 %cond, label %then, label %else
385
386then:
387  %a_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 1
388  %a_i8 = load i8*, i8** %a_p
389  %a = bitcast i8* %a_i8 to void (%struct.Foo*, ...)*
390  musttail call void (%struct.Foo*, ...) %a(%struct.Foo* %this, ...)
391  ret void
392
393else:
394  %b_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 2
395  %b_i8 = load i8*, i8** %b_p
396  %b = bitcast i8* %b_i8 to void (%struct.Foo*, ...)*
397  store i32 42, i32* @g
398  musttail call void (%struct.Foo*, ...) %b(%struct.Foo* %this, ...)
399  ret void
400}
401