; RUN: llc < %s -mtriple=i686-linux -segmented-stacks | FileCheck %s -check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-linux  -segmented-stacks | FileCheck %s -check-prefix=X64

; Just to prevent the alloca from being optimized away
declare void @dummy_use(i32*, i32)

define i32 @test_basic(i32 %l) {
; Recursive function with a variable-sized alloca: the recursion forces a
; segmented-stack prologue (__morestack), and the dynamic alloca additionally
; exercises the __morestack_allocate_stack_space path checked below.
        %mem = alloca i32, i32 %l
        call void @dummy_use (i32* %mem, i32 %l)
        %terminate = icmp eq i32 %l, 0
        br i1 %terminate, label %true, label %false

true:
        ret i32 0

false:
        %newlen = sub i32 %l, 1
        %retvalue = call i32 @test_basic(i32 %newlen)
        ret i32 %retvalue

; X32:      test_basic:

; X32:      leal -12(%esp), %ecx
; X32-NEXT: cmpl %gs:48, %ecx

; X32:      subl $8, %esp
; X32-NEXT: pushl $4
; X32-NEXT: pushl $12
; X32-NEXT: calll __morestack
; X32-NEXT: addl $8, %esp
; X32-NEXT: ret

; X32:      movl %eax, %esp

; X32:      subl $12, %esp
; X32-NEXT: pushl %ecx
; X32-NEXT: calll __morestack_allocate_stack_space
; X32-NEXT: addl $16, %esp

; X64:      test_basic:

; X64:      leaq -24(%rsp), %r11
; X64-NEXT: cmpq %fs:112, %r11

; X64:      movabsq $24, %r10
; X64-NEXT: movabsq $0, %r11
; X64-NEXT: callq __morestack
; X64-NEXT: ret

; X64:      movq %rsp, %rax
; X64-NEXT: subq %rcx, %rax
; X64-NEXT: cmpq %rax, %fs:112

; X64:      movq %rax, %rsp

; X64:      movq %rcx, %rdi
; X64-NEXT: callq __morestack_allocate_stack_space

}
define i32 @test_nested(i32 * nest %closure, i32 %other) {
; Function with a 'nest' parameter: the segmented-stack prologue must not
; clobber the nest register.  The x86-64 CHECK lines below verify that r10 is
; saved into rax around the __morestack call and restored afterwards.
       %addend = load i32 * %closure
       %result = add i32 %other, %addend
       ret i32 %result

; X32:      leal (%esp), %edx
; X32-NEXT: cmpl %gs:48, %edx


; X32:      subl $8, %esp
; X32-NEXT: pushl $4
; X32-NEXT: pushl $0
; X32-NEXT: calll __morestack
; X32-NEXT: addl $8, %esp
; X32-NEXT: ret

; X64:      leaq (%rsp), %r11
; X64-NEXT: cmpq %fs:112, %r11

; X64:      movq %r10, %rax
; X64-NEXT: movabsq $0, %r10
; X64-NEXT: movabsq $0, %r11
; X64-NEXT: callq __morestack
; X64-NEXT: ret
; X64:      movq %rax, %r10

}