; RUN: llc < %s -mtriple=i386-pc-linux -mcpu=corei7 -relocation-model=static | FileCheck --check-prefix=X86 %s
; RUN: llc < %s -mtriple=i386-pc-linux -mcpu=corei7 -relocation-model=pic | FileCheck --check-prefix=PIC86 %s
; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=corei7 -relocation-model=static | FileCheck --check-prefix=X64 %s
; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=corei7 -relocation-model=pic | FileCheck --check-prefix=PIC64 %s

; Check the lowering of the llvm.eh.sjlj.setjmp and llvm.eh.sjlj.longjmp
; intrinsics on x86 and x86-64, in both the static and PIC relocation
; models. The setjmp lowering fills three slots of the buffer: slot 0
; holds the frame pointer, slot 1 the address of the resume label, and
; slot 2 the stack pointer, so the offsets checked below scale with the
; pointer size (buf+4/buf+8 on x86, buf+8/buf+16 on x86-64).

@buf = internal global [5 x i8*] zeroinitializer

declare i8* @llvm.frameaddress(i32) nounwind readnone

declare i8* @llvm.stacksave() nounwind

declare i32 @llvm.eh.sjlj.setjmp(i8*) nounwind

declare void @llvm.eh.sjlj.longjmp(i8*) nounwind

; sj0 stores the frame pointer in slot 0 and the stack pointer in slot 2;
; the setjmp lowering itself fills slot 1 with the resume address. The
; relative order of the stores is scheduling-dependent, so the static
; configurations check them with -DAG. (The original x86:/x64: lines were
; dead checks: FileCheck prefixes are case-sensitive.)
define i32 @sj0() nounwind {
  %fp = tail call i8* @llvm.frameaddress(i32 0)
  store i8* %fp, i8** getelementptr inbounds ([5 x i8*], [5 x i8*]* @buf, i64 0, i64 0), align 16
  %sp = tail call i8* @llvm.stacksave()
  store i8* %sp, i8** getelementptr inbounds ([5 x i8*], [5 x i8*]* @buf, i64 0, i64 2), align 16
  %r = tail call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([5 x i8*]* @buf to i8*))
  ret i32 %r
; X86-LABEL: sj0:
; X86-DAG: movl %ebp, buf
; X86-DAG: movl %esp, buf+8
; X86-DAG: movl ${{.*LBB.*}}, buf+4
; X86: ret
; PIC86-LABEL: sj0:
; PIC86: movl %ebp, buf@GOTOFF(%[[GOT:.*]])
; PIC86: movl %esp, buf@GOTOFF+8(%[[GOT]])
; PIC86: leal {{.*LBB.*}}@GOTOFF(%[[GOT]]), %[[LREG:.*]]
; PIC86: movl %[[LREG]], buf@GOTOFF+4
; PIC86: ret
; X64-LABEL: sj0:
; X64-DAG: movq %rbp, buf(%rip)
; X64-DAG: movq ${{.*LBB.*}}, buf+8(%rip)
; X64-DAG: movq %rsp, buf+16(%rip)
; X64: ret
; PIC64-LABEL: sj0:
; PIC64: movq %rbp, buf(%rip)
; PIC64: movq %rsp, buf+16(%rip)
; PIC64: leaq {{.*LBB.*}}(%rip), %[[LREG:.*]]
; PIC64: movq %[[LREG]], buf+8(%rip)
; PIC64: ret
}

; The longjmp lowering reloads the frame pointer, the resume address, and
; the stack pointer from the buffer, then jumps to the resume address.
define void @lj0() nounwind {
  tail call void @llvm.eh.sjlj.longjmp(i8* bitcast ([5 x i8*]* @buf to i8*))
  unreachable
; X86-LABEL: lj0:
; X86: movl buf, %ebp
; X86: movl buf+4, %[[REG32:.*]]
; X86: movl buf+8, %esp
; X86: jmpl *%[[REG32]]
; X64-LABEL: lj0:
; X64: movq buf(%rip), %rbp
; X64: movq buf+8(%rip), %[[REG64:.*]]
; X64: movq buf+16(%rip), %rsp
; X64: jmpq *%[[REG64]]
}