; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown < %s | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"

; Stack reload folding tests.
;
; By including a nop call with side effects we can force a partial register spill of the
; relevant registers and check that the reload is correctly folded into the instruction.

;TODO stack_fold_bsf_i16 (an unasserted sketch is at the end of this file)
declare i16 @llvm.cttz.i16(i16, i1)

define i32 @stack_fold_bsf_i32(i32 %a0) {
  ;CHECK-LABEL: stack_fold_bsf_i32
  ;CHECK: bsfl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = call i32 @llvm.cttz.i32(i32 %a0, i1 -1)
  ret i32 %2
}
declare i32 @llvm.cttz.i32(i32, i1)

define i64 @stack_fold_bsf_i64(i64 %a0) {
  ;CHECK-LABEL: stack_fold_bsf_i64
  ;CHECK: bsfq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = call i64 @llvm.cttz.i64(i64 %a0, i1 -1)
  ret i64 %2
}
declare i64 @llvm.cttz.i64(i64, i1)

;TODO stack_fold_bsr_i16 (an unasserted sketch is at the end of this file)
declare i16 @llvm.ctlz.i16(i16, i1)

define i32 @stack_fold_bsr_i32(i32 %a0) {
  ;CHECK-LABEL: stack_fold_bsr_i32
  ;CHECK: bsrl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = call i32 @llvm.ctlz.i32(i32 %a0, i1 -1)
  ret i32 %2
}
declare i32 @llvm.ctlz.i32(i32, i1)

define i64 @stack_fold_bsr_i64(i64 %a0) {
  ;CHECK-LABEL: stack_fold_bsr_i64
  ;CHECK: bsrq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = call i64 @llvm.ctlz.i64(i64 %a0, i1 -1)
  ret i64 %2
}
declare i64 @llvm.ctlz.i64(i64, i1)
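
; A sketch for the stack_fold_bsf_i16 TODO above, assuming the same
; spill-and-reload pattern as the i32/i64 tests: the clobbering nop forces
; %a0 to a stack slot, and the cttz should then read it back from memory.
; The folded-reload assertion is deliberately omitted, because whether llc
; folds the 2-byte reload into a bsfw (rather than first promoting the cttz
; to 32 bits) is exactly the open question; the expected pattern is an
; assumption, so this function only pins down the IR shape of the test.
define i16 @stack_fold_bsf_i16(i16 %a0) {
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = call i16 @llvm.cttz.i16(i16 %a0, i1 -1)
  ret i16 %2
}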
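
; Likewise, a sketch for the stack_fold_bsr_i16 TODO, mirroring the i32/i64
; bsr tests; the expected bsrw with a 2-byte folded reload is an assumption
; and is left unasserted until the 16-bit folding is implemented.
define i16 @stack_fold_bsr_i16(i16 %a0) {
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = call i16 @llvm.ctlz.i16(i16 %a0, i1 -1)
  ret i16 %2
}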