; RUN: llc < %s -fast-isel -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"

; Make sure that fast-isel folds the immediate into the binop even though it
; is non-canonical.
define i32 @test1(i32 %i) nounwind ssp {
  %and = and i32 8, %i
  ret i32 %and
}

; CHECK: test1:
; CHECK: andl $8,


; rdar://9289512 - The load should fold into the compare.
define void @test2(i64 %x) nounwind ssp {
entry:
  %x.addr = alloca i64, align 8
  store i64 %x, i64* %x.addr, align 8
  %tmp = load i64* %x.addr, align 8
  %cmp = icmp sgt i64 %tmp, 42
  br i1 %cmp, label %if.then, label %if.end

if.then:                                          ; preds = %entry
  br label %if.end

if.end:                                           ; preds = %if.then, %entry
  ret void
; CHECK: test2:
; CHECK: movq %rdi, -8(%rsp)
; CHECK: cmpq $42, -8(%rsp)
}




@G = external global i32
define i64 @test3() nounwind {
  %A = ptrtoint i32* @G to i64
  ret i64 %A
; CHECK: test3:
; CHECK: movq _G@GOTPCREL(%rip), %rax
; CHECK-NEXT: ret
}



; rdar://9289558
@rtx_length = external global [153 x i8]

define i32 @test4(i64 %idxprom9) nounwind {
  %arrayidx10 = getelementptr inbounds [153 x i8]* @rtx_length, i32 0, i64 %idxprom9
  %tmp11 = load i8* %arrayidx10, align 1
  %conv = zext i8 %tmp11 to i32
  ret i32 %conv

; CHECK: test4:
; CHECK: movq _rtx_length@GOTPCREL(%rip), %rax
; CHECK-NEXT: movzbl (%rax,%rdi), %eax
; CHECK-NEXT: ret
}


; PR3242 - Out of range shifts should not be folded by fastisel.
define void @test5(i32 %x, i32* %p) nounwind {
  %y = ashr i32 %x, 50000
  store i32 %y, i32* %p
  ret void

; CHECK: test5:
; CHECK: movl $50000, %ecx
; CHECK: sarl %cl, %edi
; CHECK: ret
}

; rdar://9289501 - fast isel should fold trivial multiplies to shifts.
define i64 @test6(i64 %x) nounwind ssp {
entry:
  %mul = mul nsw i64 %x, 8
  ret i64 %mul

; CHECK: test6:
; CHECK: shlq $3, %rdi
}

define i32 @test7(i32 %x) nounwind ssp {
entry:
  %mul = mul nsw i32 %x, 8
  ret i32 %mul
; CHECK: test7:
; CHECK: shll $3, %edi
}


; rdar://9289507 - folding of immediates into 64-bit operations.
define i64 @test8(i64 %x) nounwind ssp {
entry:
  %add = add nsw i64 %x, 7
  ret i64 %add

; CHECK: test8:
; CHECK: addq $7, %rdi
}

define i64 @test9(i64 %x) nounwind ssp {
entry:
  %add = mul nsw i64 %x, 7
  ret i64 %add
; CHECK: test9:
; CHECK: imulq $7, %rdi, %rax
}

; rdar://9297011 - Don't reject udiv by a power of 2.
define i32 @test10(i32 %X) nounwind {
  %Y = udiv i32 %X, 8
  ret i32 %Y
; CHECK: test10:
; CHECK: shrl $3,
}

define i32 @test11(i32 %X) nounwind {
  %Y = sdiv exact i32 %X, 8
  ret i32 %Y
; CHECK: test11:
; CHECK: sarl $3,
}


; rdar://9297006 - Trunc to bool.
define void @test12(i8 %tmp) nounwind ssp noredzone {
entry:
  %tobool = trunc i8 %tmp to i1
  br i1 %tobool, label %if.then, label %if.end

if.then:                                          ; preds = %entry
  call void @test12(i8 0) noredzone
  br label %if.end

if.end:                                           ; preds = %if.then, %entry
  ret void
; CHECK: test12:
; CHECK: testb $1,
; CHECK-NEXT: je L
; CHECK-NEXT: movl $0, %edi
; CHECK-NEXT: callq
}

declare void @test13f(i1 %X)

define void @test13() nounwind {
  call void @test13f(i1 0)
  ret void
; CHECK: test13:
; CHECK: movl $0, %edi
; CHECK-NEXT: callq
}



; rdar://9297003 - fast isel bails out on all functions taking bools
define void @test14(i8 %tmp) nounwind ssp noredzone {
entry:
  %tobool = trunc i8 %tmp to i1
  call void @test13f(i1 zeroext %tobool) noredzone
  ret void
; CHECK: test14:
; CHECK: andb $1,
; CHECK: callq
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)

; rdar://9289488 - fast-isel shouldn't bail out on llvm.memcpy
define void @test15(i8* %a, i8* %b) nounwind {
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 4, i32 4, i1 false)
  ret void
; CHECK: test15:
; CHECK-NEXT: movl (%rsi), %eax
; CHECK-NEXT: movl %eax, (%rdi)
; CHECK-NEXT: ret
}

; Handling for varargs calls
declare void @test16callee(...) nounwind
define void @test16() nounwind {
; CHECK: test16:
; CHECK: movl $1, %edi
; CHECK: movb $0, %al
; CHECK: callq _test16callee
  call void (...)* @test16callee(i32 1)
  br label %block2

block2:
; CHECK: movabsq $1
; CHECK: cvtsi2sdq {{.*}} %xmm0
; CHECK: movb $1, %al
; CHECK: callq _test16callee
  call void (...)* @test16callee(double 1.000000e+00)
  ret void
}


declare void @foo() unnamed_addr ssp align 2

; Verify that we don't fold the load into the compare here. That would move it
; w.r.t. the call.
define i32 @test17(i32 *%P) ssp nounwind {
entry:
  %tmp = load i32* %P
  %cmp = icmp ne i32 %tmp, 5
  call void @foo()
  br i1 %cmp, label %if.then, label %if.else

if.then:                                          ; preds = %entry
  ret i32 1

if.else:                                          ; preds = %entry
  ret i32 2
; CHECK: test17:
; CHECK: movl (%rdi), %eax
; CHECK: callq _foo
; CHECK: cmpl $5, %eax
; CHECK-NEXT: je
}

; Check that 0.0 is materialized using xorps
define void @test18(float* %p1) {
  store float 0.0, float* %p1
  ret void
; CHECK: test18:
; CHECK: xorps
}

; Without any type hints, doubles use the smaller xorps instead of xorpd.
define void @test19(double* %p1) {
  store double 0.0, double* %p1
  ret void
; CHECK: test19:
; CHECK: xorps
}

; Check that we fast-isel sret
%struct.a = type { i64, i64, i64 }
define void @test20() nounwind ssp {
entry:
  %tmp = alloca %struct.a, align 8
  call void @test20sret(%struct.a* sret %tmp)
  ret void
; CHECK: test20:
; CHECK: leaq (%rsp), %rdi
; CHECK: callq _test20sret
}
declare void @test20sret(%struct.a* sret)

; Check that -0.0 is not materialized using xor
define void @test21(double* %p1) {
  store double -0.0, double* %p1
  ret void
; CHECK: test21:
; CHECK-NOT: xor
; CHECK: movsd LCPI
}

; Check that immediate arguments to a function
; do not cause massive spilling and are used
; as immediates just before the call.
define void @test22() nounwind {
entry:
  call void @foo22(i32 0)
  call void @foo22(i32 1)
  call void @foo22(i32 2)
  call void @foo22(i32 3)
  ret void
; CHECK: test22:
; CHECK: movl $0, %edi
; CHECK: callq _foo22
; CHECK: movl $1, %edi
; CHECK: callq _foo22
; CHECK: movl $2, %edi
; CHECK: callq _foo22
; CHECK: movl $3, %edi
; CHECK: callq _foo22
}

declare void @foo22(i32)