; RUN: llc -O3 -disable-peephole -mcpu=corei7-avx -mattr=+avx < %s | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"

; pr18846 - needless avx spill/reload
; Test for unnecessary repeated spills due to eliminateRedundantSpills failing
; to recognise unaligned ymm load/stores to the stack.
; Bugpoint reduced testcase.

;CHECK-LABEL: _Z16opt_kernel_cachePfS_S_
;CHECK-NOT: vmovups {{.*#+}} 32-byte Folded Spill
;CHECK-NOT: vmovups {{.*#+}} 32-byte Folded Reload

; Function Attrs: uwtable
define void @_Z16opt_kernel_cachePfS_S_() #0 {
entry:
  br label %for.body29

for.body29:                                       ; preds = %for.body29, %entry
  br i1 undef, label %for.body29, label %for.body65

for.body65:                                       ; preds = %for.body29
  %0 = load float, float* undef, align 4, !tbaa !1
  %vecinit7.i4448 = insertelement <8 x float> undef, float %0, i32 7
  %1 = load float, float* null, align 4, !tbaa !1
  %vecinit7.i4304 = insertelement <8 x float> undef, float %1, i32 7
  %2 = load float, float* undef, align 4, !tbaa !1
  %vecinit7.i4196 = insertelement <8 x float> undef, float %2, i32 7
  %3 = or i64 0, 16
  %add.ptr111.sum4096 = add i64 %3, 0
  %4 = load <8 x float>, <8 x float>* null, align 16, !tbaa !5
  %add.ptr162 = getelementptr inbounds [65536 x float], [65536 x float]* null, i64 0, i64 %add.ptr111.sum4096
  %__v.i4158 = bitcast float* %add.ptr162 to <8 x float>*
  %5 = load <8 x float>, <8 x float>* %__v.i4158, align 16, !tbaa !5
  %add.ptr158.sum40975066 = or i64 %add.ptr111.sum4096, 8
  %add.ptr183 = getelementptr inbounds [65536 x float], [65536 x float]* null, i64 0, i64 %add.ptr158.sum40975066
  %__v.i4162 = bitcast float* %add.ptr183 to <8 x float>*
  %6 = load <8 x float>, <8 x float>* %__v.i4162, align 16, !tbaa !5
  %add.ptr200.sum40995067 = or i64 undef, 8
  %add.ptr225 = getelementptr inbounds [65536 x float], [65536 x float]* null, i64 0, i64 %add.ptr200.sum40995067
  %__v.i4167 = bitcast float* %add.ptr225 to <8 x float>*
  %7 = load <8 x float>, <8 x float>* %__v.i4167, align 4, !tbaa !5
  %8 = load <8 x float>, <8 x float>* undef, align 16, !tbaa !5
  %add.ptr242.sum41015068 = or i64 0, 8
  %add.ptr267 = getelementptr inbounds [65536 x float], [65536 x float]* null, i64 0, i64 %add.ptr242.sum41015068
  %__v.i4171 = bitcast float* %add.ptr267 to <8 x float>*
  %9 = load <8 x float>, <8 x float>* %__v.i4171, align 4, !tbaa !5
  %mul.i4690 = fmul <8 x float> %7, undef
  %add.i4665 = fadd <8 x float> undef, undef
  %mul.i4616 = fmul <8 x float> %8, undef
  %mul.i4598 = fmul <8 x float> undef, undef
  %add.i4597 = fadd <8 x float> undef, %mul.i4598
  %mul.i4594 = fmul <8 x float> %6, undef
  %add.i4593 = fadd <8 x float> undef, %mul.i4594
  %mul.i4578 = fmul <8 x float> %9, undef
  %add.i4577 = fadd <8 x float> %add.i4593, %mul.i4578
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4577) #1
  %10 = load <8 x float>, <8 x float>* null, align 16, !tbaa !5
  %11 = load <8 x float>, <8 x float>* undef, align 16, !tbaa !5
  %mul.i4564 = fmul <8 x float> %4, undef
  %add.i4563 = fadd <8 x float> %10, %mul.i4564
  %mul.i4560 = fmul <8 x float> %5, undef
  %add.i4559 = fadd <8 x float> %11, %mul.i4560
  %add.i4547 = fadd <8 x float> %add.i4563, undef
  %mul.i4546 = fmul <8 x float> %7, undef
  %add.i4545 = fadd <8 x float> undef, %mul.i4546
  %mul.i4544 = fmul <8 x float> %8, undef
  %add.i4543 = fadd <8 x float> %add.i4559, %mul.i4544
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4547) #1
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4545) #1
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4543) #1
  %add.i4455 = fadd <8 x float> undef, undef
  %mul.i4454 = fmul <8 x float> undef, undef
  %add.i4453 = fadd <8 x float> undef, %mul.i4454
  %mul.i4440 = fmul <8 x float> zeroinitializer, %vecinit7.i4448
  %add.i4439 = fadd <8 x float> %add.i4455, %mul.i4440
  %mul.i4438 = fmul <8 x float> %7, %vecinit7.i4448
  %add.i4437 = fadd <8 x float> %add.i4453, %mul.i4438
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4439) #1
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4437) #1
  %add.i4413 = fadd <8 x float> zeroinitializer, undef
  %mul.i4400 = fmul <8 x float> %8, undef
  %add.i4399 = fadd <8 x float> undef, %mul.i4400
  %add.i4397 = fadd <8 x float> %add.i4413, zeroinitializer
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> zeroinitializer) #1
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4399) #1
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4397) #1
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> undef) #1
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> undef) #1
  %mul.i4330 = fmul <8 x float> %7, undef
  %add.i4329 = fadd <8 x float> undef, %mul.i4330
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4329) #1
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> undef) #1
  %mul.i4312 = fmul <8 x float> %4, undef
  %add.i4311 = fadd <8 x float> undef, %mul.i4312
  %mul.i4306 = fmul <8 x float> %6, undef
  %add.i4305 = fadd <8 x float> undef, %mul.i4306
  %add.i4295 = fadd <8 x float> %add.i4311, undef
  %mul.i4294 = fmul <8 x float> %7, %vecinit7.i4304
  %add.i4293 = fadd <8 x float> undef, %mul.i4294
  %mul.i4292 = fmul <8 x float> %8, %vecinit7.i4304
  %add.i4291 = fadd <8 x float> undef, %mul.i4292
  %mul.i4290 = fmul <8 x float> %9, %vecinit7.i4304
  %add.i4289 = fadd <8 x float> %add.i4305, %mul.i4290
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4295) #1
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4293) #1
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4291) #1
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4289) #1
  %12 = load <8 x float>, <8 x float>* undef, align 16, !tbaa !5
  %mul.i4274 = fmul <8 x float> undef, undef
  %add.i4273 = fadd <8 x float> %12, %mul.i4274
  %mul.i4258 = fmul <8 x float> %7, undef
  %add.i4257 = fadd <8 x float> %add.i4273, %mul.i4258
  %mul.i4254 = fmul <8 x float> %9, undef
  %add.i4253 = fadd <8 x float> undef, %mul.i4254
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4257) #1
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4253) #1
  %mul.i = fmul <8 x float> %9, %vecinit7.i4196
  %add.i = fadd <8 x float> undef, %mul.i
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> zeroinitializer) #1
  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i) #1
  unreachable
}

; Function Attrs: nounwind
declare void @llvm.x86.avx.storeu.ps.256(i8*, <8 x float>) #1

attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }

!llvm.ident = !{!0}

!0 = !{!"clang version 3.5 "}
!1 = !{!2, !2, i64 0}
!2 = !{!"float", !3, i64 0}
!3 = !{!"omnipotent char", !4, i64 0}
!4 = !{!"Simple C/C++ TBAA"}
!5 = !{!3, !3, i64 0}