; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=powerpc64-unknown-freebsd13.0 -verify-machineinstrs \
; RUN:   -mcpu=ppc64 -ppc-asm-full-reg-names < %s | FileCheck %s

@a = local_unnamed_addr global float* null, align 8

; Function Attrs: nounwind
define void @d() local_unnamed_addr #0 {
; CHECK-LABEL: d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mflr r0
; CHECK-NEXT:    std r0, 16(r1)
; CHECK-NEXT:    stdu r1, -208(r1)
; CHECK-NEXT:    addis r3, r2, .LC0@toc@ha
; CHECK-NEXT:    std r29, 184(r1) # 8-byte Folded Spill
; CHECK-NEXT:    ld r3, .LC0@toc@l(r3)
; CHECK-NEXT:    std r30, 192(r1) # 8-byte Folded Spill
; CHECK-NEXT:    ld r29, 0(r3)
; CHECK-NEXT:    bl c
; CHECK-NEXT:    nop
; CHECK-NEXT:    mr r30, r3
; CHECK-NEXT:    bl b
; CHECK-NEXT:    nop
; CHECK-NEXT:    cmpwi r30, 1
; CHECK-NEXT:    blt cr0, .LBB0_9
; CHECK-NEXT:  # %bb.1: # %for.body.preheader
; CHECK-NEXT:    cmplwi r30, 4
; CHECK-NEXT:    clrldi r4, r30, 32
; CHECK-NEXT:    li r5, 0
; CHECK-NEXT:    blt cr0, .LBB0_7
; CHECK-NEXT:  # %bb.2: # %vector.memcheck
; CHECK-NEXT:    rldic r6, r30, 2, 30
; CHECK-NEXT:    add r7, r3, r6
; CHECK-NEXT:    cmpld r29, r7
; CHECK-NEXT:    add r6, r29, r6
; CHECK-NEXT:    bc 4, lt, .LBB0_4
; CHECK-NEXT:  # %bb.3: # %vector.memcheck
; CHECK-NEXT:    cmpld r3, r6
; CHECK-NEXT:    bc 12, lt, .LBB0_7
; CHECK-NEXT:  .LBB0_4: # %vector.ph
; CHECK-NEXT:    rlwinm r5, r4, 0, 0, 29
; CHECK-NEXT:    li r7, 15
; CHECK-NEXT:    addi r6, r5, -4
; CHECK-NEXT:    addi r8, r1, 144
; CHECK-NEXT:    rldicl r6, r6, 62, 2
; CHECK-NEXT:    addi r9, r1, 128
; CHECK-NEXT:    addi r6, r6, 1
; CHECK-NEXT:    addi r10, r1, 160
; CHECK-NEXT:    mtctr r6
; CHECK-NEXT:    li r6, 0
; CHECK-NEXT:    addi r11, r1, 112
; CHECK-NEXT:  .LBB0_5: # %vector.body
; CHECK-NEXT:    #
; CHECK-NEXT:    add r12, r3, r6
; CHECK-NEXT:    lvx v3, r3, r6
; CHECK-NEXT:    lvx v5, r12, r7
; CHECK-NEXT:    add r12, r29, r6
; CHECK-NEXT:    lvsl v2, r3, r6
; CHECK-NEXT:    vperm v2, v3, v5, v2
; CHECK-NEXT:    lvx v3, r29, r6
; CHECK-NEXT:    lvx v5, r12, r7
; CHECK-NEXT:    lvsl v4, r29, r6
; CHECK-NEXT:    stvx v2, 0, r8
; CHECK-NEXT:    vperm v2, v3, v5, v4
; CHECK-NEXT:    stvx v2, 0, r9
; CHECK-NEXT:    lfs f0, 156(r1)
; CHECK-NEXT:    lfs f1, 140(r1)
; CHECK-NEXT:    fdivs f0, f1, f0
; CHECK-NEXT:    lfs f1, 136(r1)
; CHECK-NEXT:    stfs f0, 172(r1)
; CHECK-NEXT:    lfs f0, 152(r1)
; CHECK-NEXT:    fdivs f0, f1, f0
; CHECK-NEXT:    lfs f1, 132(r1)
; CHECK-NEXT:    stfs f0, 168(r1)
; CHECK-NEXT:    lfs f0, 148(r1)
; CHECK-NEXT:    fdivs f0, f1, f0
; CHECK-NEXT:    lfs f1, 128(r1)
; CHECK-NEXT:    stfs f0, 164(r1)
; CHECK-NEXT:    lfs f0, 144(r1)
; CHECK-NEXT:    fdivs f0, f1, f0
; CHECK-NEXT:    stfs f0, 160(r1)
; CHECK-NEXT:    lvx v2, 0, r10
; CHECK-NEXT:    stvx v2, 0, r11
; CHECK-NEXT:    ld r0, 112(r1)
; CHECK-NEXT:    stdx r0, r29, r6
; CHECK-NEXT:    addi r6, r6, 16
; CHECK-NEXT:    ld r0, 120(r1)
; CHECK-NEXT:    std r0, 8(r12)
; CHECK-NEXT:    bdnz .LBB0_5
; CHECK-NEXT:  # %bb.6: # %middle.block
; CHECK-NEXT:    cmpld r5, r4
; CHECK-NEXT:    beq cr0, .LBB0_9
; CHECK-NEXT:  .LBB0_7: # %for.body.preheader18
; CHECK-NEXT:    sldi r6, r5, 2
; CHECK-NEXT:    sub r5, r4, r5
; CHECK-NEXT:    addi r6, r6, -4
; CHECK-NEXT:    add r3, r3, r6
; CHECK-NEXT:    add r4, r29, r6
; CHECK-NEXT:    mtctr r5
; CHECK-NEXT:  .LBB0_8: # %for.body
; CHECK-NEXT:    #
; CHECK-NEXT:    lfsu f0, 4(r4)
; CHECK-NEXT:    lfsu f1, 4(r3)
; CHECK-NEXT:    fdivs f0, f0, f1
; CHECK-NEXT:    stfs f0, 0(r4)
; CHECK-NEXT:    bdnz .LBB0_8
; CHECK-NEXT:  .LBB0_9: # %for.end
; CHECK-NEXT:    ld r30, 192(r1) # 8-byte Folded Reload
; CHECK-NEXT:    ld r29, 184(r1) # 8-byte Folded Reload
; CHECK-NEXT:    addi r1, r1, 208
; CHECK-NEXT:    ld r0, 16(r1)
; CHECK-NEXT:    mtlr r0
; CHECK-NEXT:    blr
entry:
  %0 = load float*, float** @a, align 8
  %call = call signext i32 bitcast (i32 (...)* @c to i32 ()*)() #2
  %call1 = call float* bitcast (float* (...)* @b to float* ()*)() #2
  %cmp11 = icmp sgt i32 %call, 0
  br i1 %cmp11, label %for.body.preheader, label %for.end

for.body.preheader:                               ; preds = %entry
  %wide.trip.count = zext i32 %call to i64
  %min.iters.check = icmp ult i32 %call, 4
  br i1 %min.iters.check, label %for.body.preheader18, label %vector.memcheck

vector.memcheck:                                  ; preds = %for.body.preheader
  %scevgep = getelementptr float, float* %0, i64 %wide.trip.count
  %scevgep15 = getelementptr float, float* %call1, i64 %wide.trip.count
  %bound0 = icmp ult float* %0, %scevgep15
  %bound1 = icmp ult float* %call1, %scevgep
  %found.conflict = and i1 %bound0, %bound1
  br i1 %found.conflict, label %for.body.preheader18, label %vector.ph

vector.ph:                                        ; preds = %vector.memcheck
  %n.vec = and i64 %wide.trip.count, 4294967292
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %1 = getelementptr inbounds float, float* %call1, i64 %index
  %2 = bitcast float* %1 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %2, align 4
  %3 = getelementptr inbounds float, float* %0, i64 %index
  %4 = bitcast float* %3 to <4 x float>*
  %wide.load17 = load <4 x float>, <4 x float>* %4, align 4
  %5 = fdiv reassoc nsz arcp afn <4 x float> %wide.load17, %wide.load
  %6 = bitcast float* %3 to <4 x float>*
  store <4 x float> %5, <4 x float>* %6, align 4
  %index.next = add i64 %index, 4
  %7 = icmp eq i64 %index.next, %n.vec
  br i1 %7, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.vec, %wide.trip.count
  br i1 %cmp.n, label %for.end, label %for.body.preheader18

for.body.preheader18:                             ; preds = %middle.block, %vector.memcheck, %for.body.preheader
  %indvars.iv.ph = phi i64 [ 0, %vector.memcheck ], [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
  br label %for.body

for.body:                                         ; preds = %for.body.preheader18, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader18 ]
  %arrayidx = getelementptr inbounds float, float* %call1, i64 %indvars.iv
  %8 = load float, float* %arrayidx, align 4
  %arrayidx3 = getelementptr inbounds float, float* %0, i64 %indvars.iv
  %9 = load float, float* %arrayidx3, align 4
  %div = fdiv reassoc nsz arcp afn float %9, %8
  store float %div, float* %arrayidx3, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %middle.block, %entry
  ret void
}

declare signext i32 @c(...) local_unnamed_addr #1

declare float* @b(...) local_unnamed_addr #1

attributes #0 = { nounwind }