; RUN: llc -march=hexagon < %s
; REQUIRES: asserts

; Test that the register scavenger does not fail because it can't find
; a spill slot. This occurs when the offset for a spilled object is too large
; and requires another register to compute the location on the stack.

; Function Attrs: nounwind
define void @f0(i8* nocapture readonly %a0, i32 %a1, i32 %a2, i8* nocapture readonly %a3, i8* nocapture readonly %a4, i8* nocapture %a5) #0 {
b0:
  %v0 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> zeroinitializer)
  br i1 undef, label %b1, label %b5

b1:                                               ; preds = %b0
  %v1 = getelementptr inbounds i8, i8* %a3, i32 31
  br label %b2

b2:                                               ; preds = %b4, %b1
  %v2 = phi <16 x i32>* [ undef, %b1 ], [ %v102, %b4 ]
  %v3 = phi i32 [ %a2, %b1 ], [ undef, %b4 ]
  %v4 = tail call <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32> undef, i32 undef)
  br label %b3

b3:                                               ; preds = %b3, %b2
  %v5 = phi <32 x i32> [ %v4, %b2 ], [ %v72, %b3 ]
  %v6 = phi <32 x i32> [ zeroinitializer, %b2 ], [ %v71, %b3 ]
  %v7 = phi i32 [ -4, %b2 ], [ %v73, %b3 ]
  %v8 = load <16 x i32>, <16 x i32>* undef, align 64
  %v9 = mul nsw i32 %v7, 9
  %v10 = tail call <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32> %v8, <16 x i32> undef, i32 4)
  %v11 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> %v8, i32 4)
  %v12 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v10, <16 x i32> undef)
  %v13 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v11, <16 x i32> undef)
  %v14 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb(<16 x i32> %v12, <16 x i32> zeroinitializer, i32 0)
  %v15 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v14, <16 x i32> %v12, <16 x i32> zeroinitializer, i32 1)
  %v16 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v15, <16 x i32> %v12, <16 x i32> undef, i32 2)
  %v17 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v16, <16 x i32> %v12, <16 x i32> undef, i32 3)
  %v18 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v17, <16 x i32> %v12, <16 x i32> %v0, i32 4)
  %v19 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v18, <16 x i32> %v12, <16 x i32> %v0, i32 5)
  %v20 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v19, <16 x i32> %v12, <16 x i32> undef, i32 6)
  %v21 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v20, <16 x i32> %v12, <16 x i32> undef, i32 7)
  %v22 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> undef, <16 x i32> %v13, <16 x i32> %v0, i32 4)
  %v23 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v22, <16 x i32> %v13, <16 x i32> %v0, i32 5)
  %v24 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v23, <16 x i32> %v13, <16 x i32> undef, i32 6)
  %v25 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v24, <16 x i32> %v13, <16 x i32> undef, i32 7)
  %v26 = add nsw i32 %v9, 36
  %v27 = getelementptr inbounds i8, i8* %a3, i32 %v26
  %v28 = load i8, i8* %v27, align 1
  %v29 = zext i8 %v28 to i32
  %v30 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v29)
  %v31 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> %v21, i32 %v30)
  %v32 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> %v25, i32 %v30)
  %v33 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v31)
  %v34 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> undef, <16 x i32> %v33)
  %v35 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v32)
  %v36 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> %v35, <16 x i32> undef)
  %v37 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v36, <16 x i32> %v34)
  %v38 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v6, <32 x i32> %v37, i32 16843009)
  %v39 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> %v10, <16 x i32> %v34)
  %v40 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> %v11, <16 x i32> %v36)
  %v41 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v39)
  %v42 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v40)
  %v43 = tail call <32 x i32> @llvm.hexagon.V6.vadduhw(<16 x i32> %v41, <16 x i32> %v42)
  %v44 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v5, <32 x i32> %v43)
  %v45 = add nsw i32 %v9, 37
  %v46 = getelementptr inbounds i8, i8* %a3, i32 %v45
  %v47 = load i8, i8* %v46, align 1
  %v48 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v38, <32 x i32> undef, i32 16843009)
  %v49 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v44, <32 x i32> undef)
  %v50 = tail call <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32> %v8, <16 x i32> undef, i32 2)
  %v51 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v48, <32 x i32> undef, i32 16843009)
  %v52 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> %v50, <16 x i32> undef)
  %v53 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> undef, <16 x i32> zeroinitializer)
  %v54 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v52)
  %v55 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v53)
  %v56 = tail call <32 x i32> @llvm.hexagon.V6.vadduhw(<16 x i32> %v54, <16 x i32> %v55)
  %v57 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v49, <32 x i32> %v56)
  %v58 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> %v8, i32 1)
  %v59 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v58, <16 x i32> undef)
  %v60 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> undef, <16 x i32> %v59, <16 x i32> undef, i32 7)
  %v61 = load i8, i8* undef, align 1
  %v62 = zext i8 %v61 to i32
  %v63 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v62)
  %v64 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> undef, i32 %v63)
  %v65 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> %v60, i32 %v63)
  %v66 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v64)
  %v67 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> undef, <16 x i32> %v66)
  %v68 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v65)
  %v69 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> undef, <16 x i32> %v68)
  %v70 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v69, <16 x i32> %v67)
  %v71 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v51, <32 x i32> %v70, i32 16843009)
  %v72 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v57, <32 x i32> undef)
  %v73 = add nsw i32 %v7, 1
  %v74 = icmp eq i32 %v73, 5
  br i1 %v74, label %b4, label %b3

b4:                                               ; preds = %b3
  %v75 = phi <32 x i32> [ %v72, %b3 ]
  %v76 = phi <32 x i32> [ %v71, %b3 ]
  %v77 = load i8, i8* %v1, align 1
  %v78 = zext i8 %v77 to i32
  %v79 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v78)
  %v80 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> undef, i32 %v79)
  %v81 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> undef, <16 x i32> undef)
  %v82 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v80)
  %v83 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> undef, <16 x i32> %v82)
  %v84 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v83, <16 x i32> %v81)
  %v85 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> undef, <32 x i32> %v84, i32 16843009)
  %v86 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v85)
  %v87 = tail call <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32> %v86, i32 8388736)
  %v88 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v87)
  %v89 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %v88, i32 1)
  %v90 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %v89, i32 1)
  %v91 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %v90, i32 1)
  %v92 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %v91, i32 1)
  %v93 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %v92, i32 1)
  %v94 = tail call <16 x i32> @llvm.hexagon.V6.vsubwnq(<512 x i1> undef, <16 x i32> undef, <16 x i32> %v93)
  %v95 = tail call <16 x i32> @llvm.hexagon.V6.vsubwnq(<512 x i1> undef, <16 x i32> %v94, <16 x i32> undef)
  %v96 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> undef, i32 1)
  %v97 = tail call <512 x i1> @llvm.hexagon.V6.vgtw(<16 x i32> %v96, <16 x i32> %v95)
  %v98 = tail call <16 x i32> @llvm.hexagon.V6.vaddwnq(<512 x i1> %v97, <16 x i32> undef, <16 x i32> undef)
  %v99 = tail call <16 x i32> @llvm.hexagon.V6.vaddwnq(<512 x i1> undef, <16 x i32> undef, <16 x i32> undef)
  %v100 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %v99, <16 x i32> %v98)
  %v101 = tail call <16 x i32> @llvm.hexagon.V6.vshuffeb(<16 x i32> %v100, <16 x i32> undef)
  %v102 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 1
  store <16 x i32> %v101, <16 x i32>* %v2, align 64
  %v103 = icmp sgt i32 %v3, 64
  br i1 %v103, label %b2, label %b5

b5:                                               ; preds = %b4, %b0
  ret void
}

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32>) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32>, <16 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32>, <16 x i32>) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vlutvvb(<16 x i32>, <16 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32>, <16 x i32>, <16 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.S2.vsplatrb(i32) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32>, <16 x i32>) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32>, <16 x i32>) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32>, <32 x i32>) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vadduhw(<16 x i32>, <16 x i32>) #1

; Function Attrs: nounwind readnone
declare <512 x i1> @llvm.hexagon.V6.vgtw(<16 x i32>, <16 x i32>) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vaddwnq(<512 x i1>, <16 x i32>, <16 x i32>) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vsubwnq(<512 x i1>, <16 x i32>, <16 x i32>) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32>, i32) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vshuffeb(<16 x i32>, <16 x i32>) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32>, <16 x i32>) #1

attributes #0 = { nounwind "target-cpu"="hexagonv62" "target-features"="+hvxv62,+hvx-length64b" }
attributes #1 = { nounwind readnone }