; RUN: llc -O3 -march=hexagon < %s
; REQUIRES: asserts

target triple = "hexagon-unknown--elf"

@g0 = external global void (float*, i32, i32, float*, float*)**

; Function Attrs: nounwind
define void @f0(float* nocapture %a0, float* nocapture %a1, float* %a2) #0 {
b0:
  %v0 = alloca [64 x float], align 16
  %v1 = alloca [8 x float], align 8
  %v2 = bitcast [64 x float]* %v0 to i8*
  call void @llvm.lifetime.start.p0i8(i64 256, i8* %v2) #2
  %v3 = load float, float* %a0, align 4, !tbaa !0
  %v4 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 35
  store float %v3, float* %v4, align 4, !tbaa !0
  %v5 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 0
  store float %v3, float* %v5, align 16, !tbaa !0
  %v6 = getelementptr inbounds float, float* %a0, i32 1
  %v7 = load float, float* %v6, align 4, !tbaa !0
  %v8 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 36
  store float %v7, float* %v8, align 16, !tbaa !0
  %v9 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 1
  store float %v7, float* %v9, align 4, !tbaa !0
  %v10 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 37
  store float 1.000000e+00, float* %v10, align 4, !tbaa !0
  %v11 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 2
  store float 1.000000e+00, float* %v11, align 8, !tbaa !0
  %v12 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 34
  store float 0.000000e+00, float* %v12, align 8, !tbaa !0
  %v13 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 33
  store float 0.000000e+00, float* %v13, align 4, !tbaa !0
  %v14 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 32
  store float 0.000000e+00, float* %v14, align 16, !tbaa !0
  %v15 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 5
  store float 0.000000e+00, float* %v15, align 4, !tbaa !0
  %v16 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 4
  store float 0.000000e+00, float* %v16, align 16, !tbaa !0
  %v17 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 3
  store float 0.000000e+00, float* %v17, align 4, !tbaa !0
  %v18 = load float, float* %a1, align 4, !tbaa !0
  %v19 = fmul float %v3, %v18
  %v20 = fsub float -0.000000e+00, %v19
  %v21 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 6
  store float %v20, float* %v21, align 8, !tbaa !0
  %v22 = fmul float %v7, %v18
  %v23 = fsub float -0.000000e+00, %v22
  %v24 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 7
  store float %v23, float* %v24, align 4, !tbaa !0
  %v25 = getelementptr inbounds float, float* %a1, i32 1
  %v26 = load float, float* %v25, align 4, !tbaa !0
  %v27 = fmul float %v3, %v26
  %v28 = fsub float -0.000000e+00, %v27
  %v29 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 38
  store float %v28, float* %v29, align 8, !tbaa !0
  %v30 = fmul float %v7, %v26
  %v31 = fsub float -0.000000e+00, %v30
  %v32 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 39
  store float %v31, float* %v32, align 4, !tbaa !0
  %v33 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 0
  store float %v18, float* %v33, align 8, !tbaa !0
  %v34 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 4
  store float %v26, float* %v34, align 8, !tbaa !0
  %v35 = getelementptr float, float* %a0, i32 2
  %v36 = getelementptr float, float* %a1, i32 2
  %v37 = load float, float* %v35, align 4, !tbaa !0
  %v38 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 43
  store float %v37, float* %v38, align 4, !tbaa !0
  %v39 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 8
  store float %v37, float* %v39, align 16, !tbaa !0
  %v40 = getelementptr inbounds float, float* %a0, i32 3
  %v41 = load float, float* %v40, align 4, !tbaa !0
  %v42 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 44
  store float %v41, float* %v42, align 16, !tbaa !0
  %v43 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 9
  store float %v41, float* %v43, align 4, !tbaa !0
  %v44 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 45
  store float 1.000000e+00, float* %v44, align 4, !tbaa !0
  %v45 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 10
  store float 1.000000e+00, float* %v45, align 8, !tbaa !0
  %v46 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 42
  store float 0.000000e+00, float* %v46, align 8, !tbaa !0
  %v47 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 41
  store float 0.000000e+00, float* %v47, align 4, !tbaa !0
  %v48 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 40
  store float 0.000000e+00, float* %v48, align 16, !tbaa !0
  %v49 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 13
  store float 0.000000e+00, float* %v49, align 4, !tbaa !0
  %v50 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 12
  store float 0.000000e+00, float* %v50, align 16, !tbaa !0
  %v51 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 11
  store float 0.000000e+00, float* %v51, align 4, !tbaa !0
  %v52 = load float, float* %v36, align 4, !tbaa !0
  %v53 = fmul float %v37, %v52
  %v54 = fsub float -0.000000e+00, %v53
  %v55 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 14
  store float %v54, float* %v55, align 8, !tbaa !0
  %v56 = fmul float %v41, %v52
  %v57 = fsub float -0.000000e+00, %v56
  %v58 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 15
  store float %v57, float* %v58, align 4, !tbaa !0
  %v59 = getelementptr inbounds float, float* %a1, i32 3
  %v60 = load float, float* %v59, align 4, !tbaa !0
  %v61 = fmul float %v37, %v60
  %v62 = fsub float -0.000000e+00, %v61
  %v63 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 46
  store float %v62, float* %v63, align 8, !tbaa !0
  %v64 = fmul float %v41, %v60
  %v65 = fsub float -0.000000e+00, %v64
  %v66 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 47
  store float %v65, float* %v66, align 4, !tbaa !0
  %v67 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 1
  store float %v52, float* %v67, align 4, !tbaa !0
  %v68 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 5
  store float %v60, float* %v68, align 4, !tbaa !0
  %v69 = getelementptr float, float* %a0, i32 4
  %v70 = getelementptr float, float* %a1, i32 4
  %v71 = load float, float* %v69, align 4, !tbaa !0
  %v72 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 51
  store float %v71, float* %v72, align 4, !tbaa !0
  %v73 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 16
  store float %v71, float* %v73, align 16, !tbaa !0
  %v74 = getelementptr inbounds float, float* %a0, i32 5
  %v75 = load float, float* %v74, align 4, !tbaa !0
  %v76 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 52
  store float %v75, float* %v76, align 16, !tbaa !0
  %v77 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 17
  store float %v75, float* %v77, align 4, !tbaa !0
  %v78 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 53
  store float 1.000000e+00, float* %v78, align 4, !tbaa !0
  %v79 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 18
  store float 1.000000e+00, float* %v79, align 8, !tbaa !0
  %v80 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 50
  store float 0.000000e+00, float* %v80, align 8, !tbaa !0
  %v81 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 49
  store float 0.000000e+00, float* %v81, align 4, !tbaa !0
  %v82 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 48
  store float 0.000000e+00, float* %v82, align 16, !tbaa !0
  %v83 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 21
  store float 0.000000e+00, float* %v83, align 4, !tbaa !0
  %v84 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 20
  store float 0.000000e+00, float* %v84, align 16, !tbaa !0
  %v85 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 19
  store float 0.000000e+00, float* %v85, align 4, !tbaa !0
  %v86 = load float, float* %v70, align 4, !tbaa !0
  %v87 = fmul float %v71, %v86
  %v88 = fsub float -0.000000e+00, %v87
  %v89 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 22
  store float %v88, float* %v89, align 8, !tbaa !0
  %v90 = fmul float %v75, %v86
  %v91 = fsub float -0.000000e+00, %v90
  %v92 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 23
  store float %v91, float* %v92, align 4, !tbaa !0
  %v93 = getelementptr inbounds float, float* %a1, i32 5
  %v94 = load float, float* %v93, align 4, !tbaa !0
  %v95 = fmul float %v71, %v94
  %v96 = fsub float -0.000000e+00, %v95
  %v97 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 54
  store float %v96, float* %v97, align 8, !tbaa !0
  %v98 = fmul float %v75, %v94
  %v99 = fsub float -0.000000e+00, %v98
  %v100 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 55
  store float %v99, float* %v100, align 4, !tbaa !0
  %v101 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 2
  store float %v86, float* %v101, align 8, !tbaa !0
  %v102 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 6
  store float %v94, float* %v102, align 8, !tbaa !0
  %v103 = getelementptr float, float* %a0, i32 6
  %v104 = getelementptr float, float* %a1, i32 6
  %v105 = load float, float* %v103, align 4, !tbaa !0
  %v106 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 59
  store float %v105, float* %v106, align 4, !tbaa !0
  %v107 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 24
  store float %v105, float* %v107, align 16, !tbaa !0
  %v108 = getelementptr inbounds float, float* %a0, i32 7
  %v109 = load float, float* %v108, align 4, !tbaa !0
  %v110 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 60
  store float %v109, float* %v110, align 16, !tbaa !0
  %v111 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 25
  store float %v109, float* %v111, align 4, !tbaa !0
  %v112 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 61
  store float 1.000000e+00, float* %v112, align 4, !tbaa !0
  %v113 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 26
  store float 1.000000e+00, float* %v113, align 8, !tbaa !0
  %v114 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 58
  store float 0.000000e+00, float* %v114, align 8, !tbaa !0
  %v115 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 57
  store float 0.000000e+00, float* %v115, align 4, !tbaa !0
  %v116 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 56
  store float 0.000000e+00, float* %v116, align 16, !tbaa !0
  %v117 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 29
  store float 0.000000e+00, float* %v117, align 4, !tbaa !0
  %v118 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 28
  store float 0.000000e+00, float* %v118, align 16, !tbaa !0
  %v119 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 27
  store float 0.000000e+00, float* %v119, align 4, !tbaa !0
  %v120 = load float, float* %v104, align 4, !tbaa !0
  %v121 = fmul float %v105, %v120
  %v122 = fsub float -0.000000e+00, %v121
  %v123 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 30
  store float %v122, float* %v123, align 8, !tbaa !0
  %v124 = fmul float %v109, %v120
  %v125 = fsub float -0.000000e+00, %v124
  %v126 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 31
  store float %v125, float* %v126, align 4, !tbaa !0
  %v127 = getelementptr inbounds float, float* %a1, i32 7
  %v128 = load float, float* %v127, align 4, !tbaa !0
  %v129 = fmul float %v105, %v128
  %v130 = fsub float -0.000000e+00, %v129
  %v131 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 62
  store float %v130, float* %v131, align 8, !tbaa !0
  %v132 = fmul float %v109, %v128
  %v133 = fsub float -0.000000e+00, %v132
  %v134 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 63
  store float %v133, float* %v134, align 4, !tbaa !0
  %v135 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 3
  store float %v120, float* %v135, align 4, !tbaa !0
  %v136 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 7
  store float %v128, float* %v136, align 4, !tbaa !0
  %v137 = load void (float*, i32, i32, float*, float*)**, void (float*, i32, i32, float*, float*)*** @g0, align 4, !tbaa !4
  %v138 = load void (float*, i32, i32, float*, float*)*, void (float*, i32, i32, float*, float*)** %v137, align 4, !tbaa !4
  call void %v138(float* %v5, i32 8, i32 8, float* %v33, float* %a2) #2
  %v139 = getelementptr inbounds float, float* %a2, i32 8
  store float 1.000000e+00, float* %v139, align 4, !tbaa !0
  call void @llvm.lifetime.end.p0i8(i64 256, i8* %v2) #2
  ret void
}

; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1

; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1

attributes #0 = { nounwind "target-cpu"="hexagonv55" }
attributes #1 = { argmemonly nounwind }
attributes #2 = { nounwind }

!0 = !{!1, !1, i64 0}
!1 = !{!"float", !2}
!2 = !{!"omnipotent char", !3}
!3 = !{!"Simple C/C++ TBAA"}
!4 = !{!5, !5, i64 0}
!5 = !{!"any pointer", !2}