; REQUIRES: asserts
; RUN: llc -march=hexagon -stats -o /dev/null < %s
;
; Regression test for the Hexagon backend. There are no FileCheck patterns:
; the test passes as long as llc (with -stats, which requires an asserts
; build) compiles this module without crashing or asserting.
;
; The function @f0 is characteristic ungeneralized -O0-style IR: every source
; variable lives in an alloca slot and is reloaded before each use. Notable
; stressors for the backend: a dynamically sized alloca (%v18, sized by a
; runtime value), a large over-aligned alloca (%v9, align 32), and the
; llvm.stacksave/llvm.stackrestore pair that brackets the dynamic allocation.

; Opaque/aggregate types referenced by the function and its callees.
; %s.1 is opaque (only ever used through pointers).
%s.0 = type { %s.1*, %s.2*, %s.17*, i32, i32, i32, i32, i8*, i8*, i8* }
%s.1 = type opaque
%s.2 = type { %s.3, %s.4*, i8* }
%s.3 = type { i32, i32 }
; Large struct; %v9 below allocates one of these at 32-byte alignment.
%s.4 = type { %s.4*, %s.4*, %s.4*, %s.4*, i32, i32, i32, %s.3*, i32, [1 x %s.5]*, [1 x %s.5]*, i8, i8, i8, i8*, i32, %s.4*, %s.8*, i8, i8, i8, i32*, i32, i32*, i32, i8*, i8, %s.9, [32 x i8**], [7 x i8*], i32, i8*, i32, %s.4*, i32, i32, %s.11, %s.13, i8, i8, i8, %s.14*, %s.15*, %s.15*, i32, [12 x i8] }
%s.5 = type { [1 x %s.6], i32, %s.7, [4 x i8] }
%s.6 = type { [16 x i32] }
%s.7 = type { [2 x i32] }
%s.8 = type { void (i8*)*, i8*, i32, %s.8* }
%s.9 = type { i8* (i8*)*, i8*, %s.7, i32, %s.10 }
%s.10 = type { i32 }
%s.11 = type { %s.12, i8, i8* }
%s.12 = type { [2 x i32] }
%s.13 = type { i32, i32 }
%s.14 = type { i8*, i32 (i8*, %s.4*)* }
%s.15 = type { %s.15*, %s.16*, i32 }
%s.16 = type { %s.3, i32, %s.4*, %s.4*, %s.4*, i32, i32 }
%s.17 = type { i32, void (i8*)* }
%s.18 = type { %s.0*, i8* }

; Function Attrs: nounwind
; Searches an array of %s.2 records (copied in via @f2) for an entry whose
; id matches %a1; on a match, fills *%a2 with the context pointer and the
; matching record, stores a status byte in %v0, and returns it via b23.
; NOTE(review): the status codes 0/1/4/8 come from the original source's
; error enum — their exact meanings are not visible here.
define zeroext i8 @f0(%s.0* %a0, i32 %a1, %s.18* %a2) #0 {
b0:
  ; Stack slots for the return value, the three arguments, and locals.
  %v0 = alloca i8, align 1
  %v1 = alloca %s.0*, align 4
  %v2 = alloca i32, align 4
  %v3 = alloca %s.18*, align 4
  %v4 = alloca i32, align 4
  %v5 = alloca i32, align 4
  %v6 = alloca i8*
  %v7 = alloca i32, align 4
  %v8 = alloca i32
  ; Over-aligned aggregate temporary used as the destination of the second
  ; @f2 copy in b9.
  %v9 = alloca %s.4, align 32
  store %s.0* %a0, %s.0** %v1, align 4
  store i32 %a1, i32* %v2, align 4
  store %s.18* %a2, %s.18** %v3, align 4
  ; %v4 <- field 3 of *%a0 (element count for the loop and the VLA below).
  %v10 = load %s.0*, %s.0** %v1, align 4
  %v11 = getelementptr inbounds %s.0, %s.0* %v10, i32 0, i32 3
  %v12 = load i32, i32* %v11, align 4
  store i32 %v12, i32* %v4, align 4
  ; %v5 <- field 6 of *%a0 (byte size passed to @f2 in b9).
  %v13 = load %s.0*, %s.0** %v1, align 4
  %v14 = getelementptr inbounds %s.0, %s.0* %v13, i32 0, i32 6
  %v15 = load i32, i32* %v14, align 4
  store i32 %v15, i32* %v5, align 4
  %v16 = load i32, i32* %v4, align 4
  ; Save the stack pointer before the dynamically sized alloca; restored in
  ; b23 on every exit path.
  %v17 = call i8* @llvm.stacksave()
  store i8* %v17, i8** %v6
  ; Runtime-sized array of %s.2 — the construct this test exercises.
  %v18 = alloca %s.2, i32 %v16, align 8
  %v19 = load %s.0*, %s.0** %v1, align 4
  %v20 = call i32 @f1(%s.0* %v19)
  %v21 = icmp ne i32 %v20, 0
  br i1 %v21, label %b2, label %b1

b1:                                               ; preds = %b0
  ; @f1 returned 0: fail with status 8.
  store i8 8, i8* %v0
  store i32 1, i32* %v8
  br label %b23

b2:                                               ; preds = %b0
  ; Copy the %s.2 array (16 bytes per element, %v4 elements) from the
  ; context into the VLA %v18 via @f2.
  %v22 = load %s.0*, %s.0** %v1, align 4
  %v23 = getelementptr inbounds %s.0, %s.0* %v22, i32 0, i32 0
  %v24 = load %s.1*, %s.1** %v23, align 4
  %v25 = load %s.0*, %s.0** %v1, align 4
  %v26 = getelementptr inbounds %s.0, %s.0* %v25, i32 0, i32 1
  %v27 = load %s.2*, %s.2** %v26, align 4
  %v28 = bitcast %s.2* %v27 to i8*
  %v29 = bitcast %s.2* %v18 to i8*
  %v30 = load i32, i32* %v4, align 4
  %v31 = mul i32 16, %v30
  %v32 = call zeroext i8 @f2(%s.1* %v24, i8* %v28, i8* %v29, i32 %v31)
  %v33 = zext i8 %v32 to i32
  %v34 = icmp ne i32 %v33, 0
  br i1 %v34, label %b3, label %b4

b3:                                               ; preds = %b2
  ; Copy failed: fail with status 1.
  store i8 1, i8* %v0
  store i32 1, i32* %v8
  br label %b23

b4:                                               ; preds = %b2
  ; Loop counter %v7 = 0.
  store i32 0, i32* %v7, align 4
  br label %b5

b5:                                               ; preds = %b21, %b4
  ; Loop header: continue while %v7 < %v4. The condition is materialized
  ; as an i1 phi in b7 (unsimplified -O0 control flow).
  %v35 = load i32, i32* %v7, align 4
  %v36 = load i32, i32* %v4, align 4
  %v37 = icmp ult i32 %v35, %v36
  br i1 %v37, label %b6, label %b7

b6:                                               ; preds = %b5
  br label %b7

b7:                                               ; preds = %b6, %b5
  %v38 = phi i1 [ false, %b5 ], [ true, %b6 ]
  br i1 %v38, label %b8, label %b22

b8:                                               ; preds = %b7
  ; Load element [%v7].field1 (a %s.4*); null means an empty slot.
  %v39 = load i32, i32* %v7, align 4
  %v40 = getelementptr inbounds %s.2, %s.2* %v18, i32 %v39
  %v41 = getelementptr inbounds %s.2, %s.2* %v40, i32 0, i32 1
  %v42 = load %s.4*, %s.4** %v41, align 4
  %v43 = icmp ne %s.4* %v42, null
  br i1 %v43, label %b9, label %b17

b9:                                               ; preds = %b8
  ; Non-null slot: copy the pointed-to %s.4 (%v5 bytes) into local %v9.
  %v44 = load %s.0*, %s.0** %v1, align 4
  %v45 = getelementptr inbounds %s.0, %s.0* %v44, i32 0, i32 0
  %v46 = load %s.1*, %s.1** %v45, align 4
  %v47 = load i32, i32* %v7, align 4
  %v48 = getelementptr inbounds %s.2, %s.2* %v18, i32 %v47
  %v49 = getelementptr inbounds %s.2, %s.2* %v48, i32 0, i32 1
  %v50 = load %s.4*, %s.4** %v49, align 4
  %v51 = bitcast %s.4* %v50 to i8*
  %v52 = bitcast %s.4* %v9 to i8*
  %v53 = load i32, i32* %v5, align 4
  %v54 = call zeroext i8 @f2(%s.1* %v46, i8* %v51, i8* %v52, i32 %v53)
  %v55 = zext i8 %v54 to i32
  %v56 = icmp ne i32 %v55, 0
  br i1 %v56, label %b10, label %b11

b10:                                              ; preds = %b9
  ; Copy failed: fail with status 1.
  store i8 1, i8* %v0
  store i32 1, i32* %v8
  br label %b23

b11:                                              ; preds = %b9
  ; Candidate id = field 5 of the copied %s.4; if it is 0, ask @f3 for it.
  %v57 = getelementptr inbounds %s.4, %s.4* %v9, i32 0, i32 5
  %v58 = load i32, i32* %v57, align 4
  %v59 = icmp ne i32 %v58, 0
  br i1 %v59, label %b12, label %b13

b12:                                              ; preds = %b11
  br label %b14

b13:                                              ; preds = %b11
  %v60 = load %s.0*, %s.0** %v1, align 4
  %v61 = getelementptr inbounds %s.0, %s.0* %v60, i32 0, i32 0
  %v62 = load %s.1*, %s.1** %v61, align 4
  %v63 = call i32 @f3(%s.1* %v62)
  br label %b14

b14:                                              ; preds = %b13, %b12
  ; %v64 = id from the struct (b12) or from @f3 (b13); compare against %a1.
  %v64 = phi i32 [ %v58, %b12 ], [ %v63, %b13 ]
  %v65 = load i32, i32* %v2, align 4
  %v66 = icmp eq i32 %v64, %v65
  br i1 %v66, label %b15, label %b16

b15:                                              ; preds = %b14
  ; Match: fill *%a2 = { %a0, pointer to the matching %s.4 }, succeed (0).
  %v67 = load %s.0*, %s.0** %v1, align 4
  %v68 = load %s.18*, %s.18** %v3, align 4
  %v69 = getelementptr inbounds %s.18, %s.18* %v68, i32 0, i32 0
  store %s.0* %v67, %s.0** %v69, align 4
  %v70 = load i32, i32* %v7, align 4
  %v71 = getelementptr inbounds %s.2, %s.2* %v18, i32 %v70
  %v72 = getelementptr inbounds %s.2, %s.2* %v71, i32 0, i32 1
  %v73 = load %s.4*, %s.4** %v72, align 4
  %v74 = bitcast %s.4* %v73 to i8*
  %v75 = load %s.18*, %s.18** %v3, align 4
  %v76 = getelementptr inbounds %s.18, %s.18* %v75, i32 0, i32 1
  store i8* %v74, i8** %v76, align 4
  store i8 0, i8* %v0
  store i32 1, i32* %v8
  br label %b23

b16:                                              ; preds = %b14
  br label %b20

b17:                                              ; preds = %b8
  ; Empty slot: only the first element (index 0) is treated specially.
  %v77 = load i32, i32* %v7, align 4
  %v78 = icmp eq i32 %v77, 0
  br i1 %v78, label %b18, label %b19

b18:                                              ; preds = %b17
  ; First slot empty: fill *%a2 = { %a0, null } and succeed (0).
  %v79 = load %s.0*, %s.0** %v1, align 4
  %v80 = load %s.18*, %s.18** %v3, align 4
  %v81 = getelementptr inbounds %s.18, %s.18* %v80, i32 0, i32 0
  store %s.0* %v79, %s.0** %v81, align 4
  %v82 = load %s.18*, %s.18** %v3, align 4
  %v83 = getelementptr inbounds %s.18, %s.18* %v82, i32 0, i32 1
  store i8* null, i8** %v83, align 4
  store i8 0, i8* %v0
  store i32 1, i32* %v8
  br label %b23

b19:                                              ; preds = %b17
  br label %b20

b20:                                              ; preds = %b19, %b16
  br label %b21

b21:                                              ; preds = %b20
  ; Loop latch: ++%v7, back to the header.
  %v84 = load i32, i32* %v7, align 4
  %v85 = add i32 %v84, 1
  store i32 %v85, i32* %v7, align 4
  br label %b5

b22:                                              ; preds = %b7
  ; Loop exhausted without a match: fail with status 4.
  store i8 4, i8* %v0
  store i32 1, i32* %v8
  br label %b23

b23:                                              ; preds = %b22, %b18, %b15, %b10, %b3, %b1
  ; Single exit: undo the dynamic alloca and return the status byte.
  %v86 = load i8*, i8** %v6
  call void @llvm.stackrestore(i8* %v86)
  %v87 = load i8, i8* %v0
  ret i8 %v87
}

; Function Attrs: nounwind
declare i8* @llvm.stacksave() #0

; Function Attrs: inlinehint nounwind
declare i32 @f1(%s.0*) #1

; External helpers; bodies not part of this test.
declare zeroext i8 @f2(%s.1*, i8*, i8*, i32) #0

declare i32 @f3(%s.1*) #0

; Function Attrs: nounwind
declare void @llvm.stackrestore(i8*) #0

attributes #0 = { nounwind }
attributes #1 = { inlinehint nounwind }