; RUN: llvm-dis < %s.bc| FileCheck %s
; RUN: verify-uselistorder < %s.bc

; memOperations.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
; The test checks that LLVM does not misread memory related instructions of
; older bitcode files.

define void @alloca(){
entry:
; CHECK: %res1 = alloca i8
  %res1 = alloca i8

; CHECK-NEXT: %res2 = alloca i8, i32 2
  %res2 = alloca i8, i32 2

; CHECK-NEXT: %res3 = alloca i8, i32 2, align 4
  %res3 = alloca i8, i32 2, align 4

; CHECK-NEXT: %res4 = alloca i8, align 4
  %res4 = alloca i8, align 4

  ret void
}

define void @load(){
entry:
  %ptr1 = alloca i8
  store i8 2, i8* %ptr1

; CHECK: %res1 = load i8, i8* %ptr1
  %res1 = load i8, i8* %ptr1

; CHECK-NEXT: %res2 = load volatile i8, i8* %ptr1
  %res2 = load volatile i8, i8* %ptr1

; CHECK-NEXT: %res3 = load i8, i8* %ptr1, align 1
  %res3 = load i8, i8* %ptr1, align 1

; CHECK-NEXT: %res4 = load volatile i8, i8* %ptr1, align 1
  %res4 = load volatile i8, i8* %ptr1, align 1

; CHECK-NEXT: %res5 = load i8, i8* %ptr1, align 1, !nontemporal !0
  %res5 = load i8, i8* %ptr1, !nontemporal !0

; CHECK-NEXT: %res6 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
  %res6 = load volatile i8, i8* %ptr1, !nontemporal !0

; CHECK-NEXT: %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
  %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0

; CHECK-NEXT: %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
  %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0

; CHECK-NEXT: %res9 = load i8, i8* %ptr1, align 1, !invariant.load !1
  %res9 = load i8, i8* %ptr1, !invariant.load !1

; CHECK-NEXT: %res10 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
  %res10 = load volatile i8, i8* %ptr1, !invariant.load !1

; CHECK-NEXT: %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
  %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1

; CHECK-NEXT: %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
  %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1

; CHECK-NEXT: %res13 = load i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
  %res13 = load i8, i8* %ptr1, !nontemporal !0, !invariant.load !1

; CHECK-NEXT: %res14 = load volatile i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
  %res14 = load volatile i8, i8* %ptr1, !nontemporal !0, !invariant.load !1

; CHECK-NEXT: %res15 = load i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
  %res15 = load i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1

; CHECK-NEXT: %res16 = load volatile i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
  %res16 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1

  ret void
}

define void @loadAtomic(){
entry:
  %ptr1 = alloca i8
  store i8 2, i8* %ptr1

; CHECK: %res1 = load atomic i8, i8* %ptr1 unordered, align 1
  %res1 = load atomic i8, i8* %ptr1 unordered, align 1

; CHECK-NEXT: %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
  %res2 = load atomic i8, i8* %ptr1 monotonic, align 1

; CHECK-NEXT: %res3 = load atomic i8, i8* %ptr1 acquire, align 1
  %res3 = load atomic i8, i8* %ptr1 acquire, align 1

; CHECK-NEXT: %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
  %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1

; CHECK-NEXT: %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
  %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1

; CHECK-NEXT: %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
  %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1

; CHECK-NEXT: %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
  %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1

; CHECK-NEXT: %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
  %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1

; CHECK-NEXT: %res9 = load atomic i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
  %res9 = load atomic i8, i8* %ptr1 syncscope("singlethread") unordered, align 1

; CHECK-NEXT: %res10 = load atomic i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
  %res10 = load atomic i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1

; CHECK-NEXT: %res11 = load atomic i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
  %res11 = load atomic i8, i8* %ptr1 syncscope("singlethread") acquire, align 1

; CHECK-NEXT: %res12 = load atomic i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
  %res12 = load atomic i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1

; CHECK-NEXT: %res13 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
  %res13 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") unordered, align 1

; CHECK-NEXT: %res14 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
  %res14 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1

; CHECK-NEXT: %res15 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
  %res15 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") acquire, align 1

; CHECK-NEXT: %res16 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
  %res16 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1

  ret void
}

define void @store(){
entry:
  %ptr1 = alloca i8

; CHECK: store i8 2, i8* %ptr1
  store i8 2, i8* %ptr1

; CHECK-NEXT: store volatile i8 2, i8* %ptr1
  store volatile i8 2, i8* %ptr1

; CHECK-NEXT: store i8 2, i8* %ptr1, align 1
  store i8 2, i8* %ptr1, align 1

; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1
  store volatile i8 2, i8* %ptr1, align 1

; CHECK-NEXT: store i8 2, i8* %ptr1, align 1, !nontemporal !0
  store i8 2, i8* %ptr1, !nontemporal !0

; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
  store volatile i8 2, i8* %ptr1, !nontemporal !0

; CHECK-NEXT: store i8 2, i8* %ptr1, align 1, !nontemporal !0
  store i8 2, i8* %ptr1, align 1, !nontemporal !0

; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
  store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0

  ret void
}

define void @storeAtomic(){
entry:
  %ptr1 = alloca i8

; CHECK: store atomic i8 2, i8* %ptr1 unordered, align 1
  store atomic i8 2, i8* %ptr1 unordered, align 1

; CHECK-NEXT: store atomic i8 2, i8* %ptr1 monotonic, align 1
  store atomic i8 2, i8* %ptr1 monotonic, align 1

; CHECK-NEXT: store atomic i8 2, i8* %ptr1 release, align 1
  store atomic i8 2, i8* %ptr1 release, align 1

; CHECK-NEXT: store atomic i8 2, i8* %ptr1 seq_cst, align 1
  store atomic i8 2, i8* %ptr1 seq_cst, align 1

; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 unordered, align 1
  store atomic volatile i8 2, i8* %ptr1 unordered, align 1

; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 monotonic, align 1
  store atomic volatile i8 2, i8* %ptr1 monotonic, align 1

; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 release, align 1
  store atomic volatile i8 2, i8* %ptr1 release, align 1

; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
  store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1

; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
  store atomic i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1

; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
  store atomic i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1

; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
  store atomic i8 2, i8* %ptr1 syncscope("singlethread") release, align 1

; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
  store atomic i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1

; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
  store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1

; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
  store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1

; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
  store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") release, align 1

; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
  store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1

  ret void
}

define void @cmpxchg(i32* %ptr,i32 %cmp,i32 %new){
entry:
  ;cmpxchg [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering>

; CHECK: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
; CHECK-NEXT: %res1 = extractvalue { i32, i1 } [[TMP]], 0
  %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
; CHECK-NEXT: %res2 = extractvalue { i32, i1 } [[TMP]], 0
  %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
; CHECK-NEXT: %res3 = extractvalue { i32, i1 } [[TMP]], 0
  %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
; CHECK-NEXT: %res4 = extractvalue { i32, i1 } [[TMP]], 0
  %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic


; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
; CHECK-NEXT: %res5 = extractvalue { i32, i1 } [[TMP]], 0
  %res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
; CHECK-NEXT: %res6 = extractvalue { i32, i1 } [[TMP]], 0
  %res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
; CHECK-NEXT: %res7 = extractvalue { i32, i1 } [[TMP]], 0
  %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
; CHECK-NEXT: %res8 = extractvalue { i32, i1 } [[TMP]], 0
  %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire


; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
; CHECK-NEXT: %res9 = extractvalue { i32, i1 } [[TMP]], 0
  %res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
; CHECK-NEXT: %res10 = extractvalue { i32, i1 } [[TMP]], 0
  %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
; CHECK-NEXT: %res11 = extractvalue { i32, i1 } [[TMP]], 0
  %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
; CHECK-NEXT: %res12 = extractvalue { i32, i1 } [[TMP]], 0
  %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic


; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
; CHECK-NEXT: %res13 = extractvalue { i32, i1 } [[TMP]], 0
  %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
; CHECK-NEXT: %res14 = extractvalue { i32, i1 } [[TMP]], 0
  %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
; CHECK-NEXT: %res15 = extractvalue { i32, i1 } [[TMP]], 0
  %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
; CHECK-NEXT: %res16 = extractvalue { i32, i1 } [[TMP]], 0
  %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire


; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
; CHECK-NEXT: %res17 = extractvalue { i32, i1 } [[TMP]], 0
  %res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
; CHECK-NEXT: %res18 = extractvalue { i32, i1 } [[TMP]], 0
  %res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
; CHECK-NEXT: %res19 = extractvalue { i32, i1 } [[TMP]], 0
  %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst

; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
; CHECK-NEXT: %res20 = extractvalue { i32, i1 } [[TMP]], 0
  %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst

  ret void
}

define void @getelementptr({i8, i8}, {i8, i8}* %s, <4 x i8*> %ptrs, <4 x i64> %offsets ){
entry:
; CHECK: %res1 = getelementptr { i8, i8 }, { i8, i8 }* %s, i32 1, i32 1
  %res1 = getelementptr {i8, i8}, {i8, i8}* %s, i32 1, i32 1

; CHECK-NEXT: %res2 = getelementptr inbounds { i8, i8 }, { i8, i8 }* %s, i32 1, i32 1
  %res2 = getelementptr inbounds {i8, i8}, {i8, i8}* %s, i32 1, i32 1

; CHECK-NEXT: %res3 = getelementptr i8, <4 x i8*> %ptrs, <4 x i64> %offsets
  %res3 = getelementptr i8, <4 x i8*> %ptrs, <4 x i64> %offsets

  ret void
}

!0 = !{ i32 1 }
!1 = !{}