; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -dse -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

%struct.vec2 = type { <4 x i32>, <4 x i32> }
%struct.vec2plusi = type { <4 x i32>, <4 x i32>, i32 }

@glob1 = global %struct.vec2 zeroinitializer, align 16
@glob2 = global %struct.vec2plusi zeroinitializer, align 16

; The trailing store overwrites the last four bytes of the memset, so DSE
; shortens the memset from 28 to 24 bytes
define void @write24to28(i32* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write24to28(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 24, i1 false)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
  %p3 = bitcast i32* %arrayidx0 to i8*
  call void @llvm.memset.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i1 false)
  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
  store i32 1, i32* %arrayidx1, align 4
  ret void
}

define void @write24to28_atomic(i32* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write24to28_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 24, i32 4)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
; CHECK-NEXT:    store atomic i32 1, i32* [[ARRAYIDX1]] unordered, align 4
; CHECK-NEXT:    ret void
;
entry:
  %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
  %p3 = bitcast i32* %arrayidx0 to i8*
  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
  store atomic i32 1, i32* %arrayidx1 unordered, align 4
  ret void
}

; Atomicity of the store is weaker than the memset
define void @write24to28_atomic_weaker(i32* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write24to28_atomic_weaker(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[ARRAYIDX0]] to i8*
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 24, i32 4)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
  %p3 = bitcast i32* %arrayidx0 to i8*
  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 28, i32 4)
  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
  store i32 1, i32* %arrayidx1, align 4
  ret void
}

; Same shortening when the memset starts at %p itself: the store to p[7]
; trims it from 32 to 28 bytes
define void @write28to32(i32* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write28to32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 28, i1 false)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %p3 = bitcast i32* %p to i8*
  call void @llvm.memset.p0i8.i64(i8* align 4 %p3, i8 0, i64 32, i1 false)
  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
  store i32 1, i32* %arrayidx1, align 4
  ret void
}

define void @write28to32_atomic(i32* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write28to32_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 [[P3]], i8 0, i64 28, i32 4)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
; CHECK-NEXT:    store atomic i32 1, i32* [[ARRAYIDX1]] unordered, align 4
; CHECK-NEXT:    ret void
;
entry:
  %p3 = bitcast i32* %p to i8*
  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %p3, i8 0, i64 32, i32 4)
  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
  store atomic i32 1, i32* %arrayidx1 unordered, align 4
  ret void
}

; Here the memset is 16-byte aligned and the store begins at offset 28, which
; is not a multiple of that alignment, so the memset must stay at 32 bytes
define void @dontwrite28to32memset(i32* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @dontwrite28to32memset(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 16 [[P3]], i8 0, i64 32, i1 false)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %p3 = bitcast i32* %p to i8*
  call void @llvm.memset.p0i8.i64(i8* align 16 %p3, i8 0, i64 32, i1 false)
  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
  store i32 1, i32* %arrayidx1, align 4
  ret void
}

define void @dontwrite28to32memset_atomic(i32* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @dontwrite28to32memset_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 16 [[P3]], i8 0, i64 32, i32 4)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
; CHECK-NEXT:    store atomic i32 1, i32* [[ARRAYIDX1]] unordered, align 4
; CHECK-NEXT:    ret void
;
entry:
  %p3 = bitcast i32* %p to i8*
  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 16 %p3, i8 0, i64 32, i32 4)
  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
  store atomic i32 1, i32* %arrayidx1 unordered, align 4
  ret void
}

; The store to the trailing i32 field overwrites the last four bytes of the
; memcpy, shortening it from 36 to 32 bytes
define void @write32to36(%struct.vec2plusi* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write32to36(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2plusi* [[P:%.*]] to i8*
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2plusi* @glob2 to i8*), i64 32, i1 false)
; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2PLUSI:%.*]], %struct.vec2plusi* [[P]], i64 0, i32 2
; CHECK-NEXT:    store i32 1, i32* [[C]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %0 = bitcast %struct.vec2plusi* %p to i8*
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2plusi* @glob2 to i8*), i64 36, i1 false)
  %c = getelementptr inbounds %struct.vec2plusi, %struct.vec2plusi* %p, i64 0, i32 2
  store i32 1, i32* %c, align 4
  ret void
}

define void @write32to36_atomic(%struct.vec2plusi* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write32to36_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2plusi* [[P:%.*]] to i8*
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2plusi* @glob2 to i8*), i64 32, i32 4)
; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2PLUSI:%.*]], %struct.vec2plusi* [[P]], i64 0, i32 2
; CHECK-NEXT:    store atomic i32 1, i32* [[C]] unordered, align 4
; CHECK-NEXT:    ret void
;
entry:
  %0 = bitcast %struct.vec2plusi* %p to i8*
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2plusi* @glob2 to i8*), i64 36, i32 4)
  %c = getelementptr inbounds %struct.vec2plusi, %struct.vec2plusi* %p, i64 0, i32 2
  store atomic i32 1, i32* %c unordered, align 4
  ret void
}

; Atomicity of the store is weaker than the memcpy
define void @write32to36_atomic_weaker(%struct.vec2plusi* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write32to36_atomic_weaker(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2plusi* [[P:%.*]] to i8*
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2plusi* @glob2 to i8*), i64 32, i32 4)
; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2PLUSI:%.*]], %struct.vec2plusi* [[P]], i64 0, i32 2
; CHECK-NEXT:    store i32 1, i32* [[C]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %0 = bitcast %struct.vec2plusi* %p to i8*
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2plusi* @glob2 to i8*), i64 36, i32 4)
  %c = getelementptr inbounds %struct.vec2plusi, %struct.vec2plusi* %p, i64 0, i32 2
  store i32 1, i32* %c, align 4
  ret void
}

; The vector store overwrites the whole second half of the memcpy, shortening
; it from 32 to 16 bytes
define void @write16to32(%struct.vec2* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write16to32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2* [[P:%.*]] to i8*
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 16, i1 false)
; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], %struct.vec2* [[P]], i64 0, i32 1
; CHECK-NEXT:    store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32>* [[C]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %0 = bitcast %struct.vec2* %p to i8*
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 32, i1 false)
  %c = getelementptr inbounds %struct.vec2, %struct.vec2* %p, i64 0, i32 1
  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32>* %c, align 4
  ret void
}

define void @write16to32_atomic(%struct.vec2* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write16to32_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2* [[P:%.*]] to i8*
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 16, i32 4)
; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], %struct.vec2* [[P]], i64 0, i32 1
; CHECK-NEXT:    store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32>* [[C]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %0 = bitcast %struct.vec2* %p to i8*
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 32, i32 4)
  %c = getelementptr inbounds %struct.vec2, %struct.vec2* %p, i64 0, i32 1
  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32>* %c, align 4
  ret void
}

; As with dontwrite28to32memset, the 16-byte alignment means the 32-byte
; memcpy must not be shortened at offset 28
define void @dontwrite28to32memcpy(%struct.vec2* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @dontwrite28to32memcpy(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2* [[P:%.*]] to i8*
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 32, i1 false)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], %struct.vec2* [[P]], i64 0, i32 0, i64 7
; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %0 = bitcast %struct.vec2* %p to i8*
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 32, i1 false)
  %arrayidx1 = getelementptr inbounds %struct.vec2, %struct.vec2* %p, i64 0, i32 0, i64 7
  store i32 1, i32* %arrayidx1, align 4
  ret void
}

define void @dontwrite28to32memcpy_atomic(%struct.vec2* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @dontwrite28to32memcpy_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.vec2* [[P:%.*]] to i8*
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 [[TMP0]], i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 32, i32 4)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], %struct.vec2* [[P]], i64 0, i32 0, i64 7
; CHECK-NEXT:    store atomic i32 1, i32* [[ARRAYIDX1]] unordered, align 4
; CHECK-NEXT:    ret void
;
entry:
  %0 = bitcast %struct.vec2* %p to i8*
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.vec2* @glob1 to i8*), i64 32, i32 4)
  %arrayidx1 = getelementptr inbounds %struct.vec2, %struct.vec2* %p, i64 0, i32 0, i64 7
  store atomic i32 1, i32* %arrayidx1 unordered, align 4
  ret void
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture, i8, i64, i32) nounwind

%struct.trapframe = type { i64, i64, i64 }

; bugzilla 11455 - make sure negative GEPs don't break this optimisation
define void @cpu_lwp_fork(%struct.trapframe* %md_regs, i64 %pcb_rsp0) nounwind uwtable noinline ssp {
; CHECK-LABEL: @cpu_lwp_fork(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = inttoptr i64 [[PCB_RSP0:%.*]] to %struct.trapframe*
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds [[STRUCT_TRAPFRAME:%.*]], %struct.trapframe* [[TMP0]], i64 -1
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast %struct.trapframe* [[ADD_PTR]] to i8*
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast %struct.trapframe* [[MD_REGS:%.*]] to i8*
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP1]], i8* [[TMP2]], i64 24, i1 false)
; CHECK-NEXT:    [[TF_TRAPNO:%.*]] = getelementptr inbounds [[STRUCT_TRAPFRAME]], %struct.trapframe* [[TMP0]], i64 -1, i32 1
; CHECK-NEXT:    store i64 3, i64* [[TF_TRAPNO]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %0 = inttoptr i64 %pcb_rsp0 to %struct.trapframe*
  %add.ptr = getelementptr inbounds %struct.trapframe, %struct.trapframe* %0, i64 -1
  %1 = bitcast %struct.trapframe* %add.ptr to i8*
  %2 = bitcast %struct.trapframe* %md_regs to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 24, i1 false)
  %tf_trapno = getelementptr inbounds %struct.trapframe, %struct.trapframe* %0, i64 -1, i32 1
  store i64 3, i64* %tf_trapno, align 8
  ret void
}

; The two i64 stores together overwrite bytes 16-31, so the memset is
; shortened from 32 to 16 bytes
define void @write16To23AndThen24To31(i64* nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
; CHECK-LABEL: @write16To23AndThen24To31(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[BASE0:%.*]] = bitcast i64* [[P:%.*]] to i8*
; CHECK-NEXT:    [[MYBASE0:%.*]] = getelementptr inbounds i8, i8* [[BASE0]], i64 0
; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* align 8 [[MYBASE0]], i8 0, i64 16, i1 false)
; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 2
; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 3
; CHECK-NEXT:    store i64 3, i64* [[BASE64_2]]
; CHECK-NEXT:    store i64 3, i64* [[BASE64_3]]
; CHECK-NEXT:    ret void
;
entry:

  %base0 = bitcast i64* %P to i8*
  %mybase0 = getelementptr inbounds i8, i8* %base0, i64 0
  tail call void @llvm.memset.p0i8.i64(i8* align 8 %mybase0, i8 0, i64 32, i1 false)

  %base64_2 = getelementptr inbounds i64, i64* %P, i64 2
  %base64_3 = getelementptr inbounds i64, i64* %P, i64 3

  store i64 3, i64* %base64_2
  store i64 3, i64* %base64_3
  ret void
}

define void @write16To23AndThen24To31_atomic(i64* nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
; CHECK-LABEL: @write16To23AndThen24To31_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[BASE0:%.*]] = bitcast i64* [[P:%.*]] to i8*
; CHECK-NEXT:    [[MYBASE0:%.*]] = getelementptr inbounds i8, i8* [[BASE0]], i64 0
; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 [[MYBASE0]], i8 0, i64 16, i32 8)
; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 2
; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 3
; CHECK-NEXT:    store atomic i64 3, i64* [[BASE64_2]] unordered, align 8
; CHECK-NEXT:    store atomic i64 3, i64* [[BASE64_3]] unordered, align 8
; CHECK-NEXT:    ret void
;
entry:

  %base0 = bitcast i64* %P to i8*
  %mybase0 = getelementptr inbounds i8, i8* %base0, i64 0
  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 %mybase0, i8 0, i64 32, i32 8)

  %base64_2 = getelementptr inbounds i64, i64* %P, i64 2
  %base64_3 = getelementptr inbounds i64, i64* %P, i64 3

  store atomic i64 3, i64* %base64_2 unordered, align 8
  store atomic i64 3, i64* %base64_3 unordered, align 8
  ret void
}

define void @write16To23AndThen24To31_atomic_weaker1(i64* nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
; CHECK-LABEL: @write16To23AndThen24To31_atomic_weaker1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[BASE0:%.*]] = bitcast i64* [[P:%.*]] to i8*
; CHECK-NEXT:    [[MYBASE0:%.*]] = getelementptr inbounds i8, i8* [[BASE0]], i64 0
; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 [[MYBASE0]], i8 0, i64 16, i32 8)
; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 2
; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 3
; CHECK-NEXT:    store i64 3, i64* [[BASE64_2]], align 8
; CHECK-NEXT:    store atomic i64 3, i64* [[BASE64_3]] unordered, align 8
; CHECK-NEXT:    ret void
;
entry:

  %base0 = bitcast i64* %P to i8*
  %mybase0 = getelementptr inbounds i8, i8* %base0, i64 0
  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 %mybase0, i8 0, i64 32, i32 8)

  %base64_2 = getelementptr inbounds i64, i64* %P, i64 2
  %base64_3 = getelementptr inbounds i64, i64* %P, i64 3

  store i64 3, i64* %base64_2, align 8
  store atomic i64 3, i64* %base64_3 unordered, align 8
  ret void
}

define void @write16To23AndThen24To31_atomic_weaker2(i64* nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
; CHECK-LABEL: @write16To23AndThen24To31_atomic_weaker2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[BASE0:%.*]] = bitcast i64* [[P:%.*]] to i8*
; CHECK-NEXT:    [[MYBASE0:%.*]] = getelementptr inbounds i8, i8* [[BASE0]], i64 0
; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 [[MYBASE0]], i8 0, i64 16, i32 8)
; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 2
; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 3
; CHECK-NEXT:    store atomic i64 3, i64* [[BASE64_2]] unordered, align 8
; CHECK-NEXT:    store i64 3, i64* [[BASE64_3]], align 8
; CHECK-NEXT:    ret void
;
entry:

  %base0 = bitcast i64* %P to i8*
  %mybase0 = getelementptr inbounds i8, i8* %base0, i64 0
  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 %mybase0, i8 0, i64 32, i32 8)

  %base64_2 = getelementptr inbounds i64, i64* %P, i64 2
  %base64_3 = getelementptr inbounds i64, i64* %P, i64 3

  store atomic i64 3, i64* %base64_2 unordered, align 8
  store i64 3, i64* %base64_3, align 8
  ret void
}