; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=SI -check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=VI -check-prefix=GCN %s

; Tests that 64-bit LDS (local, addrspace(3)) atomicrmw operations select the
; expected DS instructions, both with and without an immediate offset, and in
; both used-result (_rtn) and unused-result forms.

; GCN-LABEL: {{^}}lds_atomic_xchg_ret_i64:
; GCN: ds_wrxchg_rtn_b64
; GCN: s_endpgm
define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_xchg_ret_i64_offset:
; GCN: ds_wrxchg_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_add_ret_i64:
; GCN: ds_add_rtn_u64
; GCN: s_endpgm
define void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_add_ret_i64_offset:
; SI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; VI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], 9
; GCN: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], 0
; GCN-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
; GCN: ds_add_rtn_u64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}} offset:32
; GCN: buffer_store_dwordx2 [[RESULT]],
; GCN: s_endpgm
define void @lds_atomic_add_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i64 4
  %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_add1_ret_i64:
; GCN: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], 1{{$}}
; GCN: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], 0{{$}}
; GCN: ds_add_rtn_u64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
; GCN: buffer_store_dwordx2 [[RESULT]],
; GCN: s_endpgm
define void @lds_atomic_add1_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_add1_ret_i64_offset:
; GCN: ds_add_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_add1_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_sub_ret_i64:
; GCN: ds_sub_rtn_u64
; GCN: s_endpgm
define void @lds_atomic_sub_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_sub_ret_i64_offset:
; GCN: ds_sub_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_sub1_ret_i64:
; GCN: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], 1{{$}}
; GCN: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], 0{{$}}
; GCN: ds_sub_rtn_u64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
; GCN: buffer_store_dwordx2 [[RESULT]],
; GCN: s_endpgm
define void @lds_atomic_sub1_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_sub1_ret_i64_offset:
; GCN: ds_sub_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_sub1_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_and_ret_i64:
; GCN: ds_and_rtn_b64
; GCN: s_endpgm
define void @lds_atomic_and_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_and_ret_i64_offset:
; GCN: ds_and_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_or_ret_i64:
; GCN: ds_or_rtn_b64
; GCN: s_endpgm
define void @lds_atomic_or_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_or_ret_i64_offset:
; GCN: ds_or_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_xor_ret_i64:
; GCN: ds_xor_rtn_b64
; GCN: s_endpgm
define void @lds_atomic_xor_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_xor_ret_i64_offset:
; GCN: ds_xor_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
; XGCN-LABEL: {{^}}lds_atomic_nand_ret_i64:
; define void @lds_atomic_nand_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
;   %result = atomicrmw nand i64 addrspace(3)* %ptr, i64 4 seq_cst
;   store i64 %result, i64 addrspace(1)* %out, align 8
;   ret void
; }

; GCN-LABEL: {{^}}lds_atomic_min_ret_i64:
; GCN: ds_min_rtn_i64
; GCN: s_endpgm
define void @lds_atomic_min_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_min_ret_i64_offset:
; GCN: ds_min_rtn_i64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_max_ret_i64:
; GCN: ds_max_rtn_i64
; GCN: s_endpgm
define void @lds_atomic_max_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_max_ret_i64_offset:
; GCN: ds_max_rtn_i64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_umin_ret_i64:
; GCN: ds_min_rtn_u64
; GCN: s_endpgm
define void @lds_atomic_umin_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_umin_ret_i64_offset:
; GCN: ds_min_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_umax_ret_i64:
; GCN: ds_max_rtn_u64
; GCN: s_endpgm
define void @lds_atomic_umax_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_umax_ret_i64_offset:
; GCN: ds_max_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_xchg_noret_i64:
; GCN: ds_wrxchg_rtn_b64
; GCN: s_endpgm
define void @lds_atomic_xchg_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_xchg_noret_i64_offset:
; GCN: ds_wrxchg_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_xchg_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_add_noret_i64:
; GCN: ds_add_u64
; GCN: s_endpgm
define void @lds_atomic_add_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_add_noret_i64_offset:
; SI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
; VI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x24
; GCN: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], 9
; GCN: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], 0
; GCN: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
; GCN: ds_add_u64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}} offset:32
; GCN: s_endpgm
define void @lds_atomic_add_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i64 4
  %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_add1_noret_i64:
; GCN-DAG: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], 1{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], 0{{$}}
; GCN: ds_add_u64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
; GCN: s_endpgm
define void @lds_atomic_add1_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_add1_noret_i64_offset:
; GCN: ds_add_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_add1_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_sub_noret_i64:
; GCN: ds_sub_u64
; GCN: s_endpgm
define void @lds_atomic_sub_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_sub_noret_i64_offset:
; GCN: ds_sub_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_sub_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_sub1_noret_i64:
; GCN: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], 1{{$}}
; GCN: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], 0{{$}}
; GCN: ds_sub_u64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
; GCN: s_endpgm
define void @lds_atomic_sub1_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_sub1_noret_i64_offset:
; GCN: ds_sub_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_sub1_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_and_noret_i64:
; GCN: ds_and_b64
; GCN: s_endpgm
define void @lds_atomic_and_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_and_noret_i64_offset:
; GCN: ds_and_b64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_and_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_or_noret_i64:
; GCN: ds_or_b64
; GCN: s_endpgm
define void @lds_atomic_or_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_or_noret_i64_offset:
; GCN: ds_or_b64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_or_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_xor_noret_i64:
; GCN: ds_xor_b64
; GCN: s_endpgm
define void @lds_atomic_xor_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_xor_noret_i64_offset:
; GCN: ds_xor_b64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
; XGCN-LABEL: {{^}}lds_atomic_nand_noret_i64:
; define void @lds_atomic_nand_noret_i64(i64 addrspace(3)* %ptr) nounwind {
;   %result = atomicrmw nand i64 addrspace(3)* %ptr, i64 4 seq_cst
;   ret void
; }

; GCN-LABEL: {{^}}lds_atomic_min_noret_i64:
; GCN: ds_min_i64
; GCN: s_endpgm
define void @lds_atomic_min_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_min_noret_i64_offset:
; GCN: ds_min_i64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_min_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_max_noret_i64:
; GCN: ds_max_i64
; GCN: s_endpgm
define void @lds_atomic_max_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_max_noret_i64_offset:
; GCN: ds_max_i64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_max_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_umin_noret_i64:
; GCN: ds_min_u64
; GCN: s_endpgm
define void @lds_atomic_umin_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_umin_noret_i64_offset:
; GCN: ds_min_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_umin_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_umax_noret_i64:
; GCN: ds_max_u64
; GCN: s_endpgm
define void @lds_atomic_umax_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_umax_noret_i64_offset:
; GCN: ds_max_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_umax_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}