; 64-bit signed/unsigned atomic min/max helpers for global (addrspace 1)
; and local (addrspace 3) memory. Each wrapper lowers to a single volatile
; seq_cst atomicrmw and returns the value the memory location held before
; the operation. This .ll file is run through the C preprocessor, so the
; data layout can track the clang version used to build it.
#if __clang_major__ >= 7
target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
#else
target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
#endif

; Signed atomic minimum on a global pointer; returns the prior value.
define i64 @__clc__sync_fetch_and_min_global_8(i64 addrspace(1)* nocapture %ptr, i64 %value) nounwind alwaysinline {
entry:
  %old = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %value seq_cst
  ret i64 %old
}

; Unsigned atomic minimum on a global pointer; returns the prior value.
define i64 @__clc__sync_fetch_and_umin_global_8(i64 addrspace(1)* nocapture %ptr, i64 %value) nounwind alwaysinline {
entry:
  %old = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %value seq_cst
  ret i64 %old
}

; Signed atomic minimum on a local pointer; returns the prior value.
define i64 @__clc__sync_fetch_and_min_local_8(i64 addrspace(3)* nocapture %ptr, i64 %value) nounwind alwaysinline {
entry:
  %old = atomicrmw volatile min i64 addrspace(3)* %ptr, i64 %value seq_cst
  ret i64 %old
}

; Unsigned atomic minimum on a local pointer; returns the prior value.
define i64 @__clc__sync_fetch_and_umin_local_8(i64 addrspace(3)* nocapture %ptr, i64 %value) nounwind alwaysinline {
entry:
  %old = atomicrmw volatile umin i64 addrspace(3)* %ptr, i64 %value seq_cst
  ret i64 %old
}

; Signed atomic maximum on a global pointer; returns the prior value.
define i64 @__clc__sync_fetch_and_max_global_8(i64 addrspace(1)* nocapture %ptr, i64 %value) nounwind alwaysinline {
entry:
  %old = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %value seq_cst
  ret i64 %old
}

; Unsigned atomic maximum on a global pointer; returns the prior value.
define i64 @__clc__sync_fetch_and_umax_global_8(i64 addrspace(1)* nocapture %ptr, i64 %value) nounwind alwaysinline {
entry:
  %old = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %value seq_cst
  ret i64 %old
}

; Signed atomic maximum on a local pointer; returns the prior value.
define i64 @__clc__sync_fetch_and_max_local_8(i64 addrspace(3)* nocapture %ptr, i64 %value) nounwind alwaysinline {
entry:
  %old = atomicrmw volatile max i64 addrspace(3)* %ptr, i64 %value seq_cst
  ret i64 %old
}

; Unsigned atomic maximum on a local pointer; returns the prior value.
define i64 @__clc__sync_fetch_and_umax_local_8(i64 addrspace(3)* nocapture %ptr, i64 %value) nounwind alwaysinline {
entry:
  %old = atomicrmw volatile umax i64 addrspace(3)* %ptr, i64 %value seq_cst
  ret i64 %old
}