; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s

; Test that InferAddressSpaces rewrites memset/memcpy/memmove intrinsics whose
; pointer operands are flat (addrspace 0) casts of group (addrspace 3) or
; global (addrspace 1) pointers into the address-space-specific intrinsic
; overloads, and that !tbaa, !tbaa.struct, !alias.scope and !noalias metadata
; are preserved on the rewritten calls.

; CHECK-LABEL: @memset_group_to_flat(
; CHECK: call void @llvm.memset.p3i8.i64(i8 addrspace(3)* align 4 %group.ptr, i8 4, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memset_group_to_flat(i8 addrspace(3)* %group.ptr, i32 %y) #0 {
  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 4 %cast, i8 4, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memset_global_to_flat(
; CHECK: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* align 4 %global.ptr, i8 4, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memset_global_to_flat(i8 addrspace(1)* %global.ptr, i32 %y) #0 {
  %cast = addrspacecast i8 addrspace(1)* %global.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 4 %cast, i8 4, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memset_group_to_flat_no_md(
; CHECK: call void @llvm.memset.p3i8.i64(i8 addrspace(3)* align 4 %group.ptr, i8 4, i64 %size, i1 false){{$}}
define amdgpu_kernel void @memset_group_to_flat_no_md(i8 addrspace(3)* %group.ptr, i64 %size) #0 {
  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 4 %cast, i8 4, i64 %size, i1 false)
  ret void
}

; CHECK-LABEL: @memset_global_to_flat_no_md(
; CHECK: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* align 4 %global.ptr, i8 4, i64 %size, i1 false){{$}}
define amdgpu_kernel void @memset_global_to_flat_no_md(i8 addrspace(1)* %global.ptr, i64 %size) #0 {
  %cast = addrspacecast i8 addrspace(1)* %global.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 4 %cast, i8 4, i64 %size, i1 false)
  ret void
}

; CHECK-LABEL: @memcpy_flat_to_flat_replace_src_with_group(
; CHECK: call void @llvm.memcpy.p0i8.p3i8.i64(i8* align 4 %dest, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group(i8* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %dest, i8* align 4 %cast.src, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memcpy_flat_to_flat_replace_dest_with_group(
; CHECK: call void @llvm.memcpy.p3i8.p0i8.i64(i8 addrspace(3)* align 4 %dest.group.ptr, i8* align 4 %src.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_with_group(i8 addrspace(3)* %dest.group.ptr, i8* %src.ptr, i64 %size) #0 {
  %cast.dest = addrspacecast i8 addrspace(3)* %dest.group.ptr to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %cast.dest, i8* align 4 %src.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memcpy_flat_to_flat_replace_dest_src_with_group(
; CHECK: call void @llvm.memcpy.p3i8.p3i8.i64(i8 addrspace(3)* align 4 %src.group.ptr, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_src_with_group(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
  %cast.dest = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %cast.dest, i8* align 4 %cast.src, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memcpy_flat_to_flat_replace_dest_group_src_global(
; CHECK: call void @llvm.memcpy.p3i8.p1i8.i64(i8 addrspace(3)* align 4 %dest.group.ptr, i8 addrspace(1)* align 4 %src.global.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_group_src_global(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(1)* %src.global.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(1)* %src.global.ptr to i8*
  %cast.dest = addrspacecast i8 addrspace(3)* %dest.group.ptr to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %cast.dest, i8* align 4 %cast.src, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memcpy_group_to_flat_replace_dest_global(
; CHECK: call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 4 %dest.global.ptr, i8 addrspace(3)* align 4 %src.group.ptr, i32 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memcpy_group_to_flat_replace_dest_global(i8 addrspace(1)* %dest.global.ptr, i8 addrspace(3)* %src.group.ptr, i32 %size) #0 {
  %cast.dest = addrspacecast i8 addrspace(1)* %dest.global.ptr to i8*
  call void @llvm.memcpy.p0i8.p3i8.i32(i8* align 4 %cast.dest, i8 addrspace(3)* align 4 %src.group.ptr, i32 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

; CHECK-LABEL: @memcpy_flat_to_flat_replace_src_with_group_tbaa_struct(
; CHECK: call void @llvm.memcpy.p0i8.p3i8.i64(i8* align 4 %dest, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false), !tbaa.struct !7
define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group_tbaa_struct(i8* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %dest, i8* align 4 %cast.src, i64 %size, i1 false), !tbaa.struct !7
  ret void
}

; CHECK-LABEL: @memcpy_flat_to_flat_replace_src_with_group_no_md(
; CHECK: call void @llvm.memcpy.p0i8.p3i8.i64(i8* align 4 %dest, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false){{$}}
define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group_no_md(i8* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %dest, i8* align 4 %cast.src, i64 %size, i1 false)
  ret void
}

; CHECK-LABEL: @multiple_memcpy_flat_to_flat_replace_src_with_group_no_md(
; CHECK: call void @llvm.memcpy.p0i8.p3i8.i64(i8* align 4 %dest0, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false){{$}}
; CHECK: call void @llvm.memcpy.p0i8.p3i8.i64(i8* align 4 %dest1, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false){{$}}
define amdgpu_kernel void @multiple_memcpy_flat_to_flat_replace_src_with_group_no_md(i8* %dest0, i8* %dest1, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %dest0, i8* align 4 %cast.src, i64 %size, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %dest1, i8* align 4 %cast.src, i64 %size, i1 false)
  ret void
}

; Check for iterator problems if the pointer has 2 uses in the same call
; CHECK-LABEL: @memcpy_group_flat_to_flat_self(
; CHECK: call void @llvm.memcpy.p3i8.p3i8.i64(i8 addrspace(3)* align 4 %group.ptr, i8 addrspace(3)* align 4 %group.ptr, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memcpy_group_flat_to_flat_self(i8 addrspace(3)* %group.ptr) #0 {
  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %cast, i8* align 4 %cast, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}
; CHECK-LABEL: @memmove_flat_to_flat_replace_src_with_group(
; CHECK: call void @llvm.memmove.p0i8.p3i8.i64(i8* align 4 %dest, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
define amdgpu_kernel void @memmove_flat_to_flat_replace_src_with_group(i8* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %dest, i8* align 4 %cast.src, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
  ret void
}

declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
declare void @llvm.memcpy.p0i8.p3i8.i32(i8* nocapture writeonly, i8 addrspace(3)* nocapture readonly, i32, i1) #1
declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1

attributes #0 = { nounwind }
attributes #1 = { argmemonly nounwind }

!0 = !{!1, !1, i64 0}
!1 = !{!"A", !2}
!2 = !{!"tbaa root"}
!3 = !{!"B", !2}
!4 = !{!5}
!5 = distinct !{!5, !6, !"some scope"}
!6 = distinct !{!6, !"some domain"}
!7 = !{i64 0, i64 8, null}