; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix PTX
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix PTX
; RUN: opt < %s -S -nvptx-favor-non-generic -dce | FileCheck %s --check-prefix IR

@array = internal addrspace(3) global [10 x float] zeroinitializer, align 4
@scalar = internal addrspace(3) global float 0.000000e+00, align 4

; Verifies nvptx-favor-non-generic correctly optimizes generic address space
; usage to non-generic address space usage for the patterns we claim to handle:
; 1. load cast
; 2. store cast
; 3. load gep cast
; 4. store gep cast
; gep and cast can be an instruction or a constant expression. This function
; tries all possible combinations.
define float @ld_st_shared_f32(i32 %i, float %v) {
; IR-LABEL: @ld_st_shared_f32
; IR-NOT: addrspacecast
; PTX-LABEL: ld_st_shared_f32(
  ; load cast
  %1 = load float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
  ; store cast
  store float %v, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
  ; use syncthreads to disable optimizations across components
  call void @llvm.cuda.syncthreads()
; PTX: bar.sync 0;

  ; cast; load
  %2 = addrspacecast float addrspace(3)* @scalar to float*
  %3 = load float* %2, align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
  ; cast; store
  store float %v, float* %2, align 4
; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
  call void @llvm.cuda.syncthreads()
; PTX: bar.sync 0;

  ; load gep cast
  %4 = load float* getelementptr inbounds ([10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
  ; store gep cast
  store float %v, float* getelementptr inbounds ([10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
  call void @llvm.cuda.syncthreads()
; PTX: bar.sync 0;

  ; gep cast; load
  %5 = getelementptr inbounds [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5
  %6 = load float* %5, align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
  ; gep cast; store
  store float %v, float* %5, align 4
; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
  call void @llvm.cuda.syncthreads()
; PTX: bar.sync 0;

  ; cast; gep; load
  %7 = addrspacecast [10 x float] addrspace(3)* @array to [10 x float]*
  %8 = getelementptr inbounds [10 x float]* %7, i32 0, i32 %i
  %9 = load float* %8, align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [%{{(r|rl|rd)[0-9]+}}];
  ; cast; gep; store
  store float %v, float* %8, align 4
; PTX: st.shared.f32 [%{{(r|rl|rd)[0-9]+}}], %f{{[0-9]+}};
  call void @llvm.cuda.syncthreads()
; PTX: bar.sync 0;

  %sum2 = fadd float %1, %3
  %sum3 = fadd float %sum2, %4
  %sum4 = fadd float %sum3, %6
  %sum5 = fadd float %sum4, %9
  ret float %sum5
}

; When hoisting an addrspacecast between different pointer types, replace the
; addrspacecast with a bitcast.
define i32 @ld_int_from_float() {
; IR-LABEL: @ld_int_from_float
; IR: load i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*)
; PTX-LABEL: ld_int_from_float(
; PTX: ld.shared.u{{(32|64)}}
  %1 = load i32* addrspacecast(float addrspace(3)* @scalar to i32*), align 4
  ret i32 %1
}

declare void @llvm.cuda.syncthreads() #3

attributes #3 = { noduplicate nounwind }
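
; Illustrative sketch (comment only, not checked by any RUN line above): the
; rewrite this test exercises folds an access through a generic pointer that
; is provably an addrspacecast from shared memory, e.g.
;   %p = addrspacecast float addrspace(3)* @scalar to float*
;   %v = load float* %p
; into a direct shared-space access, e.g.
;   %v = load float addrspace(3)* @scalar
; so llc can select ld.shared/st.shared rather than a generic load or store
; through a converted pointer.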