; RUN: llc -mtriple amdgcn--amdhsa -mcpu=fiji -amdgpu-scalarize-global-loads=true -verify-machineinstrs < %s | FileCheck %s
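; Loads from global memory whose address is uniform across the wavefront can
; be selected as scalar (s_load_*) instructions; divergent addresses must use
; the vector memory path (flat_load_*). -amdgpu-scalarize-global-loads=true
; enables this scalarization.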

; uniform loads
; CHECK-LABEL: @uniform_load
; CHECK: s_load_dwordx4
; CHECK-NOT: flat_load_dword
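; %arg is a kernel argument, so every lane sees the same pointer; the four
; loads from consecutive offsets are uniform and combine into one s_load_dwordx4.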

define amdgpu_kernel void @uniform_load(float addrspace(1)* %arg, [8 x i32], float addrspace(1)* %arg1) {
bb:
  %tmp2 = load float, float addrspace(1)* %arg, align 4, !tbaa !8
  %tmp3 = fadd float %tmp2, 0.000000e+00
  %tmp4 = getelementptr inbounds float, float addrspace(1)* %arg, i64 1
  %tmp5 = load float, float addrspace(1)* %tmp4, align 4, !tbaa !8
  %tmp6 = fadd float %tmp3, %tmp5
  %tmp7 = getelementptr inbounds float, float addrspace(1)* %arg, i64 2
  %tmp8 = load float, float addrspace(1)* %tmp7, align 4, !tbaa !8
  %tmp9 = fadd float %tmp6, %tmp8
  %tmp10 = getelementptr inbounds float, float addrspace(1)* %arg, i64 3
  %tmp11 = load float, float addrspace(1)* %tmp10, align 4, !tbaa !8
  %tmp12 = fadd float %tmp9, %tmp11
  %tmp13 = getelementptr inbounds float, float addrspace(1)* %arg1
  store float %tmp12, float addrspace(1)* %tmp13, align 4, !tbaa !8
  ret void
}

; non-uniform loads
; CHECK-LABEL: @non-uniform_load
; CHECK: flat_load_dword
; CHECK-NOT: s_load_dwordx4
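; Addresses here depend on llvm.amdgcn.workitem.id.x, which differs per lane,
; so the loads are divergent and must stay as flat_load_dword.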

define amdgpu_kernel void @non-uniform_load(float addrspace(1)* %arg, [8 x i32], float addrspace(1)* %arg1) {
bb:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x() #1
  %tmp2 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp
  %tmp3 = load float, float addrspace(1)* %tmp2, align 4, !tbaa !8
  %tmp4 = fadd float %tmp3, 0.000000e+00
  %tmp5 = add i32 %tmp, 1
  %tmp6 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp5
  %tmp7 = load float, float addrspace(1)* %tmp6, align 4, !tbaa !8
  %tmp8 = fadd float %tmp4, %tmp7
  %tmp9 = add i32 %tmp, 2
  %tmp10 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp9
  %tmp11 = load float, float addrspace(1)* %tmp10, align 4, !tbaa !8
  %tmp12 = fadd float %tmp8, %tmp11
  %tmp13 = add i32 %tmp, 3
  %tmp14 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp13
  %tmp15 = load float, float addrspace(1)* %tmp14, align 4, !tbaa !8
  %tmp16 = fadd float %tmp12, %tmp15
  %tmp17 = getelementptr inbounds float, float addrspace(1)* %arg1, i32 %tmp
  store float %tmp16, float addrspace(1)* %tmp17, align 4, !tbaa !8
  ret void
}


; uniform load dominated by no-alias store - scalarize
; CHECK-LABEL: @no_memdep_alias_arg
; CHECK: s_load_dwordx2 s{{\[}}[[IN_LO:[0-9]+]]:[[IN_HI:[0-9]+]]], s[4:5], 0x0
; CHECK: s_load_dword [[SVAL:s[0-9]+]], s{{\[}}[[IN_LO]]:[[IN_HI]]], 0x0
; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[SVAL]]
; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
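; Because %in is noalias, the store to %out0 cannot clobber it, so the load
; from %in is still safe to scalarize even though a store dominates it.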

define amdgpu_kernel void @no_memdep_alias_arg(i32 addrspace(1)* noalias %in, [8 x i32], i32 addrspace(1)* %out0, [8 x i32], i32 addrspace(1)* %out1) {
  store i32 0, i32 addrspace(1)* %out0
  %val = load i32, i32 addrspace(1)* %in
  store i32 %val, i32 addrspace(1)* %out1
  ret void
}

; uniform load dominated by alias store - vector
; CHECK-LABEL: {{^}}memdep:
; CHECK: flat_store_dword
; CHECK: flat_load_dword [[VVAL:v[0-9]+]]
; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
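; Without noalias, the store to %out0 may alias %in, so the load must remain
; a vector flat_load_dword ordered after the store.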
define amdgpu_kernel void @memdep(i32 addrspace(1)* %in, [8 x i32], i32 addrspace(1)* %out0, [8 x i32], i32 addrspace(1)* %out1) {
  store i32 0, i32 addrspace(1)* %out0
  %val = load i32, i32 addrspace(1)* %in
  store i32 %val, i32 addrspace(1)* %out1
  ret void
}

; uniform load from global array
; CHECK-LABEL: @global_array
; CHECK: s_getpc_b64 [[GET_PC:s\[[0-9]+:[0-9]+\]]]
; CHECK-DAG: s_load_dwordx2 [[A_ADDR:s\[[0-9]+:[0-9]+\]]], [[GET_PC]], 0x0
; CHECK-DAG: s_load_dwordx2 [[A_ADDR1:s\[[0-9]+:[0-9]+\]]], [[A_ADDR]], 0x0
; CHECK-DAG: s_load_dwordx2 [[OUT:s\[[0-9]+:[0-9]+\]]], s[4:5], 0x0
; CHECK-DAG: s_load_dword [[SVAL:s[0-9]+]], [[A_ADDR1]], 0x0
; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[SVAL]]
; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
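; Both the load of the pointer stored at @A and the load through that pointer
; are uniform with no intervening aliasing store, so both stay scalar.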
@A = common local_unnamed_addr addrspace(1) global i32 addrspace(1)* null, align 4

define amdgpu_kernel void @global_array(i32 addrspace(1)* nocapture %out) {
entry:
  %load0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @A, align 4
  %load1 = load i32, i32 addrspace(1)* %load0, align 4
  store i32 %load1, i32 addrspace(1)* %out, align 4
  ret void
}


; uniform load from global array dominated by alias store
; CHECK-LABEL: @global_array_alias_store
; CHECK: flat_store_dword
; CHECK: v_mov_b32_e32 v[[ADDR_LO:[0-9]+]], s{{[0-9]+}}
; CHECK: v_mov_b32_e32 v[[ADDR_HI:[0-9]+]], s{{[0-9]+}}
; CHECK: flat_load_dwordx2 [[A_ADDR:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[ADDR_LO]]:[[ADDR_HI]]{{\]}}
; CHECK: flat_load_dword [[VVAL:v[0-9]+]], [[A_ADDR]]
; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
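; The store through %gep may alias the memory read via @A, so both loads are
; forced onto the vector (flat) path, ordered after the store.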
define amdgpu_kernel void @global_array_alias_store(i32 addrspace(1)* nocapture %out, [8 x i32], i32 %n) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 %n
  store i32 12, i32 addrspace(1)* %gep
  %load0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @A, align 4
  %load1 = load i32, i32 addrspace(1)* %load0, align 4
  store i32 %load1, i32 addrspace(1)* %out, align 4
  ret void
}


declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #1 = { nounwind readnone }

!8 = !{!9, !9, i64 0}
!9 = !{!"float", !10, i64 0}
!10 = !{!"omnipotent char", !11, i64 0}
!11 = !{!"Simple C/C++ TBAA"}