; RUN: llc -march=amdgcn < %s | FileCheck -check-prefix=GCN %s

; GCN-LABEL: {{^}}test_membound:
; MemoryBound: 1
; WaveLimiterHint : 1

; Straight-line kernel that does nothing but copy four consecutive
; <4 x i32> elements (indices id, id+1, id+2, id+3, where id is the
; workitem id) from %arg to %arg1. Almost every instruction is a load,
; store, or address computation, so the perf-hint analysis is expected
; to mark it MemoryBound (per the comment lines above, which document
; the expected analysis result).
define amdgpu_kernel void @test_membound(<4 x i32> addrspace(1)* nocapture readonly %arg, <4 x i32> addrspace(1)* nocapture %arg1) {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = zext i32 %tmp to i64
  ; copy element [id]
  %tmp3 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp2
  %tmp4 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp3, align 16
  %tmp5 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp2
  store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %tmp5, align 16
  ; copy element [id+1]
  %tmp6 = add nuw nsw i64 %tmp2, 1
  %tmp7 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp6
  %tmp8 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp7, align 16
  %tmp9 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp6
  store <4 x i32> %tmp8, <4 x i32> addrspace(1)* %tmp9, align 16
  ; copy element [id+2]
  %tmp10 = add nuw nsw i64 %tmp2, 2
  %tmp11 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp10
  %tmp12 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp11, align 16
  %tmp13 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp10
  store <4 x i32> %tmp12, <4 x i32> addrspace(1)* %tmp13, align 16
  ; copy element [id+3]
  %tmp14 = add nuw nsw i64 %tmp2, 3
  %tmp15 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp14
  %tmp16 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp15, align 16
  %tmp17 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp14
  store <4 x i32> %tmp16, <4 x i32> addrspace(1)* %tmp17, align 16
  ret void
}
; GCN-LABEL: {{^}}test_large_stride:
; MemoryBound: 0
; WaveLimiterHint : 1

; Three loads at large constant strides (elements 4096, 8192, 12288 —
; 16 KiB apart for i32), each stored back at a small offset (1, 2, 3).
; Expected analysis result (per the comment lines above): not memory
; bound, but the wave-limiter hint is set.
define amdgpu_kernel void @test_large_stride(i32 addrspace(1)* nocapture %arg) {
bb:
  %tmp = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 4096
  %tmp1 = load i32, i32 addrspace(1)* %tmp, align 4
  %tmp2 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 1
  store i32 %tmp1, i32 addrspace(1)* %tmp2, align 4
  %tmp3 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 8192
  %tmp4 = load i32, i32 addrspace(1)* %tmp3, align 4
  %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 2
  store i32 %tmp4, i32 addrspace(1)* %tmp5, align 4
  %tmp6 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 12288
  %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
  %tmp8 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 3
  store i32 %tmp7, i32 addrspace(1)* %tmp8, align 4
  ret void
}
; GCN-LABEL: {{^}}test_indirect:
; MemoryBound: 0
; WaveLimiterHint : 1

; Indirect access pattern: a <4 x i32> of indices is loaded from %arg,
; and each lane of that vector is sign-extended and used as the index
; of a further load, whose result is stored to %arg[0..3]. Expected
; analysis result (per the comment lines above): not memory bound, but
; the wave-limiter hint is set.
define amdgpu_kernel void @test_indirect(i32 addrspace(1)* nocapture %arg) {
bb:
  ; destination slots %arg[1..3] (%arg itself is slot 0)
  %tmp = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 1
  %tmp1 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 2
  %tmp2 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 3
  ; load the vector of four indices from the start of %arg
  %tmp3 = bitcast i32 addrspace(1)* %arg to <4 x i32> addrspace(1)*
  %tmp4 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp3, align 4
  ; lane 0: %arg[0] = %arg[idx0]
  %tmp5 = extractelement <4 x i32> %tmp4, i32 0
  %tmp6 = sext i32 %tmp5 to i64
  %tmp7 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp6
  %tmp8 = load i32, i32 addrspace(1)* %tmp7, align 4
  store i32 %tmp8, i32 addrspace(1)* %arg, align 4
  ; lane 1: %arg[1] = %arg[idx1]
  %tmp9 = extractelement <4 x i32> %tmp4, i32 1
  %tmp10 = sext i32 %tmp9 to i64
  %tmp11 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp10
  %tmp12 = load i32, i32 addrspace(1)* %tmp11, align 4
  store i32 %tmp12, i32 addrspace(1)* %tmp, align 4
  ; lane 2: %arg[2] = %arg[idx2]
  %tmp13 = extractelement <4 x i32> %tmp4, i32 2
  %tmp14 = sext i32 %tmp13 to i64
  %tmp15 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp14
  %tmp16 = load i32, i32 addrspace(1)* %tmp15, align 4
  store i32 %tmp16, i32 addrspace(1)* %tmp1, align 4
  ; lane 3: %arg[3] = %arg[idx3]
  %tmp17 = extractelement <4 x i32> %tmp4, i32 3
  %tmp18 = sext i32 %tmp17 to i64
  %tmp19 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp18
  %tmp20 = load i32, i32 addrspace(1)* %tmp19, align 4
  store i32 %tmp20, i32 addrspace(1)* %tmp2, align 4
  ret void
}
; GCN-LABEL: {{^}}test_indirect_through_phi:
; MemoryBound: 0
; WaveLimiterHint : 0

; A 1024-iteration loop whose store address is derived from a value
; carried through a phi (seeded by a load from %arg, then updated and
; masked with the workitem id each iteration). Expected analysis result
; (per the comment lines above): neither memory bound nor wave limited.
define amdgpu_kernel void @test_indirect_through_phi(float addrspace(1)* %arg) {
bb:
  %load = load float, float addrspace(1)* %arg, align 8
  %load.f = bitcast float %load to i32
  %n = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1

bb1:                                              ; preds = %bb1, %bb
  %phi = phi i32 [ %load.f, %bb ], [ %and2, %bb1 ]
  %ind = phi i32 [ 0, %bb ], [ %inc2, %bb1 ]
  ; store index = phi-carried value masked by the workitem id
  %and1 = and i32 %phi, %n
  %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %and1
  store float %load, float addrspace(1)* %gep, align 4
  ; advance the phi-carried value and the trip counter
  %inc1 = add nsw i32 %phi, 1310720
  %and2 = and i32 %inc1, %n
  %inc2 = add nuw nsw i32 %ind, 1
  %cmp = icmp eq i32 %inc2, 1024
  br i1 %cmp, label %bb2, label %bb1

bb2:                                              ; preds = %bb1
  ret void
}
111declare i32 @llvm.amdgcn.workitem.id.x()