; LLVM IR test: AMDGPU workgroup fence + s_barrier code generation (gfx803).
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN %s
; RUN: llvm-as -data-layout=A5 < %s | llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -verify-machineinstrs | FileCheck --check-prefix=GCN %s

; AMDGPU intrinsics used by the kernels below.
declare i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
declare i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
declare i32 @llvm.amdgcn.workitem.id.x()
declare i32 @llvm.amdgcn.workgroup.id.x()
declare void @llvm.amdgcn.s.barrier()

; Per-workgroup LDS (addrspace(3)) scratch used by the local-memory tests.
@test_local.temp = internal addrspace(3) global [1 x i32] undef, align 4
@test_global_local.temp = internal addrspace(3) global [1 x i32] undef, align 4
; Lane 0 writes 1911 (0x777, matched by the CHECK below) to LDS; all lanes then
; pass through a release-fence / s_barrier / acquire-fence sequence before
; reading the value back. The checks pin that the ds_write completes
; (s_waitcnt lgkmcnt(0)) immediately before s_barrier.
; GCN-LABEL: {{^}}test_local
; GCN: v_mov_b32_e32 v[[VAL:[0-9]+]], 0x777
; GCN: ds_write_b32 v{{[0-9]+}}, v[[VAL]]
; GCN: s_waitcnt lgkmcnt(0){{$}}
; GCN-NEXT: s_barrier
; GCN: flat_store_dword
define amdgpu_kernel void @test_local(i32 addrspace(1)*) {
  %2 = alloca i32 addrspace(1)*, align 4, addrspace(5)
  store i32 addrspace(1)* %0, i32 addrspace(1)* addrspace(5)* %2, align 4
  %3 = call i32 @llvm.amdgcn.workitem.id.x()
  %4 = zext i32 %3 to i64
  %5 = icmp eq i64 %4, 0
  br i1 %5, label %6, label %7

; <label>:6:                                      ; preds = %1
  store i32 1911, i32 addrspace(3)* getelementptr inbounds ([1 x i32], [1 x i32] addrspace(3)* @test_local.temp, i64 0, i64 0), align 4
  br label %7

; <label>:7:                                      ; preds = %6, %1
  fence syncscope("workgroup") release
  call void @llvm.amdgcn.s.barrier()
  fence syncscope("workgroup") acquire
  %8 = load i32, i32 addrspace(3)* getelementptr inbounds ([1 x i32], [1 x i32] addrspace(3)* @test_local.temp, i64 0, i64 0), align 4
  %9 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %2, align 4
  %10 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
  %11 = call i32 @llvm.amdgcn.workitem.id.x()
  %12 = call i32 @llvm.amdgcn.workgroup.id.x()
  %13 = getelementptr inbounds i8, i8 addrspace(4)* %10, i64 4
  %14 = bitcast i8 addrspace(4)* %13 to i16 addrspace(4)*
  %15 = load i16, i16 addrspace(4)* %14, align 4
  %16 = zext i16 %15 to i32
  %17 = mul i32 %12, %16
  %18 = add i32 %17, %11
  %19 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
  %20 = zext i32 %18 to i64
  %21 = bitcast i8 addrspace(4)* %19 to i64 addrspace(4)*
  %22 = load i64, i64 addrspace(4)* %21, align 8
  %23 = add i64 %22, %20
  %24 = getelementptr inbounds i32, i32 addrspace(1)* %9, i64 %23
  store i32 %8, i32 addrspace(1)* %24, align 4
  ret void
}
55
; A loop whose body stores a value derived from the global id plus 2184
; (0x888, matched below) to global memory, then executes the
; release-fence / s_barrier / acquire-fence sequence. The checks pin that the
; flat_store drains both vmcnt and lgkmcnt before s_barrier.
; GCN-LABEL: {{^}}test_global
; GCN: v_add_u32_e32 v{{[0-9]+}}, vcc, 0x888, v{{[0-9]+}}
; GCN: flat_store_dword
; GCN: s_waitcnt vmcnt(0) lgkmcnt(0){{$}}
; GCN-NEXT: s_barrier
define amdgpu_kernel void @test_global(i32 addrspace(1)*) {
  %2 = alloca i32 addrspace(1)*, align 4, addrspace(5)
  %3 = alloca i32, align 4, addrspace(5)
  store i32 addrspace(1)* %0, i32 addrspace(1)* addrspace(5)* %2, align 4
  store i32 0, i32 addrspace(5)* %3, align 4
  br label %4

; <label>:4:                                      ; preds = %58, %1
  %5 = load i32, i32 addrspace(5)* %3, align 4
  %6 = sext i32 %5 to i64
  %7 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
  %8 = call i32 @llvm.amdgcn.workitem.id.x()
  %9 = call i32 @llvm.amdgcn.workgroup.id.x()
  %10 = getelementptr inbounds i8, i8 addrspace(4)* %7, i64 4
  %11 = bitcast i8 addrspace(4)* %10 to i16 addrspace(4)*
  %12 = load i16, i16 addrspace(4)* %11, align 4
  %13 = zext i16 %12 to i32
  %14 = mul i32 %9, %13
  %15 = add i32 %14, %8
  %16 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
  %17 = zext i32 %15 to i64
  %18 = bitcast i8 addrspace(4)* %16 to i64 addrspace(4)*
  %19 = load i64, i64 addrspace(4)* %18, align 8
  %20 = add i64 %19, %17
  %21 = icmp ult i64 %6, %20
  br i1 %21, label %22, label %61

; <label>:22:                                     ; preds = %4
  %23 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
  %24 = call i32 @llvm.amdgcn.workitem.id.x()
  %25 = call i32 @llvm.amdgcn.workgroup.id.x()
  %26 = getelementptr inbounds i8, i8 addrspace(4)* %23, i64 4
  %27 = bitcast i8 addrspace(4)* %26 to i16 addrspace(4)*
  %28 = load i16, i16 addrspace(4)* %27, align 4
  %29 = zext i16 %28 to i32
  %30 = mul i32 %25, %29
  %31 = add i32 %30, %24
  %32 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
  %33 = zext i32 %31 to i64
  %34 = bitcast i8 addrspace(4)* %32 to i64 addrspace(4)*
  %35 = load i64, i64 addrspace(4)* %34, align 8
  %36 = add i64 %35, %33
  %37 = add i64 %36, 2184
  %38 = trunc i64 %37 to i32
  %39 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %2, align 4
  %40 = load i32, i32 addrspace(5)* %3, align 4
  %41 = sext i32 %40 to i64
  %42 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
  %43 = call i32 @llvm.amdgcn.workitem.id.x()
  %44 = call i32 @llvm.amdgcn.workgroup.id.x()
  %45 = getelementptr inbounds i8, i8 addrspace(4)* %42, i64 4
  %46 = bitcast i8 addrspace(4)* %45 to i16 addrspace(4)*
  %47 = load i16, i16 addrspace(4)* %46, align 4
  %48 = zext i16 %47 to i32
  %49 = mul i32 %44, %48
  %50 = add i32 %49, %43
  %51 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
  %52 = zext i32 %50 to i64
  %53 = bitcast i8 addrspace(4)* %51 to i64 addrspace(4)*
  %54 = load i64, i64 addrspace(4)* %53, align 8
  %55 = add i64 %54, %52
  %56 = add i64 %41, %55
  %57 = getelementptr inbounds i32, i32 addrspace(1)* %39, i64 %56
  store i32 %38, i32 addrspace(1)* %57, align 4
  fence syncscope("workgroup") release
  call void @llvm.amdgcn.s.barrier()
  fence syncscope("workgroup") acquire
  br label %58

; <label>:58:                                     ; preds = %22
  %59 = load i32, i32 addrspace(5)* %3, align 4
  %60 = add nsw i32 %59, 1
  store i32 %60, i32 addrspace(5)* %3, align 4
  br label %4

; <label>:61:                                     ; preds = %4
  ret void
}
139
; Mixed case: a global (flat) store followed by lane 0 writing 2457 (0x999,
; matched below) to LDS, then the release-fence / s_barrier / acquire-fence
; sequence. The checks pin that BOTH counters are drained
; (s_waitcnt vmcnt(0) lgkmcnt(0)) immediately before s_barrier.
; GCN-LABEL: {{^}}test_global_local
; GCN: v_mov_b32_e32 v[[VAL:[0-9]+]], 0x999
; GCN: ds_write_b32 v{{[0-9]+}}, v[[VAL]]
; GCN: s_waitcnt vmcnt(0) lgkmcnt(0){{$}}
; GCN-NEXT: s_barrier
; GCN: flat_store_dword
define amdgpu_kernel void @test_global_local(i32 addrspace(1)*) {
  %2 = alloca i32 addrspace(1)*, align 4, addrspace(5)
  store i32 addrspace(1)* %0, i32 addrspace(1)* addrspace(5)* %2, align 4
  %3 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %2, align 4
  %4 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
  %5 = call i32 @llvm.amdgcn.workitem.id.x()
  %6 = call i32 @llvm.amdgcn.workgroup.id.x()
  %7 = getelementptr inbounds i8, i8 addrspace(4)* %4, i64 4
  %8 = bitcast i8 addrspace(4)* %7 to i16 addrspace(4)*
  %9 = load i16, i16 addrspace(4)* %8, align 4
  %10 = zext i16 %9 to i32
  %11 = mul i32 %6, %10
  %12 = add i32 %11, %5
  %13 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
  %14 = zext i32 %12 to i64
  %15 = bitcast i8 addrspace(4)* %13 to i64 addrspace(4)*
  %16 = load i64, i64 addrspace(4)* %15, align 8
  %17 = add i64 %16, %14
  %18 = getelementptr inbounds i32, i32 addrspace(1)* %3, i64 %17
  store i32 1, i32 addrspace(1)* %18, align 4
  %19 = call i32 @llvm.amdgcn.workitem.id.x()
  %20 = zext i32 %19 to i64
  %21 = icmp eq i64 %20, 0
  br i1 %21, label %22, label %23

; <label>:22:                                     ; preds = %1
  store i32 2457, i32 addrspace(3)* getelementptr inbounds ([1 x i32], [1 x i32] addrspace(3)* @test_global_local.temp, i64 0, i64 0), align 4
  br label %23

; <label>:23:                                     ; preds = %22, %1
  fence syncscope("workgroup") release
  call void @llvm.amdgcn.s.barrier()
  fence syncscope("workgroup") acquire
  %24 = load i32, i32 addrspace(3)* getelementptr inbounds ([1 x i32], [1 x i32] addrspace(3)* @test_global_local.temp, i64 0, i64 0), align 4
  %25 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %2, align 4
  %26 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
  %27 = call i32 @llvm.amdgcn.workitem.id.x()
  %28 = call i32 @llvm.amdgcn.workgroup.id.x()
  %29 = getelementptr inbounds i8, i8 addrspace(4)* %26, i64 4
  %30 = bitcast i8 addrspace(4)* %29 to i16 addrspace(4)*
  %31 = load i16, i16 addrspace(4)* %30, align 4
  %32 = zext i16 %31 to i32
  %33 = mul i32 %28, %32
  %34 = add i32 %33, %27
  %35 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
  %36 = zext i32 %34 to i64
  %37 = bitcast i8 addrspace(4)* %35 to i64 addrspace(4)*
  %38 = load i64, i64 addrspace(4)* %37, align 8
  %39 = add i64 %38, %36
  %40 = getelementptr inbounds i32, i32 addrspace(1)* %25, i64 %39
  store i32 %24, i32 addrspace(1)* %40, align 4
  ret void
}