; Divergence analysis tests for AMDGPU buffer atomic intrinsics.
; The value returned by a buffer atomic is the per-lane memory result and
; must therefore be reported as DIVERGENT even when all operands are uniform.
;RUN: opt -mtriple=amdgcn-mesa-mesa3d -analyze -divergence %s | FileCheck %s

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.swap(
; Atomic swap: even with uniform (inreg) operands, the old memory value seen
; by each lane differs, so the result must be marked divergent.
define float @buffer_atomic_swap(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.add(
; Atomic add: the returned pre-op value is per-lane, hence divergent.
define float @buffer_atomic_add(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.add(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.sub(
; Atomic sub: the returned pre-op value is per-lane, hence divergent.
define float @buffer_atomic_sub(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.sub(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.smin(
; Atomic signed min: the returned pre-op value is per-lane, hence divergent.
define float @buffer_atomic_smin(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.smin(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.umin(
; Atomic unsigned min: the returned pre-op value is per-lane, hence divergent.
define float @buffer_atomic_umin(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.umin(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.smax(
; Atomic signed max: the returned pre-op value is per-lane, hence divergent.
define float @buffer_atomic_smax(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.smax(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.umax(
; Atomic unsigned max: the returned pre-op value is per-lane, hence divergent.
define float @buffer_atomic_umax(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.umax(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.and(
; Atomic and: the returned pre-op value is per-lane, hence divergent.
define float @buffer_atomic_and(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.and(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.or(
; Atomic or: the returned pre-op value is per-lane, hence divergent.
define float @buffer_atomic_or(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.or(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.xor(
; Atomic xor: the returned pre-op value is per-lane, hence divergent.
define float @buffer_atomic_xor(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.xor(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.cmpswap(
; Atomic compare-and-swap (extra %cmp operand): the returned pre-op value is
; per-lane, hence divergent.
define float @buffer_atomic_cmpswap(<4 x i32> inreg %rsrc, i32 inreg %data, i32 inreg %cmp) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.cmpswap(i32 %data, i32 %cmp, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

; Declarations for the buffer atomic intrinsics exercised above.
; cmpswap takes an extra i32 compare operand before the resource descriptor.
declare i32 @llvm.amdgcn.buffer.atomic.swap(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.add(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.sub(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.smin(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.umin(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.smax(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.umax(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.and(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.or(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.xor(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.cmpswap(i32, i32, <4 x i32>, i32, i32, i1) #0

attributes #0 = { nounwind }