; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
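
; Codegen tests for the llvm.uadd.with.overflow.* intrinsics on AMDGPU.
; The intrinsic returns the wrapped sum plus an i1 carry; e.g. for i32,
; uadd.with.overflow(0xffffffff, 1) yields { 0, carry = 1 }, since the
; carry is set exactly when the truncated sum is less than either operand.
; The RUN lines cover three GCN subtargets (SI, VI/tonga, GFX9) plus the
; Evergreen (EG) R600 target.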

; FUNC-LABEL: {{^}}s_uaddo_i64_zext:
; GCN: s_add_u32
; GCN: s_addc_u32
; GCN: v_cmp_lt_u64_e32 vcc

; EG: ADDC_UINT
; EG: ADDC_UINT
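
; The carry bit is zero-extended and added back into the i64 sum, so the
; kernel stores a single i64. On GCN the checks expect the scalar add pair
; followed by an unsigned 64-bit compare that recovers the carry (the sum
; being less than an operand implies the addition wrapped).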
define amdgpu_kernel void @s_uaddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
  %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue { i64, i1 } %uadd, 0
  %carry = extractvalue { i64, i1 } %uadd, 1
  %ext = zext i1 %carry to i64
  %add2 = add i64 %val, %ext
  store i64 %add2, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Could do scalar

; FUNC-LABEL: {{^}}s_uaddo_i32:
; SI: v_add_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
; VI: v_add_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_add_co_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}

; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc

; EG: ADDC_UINT
; EG: ADD_INT
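
; Even though both operands are uniform kernel arguments (SGPRs), the add
; is currently selected to the VALU form checked above; hence the FIXME.
; The i1 carry lands in vcc and is materialized as 0/1 with v_cndmask_b32.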
define amdgpu_kernel void @s_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) #0 {
  %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue { i32, i1 } %uadd, 0
  %carry = extractvalue { i32, i1 } %uadd, 1
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}

; FUNC-LABEL: {{^}}v_uaddo_i32:
; SI: v_add_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_add_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_add_co_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}

; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc

; EG: ADDC_UINT
; EG: ADD_INT
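
; Same as s_uaddo_i32, but the inputs are loaded per thread (the GEPs are
; indexed by the sign-extended workitem id), so the add sees two VGPR
; operands rather than an SGPR and a VGPR.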
define amdgpu_kernel void @v_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i64 %tid.ext
  %a = load i32, i32 addrspace(1)* %a.gep, align 4
  %b = load i32, i32 addrspace(1)* %b.gep, align 4
  %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue { i32, i1 } %uadd, 0
  %carry = extractvalue { i32, i1 } %uadd, 1
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}

; FUNC-LABEL: {{^}}v_uaddo_i32_novcc:
; SI: v_add_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_add_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_add_co_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}

; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc

; EG: ADDC_UINT
; EG: ADD_INT
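
; Like v_uaddo_i32, but an inline asm clobber of VCC sits between the two
; volatile stores, so the carry value has to survive the clobber rather
; than stay live in vcc.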
define amdgpu_kernel void @v_uaddo_i32_novcc(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i64 %tid.ext
  %a = load i32, i32 addrspace(1)* %a.gep, align 4
  %b = load i32, i32 addrspace(1)* %b.gep, align 4
  %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue { i32, i1 } %uadd, 0
  %carry = extractvalue { i32, i1 } %uadd, 1
  store volatile i32 %val, i32 addrspace(1)* %out, align 4
  call void asm sideeffect "", "~{VCC}"() #0
  store volatile i1 %carry, i1 addrspace(1)* %carryout
  ret void
}

; FUNC-LABEL: {{^}}s_uaddo_i64:
; GCN: s_add_u32
; GCN: s_addc_u32

; EG: ADDC_UINT
; EG: ADD_INT
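
; The 64-bit scalar add is split into a low-half add that writes scc and
; a high-half add-with-carry that reads it.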
define amdgpu_kernel void @s_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) #0 {
  %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue { i64, i1 } %uadd, 0
  %carry = extractvalue { i64, i1 } %uadd, 1
  store i64 %val, i64 addrspace(1)* %out, align 8
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}

; FUNC-LABEL: {{^}}v_uaddo_i64:
; SI: v_add_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_addc_u32_e32 v{{[0-9]+}}, vcc,

; VI: v_add_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_addc_u32_e32 v{{[0-9]+}}, vcc,

; GFX9: v_add_co_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_addc_co_u32_e32 v{{[0-9]+}}, vcc,

; EG: ADDC_UINT
; EG: ADD_INT
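
; The 64-bit VALU add is a carry chain through vcc, analogous to the
; scalar scc chain above; GFX9 renames the pair to the explicit
; carry-out _co_ forms.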
define amdgpu_kernel void @v_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %a.ptr, i64 addrspace(1)* %b.ptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i64, i64 addrspace(1)* %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i64, i64 addrspace(1)* %b.ptr, i64 %tid.ext
  %a = load i64, i64 addrspace(1)* %a.gep
  %b = load i64, i64 addrspace(1)* %b.gep
  %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue { i64, i1 } %uadd, 0
  %carry = extractvalue { i64, i1 } %uadd, 1
  store i64 %val, i64 addrspace(1)* %out
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}

; FUNC-LABEL: {{^}}v_uaddo_i16:
; VI: v_add_u16_e32
; VI: v_cmp_lt_u16_e32

; GFX9: v_add_u16_e32
; GFX9: v_cmp_lt_u16_e32
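
; There is no 16-bit add with a carry out, so the carry is recomputed by
; comparing the truncated sum against an operand. Only VI and GFX9 are
; checked here; SI has no native 16-bit instructions.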
define amdgpu_kernel void @v_uaddo_i16(i16 addrspace(1)* %out, i1 addrspace(1)* %carryout, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, i16 addrspace(1)* %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, i16 addrspace(1)* %b.ptr, i64 %tid.ext
  %a = load i16, i16 addrspace(1)* %a.gep
  %b = load i16, i16 addrspace(1)* %b.gep
  %uadd = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 %a, i16 %b)
  %val = extractvalue { i16, i1 } %uadd, 0
  %carry = extractvalue { i16, i1 } %uadd, 1
  store i16 %val, i16 addrspace(1)* %out
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
declare { i16, i1 } @llvm.uadd.with.overflow.i16(i16, i16) #1
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) #1
declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }