; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=TOVMEM -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=TOVMEM -check-prefix=GCN %s

; XXX - Why does it like to use vcc?

; GCN-LABEL: {{^}}spill_m0:

; GCN: #ASMSTART
; GCN-NEXT: s_mov_b32 m0, 0
; GCN-NEXT: #ASMEND
; GCN-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0

; TOVGPR: v_writelane_b32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]], [[M0_LANE:[0-9]+]]

; TOVMEM: v_writelane_b32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]], 0
; TOVMEM: s_mov_b32 [[COPY_EXEC_LO:s[0-9]+]], exec_lo
; TOVMEM: s_mov_b32 exec_lo, 1
; TOVMEM: buffer_store_dword [[SPILL_VREG]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:4 ; 4-byte Folded Spill
; TOVMEM: s_mov_b32 exec_lo, [[COPY_EXEC_LO]]

; GCN: s_cbranch_scc1 [[ENDIF:BB[0-9]+_[0-9]+]]

; GCN: [[ENDIF]]:
; TOVGPR: v_readlane_b32 [[M0_RESTORE:s[0-9]+]], [[SPILL_VREG]], [[M0_LANE]]
; TOVGPR: s_mov_b32 m0, [[M0_RESTORE]]

; TOVMEM: buffer_load_dword [[RELOAD_VREG:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:4 ; 4-byte Folded Reload
; TOVMEM: s_waitcnt vmcnt(0)
; TOVMEM: v_readlane_b32 [[M0_RESTORE:s[0-9]+]], [[RELOAD_VREG]], 0
; TOVMEM: s_mov_b32 m0, [[M0_RESTORE]]

; GCN: s_add_i32 s{{[0-9]+}}, m0, 1
define amdgpu_kernel void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
  %cmp0 = icmp eq i32 %cond, 0
  br i1 %cmp0, label %if, label %endif

if:
  call void asm sideeffect "v_nop", ""() #0
  br label %endif

endif:
  %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{m0}"(i32 %m0) #0
  store i32 %foo, i32 addrspace(1)* %out
  ret void
}

@lds = internal addrspace(3) global [64 x float] undef

; m0 is killed, so the spill in the entry block does not need to preserve it.
; GCN-LABEL: {{^}}spill_kill_m0_lds:

; GCN-NOT: v_readlane_b32 m0
; GCN-NOT: s_buffer_store_dword m0
; GCN-NOT: s_buffer_load_dword m0
define amdgpu_ps void @spill_kill_m0_lds(<16 x i8> addrspace(4)* inreg %arg, <16 x i8> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %m0) #0 {
main_body:
  %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
  %cmp = fcmp ueq float 0.000000e+00, %tmp
  br i1 %cmp, label %if, label %else

if:                                               ; preds = %main_body
  %lds_ptr = getelementptr [64 x float], [64 x float] addrspace(3)* @lds, i32 0, i32 0
  %lds_data_ = load float, float addrspace(3)* %lds_ptr
  %lds_data = call float @llvm.amdgcn.wqm.f32(float %lds_data_)
  br label %endif

else:                                             ; preds = %main_body
  %interp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
  br label %endif

endif:                                            ; preds = %else, %if
  %export = phi float [ %lds_data, %if ], [ %interp, %else ]
  %tmp4 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %export, float %export)
  call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp4, <2 x half> %tmp4, i1 true, i1 true) #0
  ret void
}

; Force save and restore of m0 during SMEM spill
; GCN-LABEL: {{^}}m0_unavailable_spill:
; GCN: s_load_dword [[REG0:s[0-9]+]], s[0:1], {{0x[0-9]+}}

; GCN: ; def m0, 1

; GCN: s_mov_b32 m0, [[REG0]]
; GCN: v_interp_mov_f32

; GCN: ; clobber m0

; TOSMEM: s_mov_b32 s2, m0
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM-NEXT: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Spill
; TOSMEM: s_mov_b32 m0, s2

; TOSMEM: s_mov_b64 exec,
; TOSMEM: s_cbranch_execz
; TOSMEM: s_branch

; TOSMEM: BB{{[0-9]+_[0-9]+}}:
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM-NEXT: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Reload

; GCN-NOT: v_readlane_b32 m0
; GCN-NOT: s_buffer_store_dword m0
; GCN-NOT: s_buffer_load_dword m0
define amdgpu_kernel void @m0_unavailable_spill(i32 %m0.arg) #0 {
main_body:
  %m0 = call i32 asm sideeffect "; def $0, 1", "={m0}"() #0
  %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0.arg)
  call void asm sideeffect "; clobber $0", "~{m0}"() #0
  %cmp = fcmp ueq float 0.000000e+00, %tmp
  br i1 %cmp, label %if, label %else

if:                                               ; preds = %main_body
  store volatile i32 8, i32 addrspace(1)* undef
  br label %endif

else:                                             ; preds = %main_body
  store volatile i32 11, i32 addrspace(1)* undef
  br label %endif

endif:
  ret void
}

; GCN-LABEL: {{^}}restore_m0_lds:
; FIXME: RegScavenger::isRegUsed() always returns true if m0 is reserved, so we have to save and restore it
; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_add_u32 m0, s3, {{0x[0-9]+}}
; TOSMEM: s_buffer_store_dword s1, s[88:91], m0 ; 4-byte Folded Spill
; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_load_dwordx2 [[REG:s\[[0-9]+:[0-9]+\]]]
; TOSMEM: s_add_u32 m0, s3, {{0x[0-9]+}}
; TOSMEM: s_waitcnt lgkmcnt(0)
; TOSMEM: s_buffer_store_dwordx2 [[REG]], s[88:91], m0 ; 8-byte Folded Spill
; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_cmp_eq_u32
; TOSMEM: s_cbranch_scc1

; TOSMEM: s_mov_b32 m0, -1

; TOSMEM: s_mov_b32 s2, m0
; TOSMEM: s_add_u32 m0, s3, 0x200
; TOSMEM: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[88:91], m0 ; 8-byte Folded Reload
; TOSMEM: s_mov_b32 m0, s2
; TOSMEM: s_waitcnt lgkmcnt(0)

; TOSMEM: ds_write_b64

; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM: s_buffer_load_dword s2, s[88:91], m0 ; 4-byte Folded Reload
; FIXME-TOSMEM-NOT: m0

; TOSMEM: s_mov_b32 [[REG1:s[0-9]+]], m0
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[88:91], m0 ; 8-byte Folded Reload
; TOSMEM: s_mov_b32 m0, [[REG1]]
; TOSMEM: s_mov_b32 m0, -1

; TOSMEM: s_waitcnt lgkmcnt(0)
; TOSMEM-NOT: m0
; TOSMEM: s_mov_b32 m0, s2
; TOSMEM: ; use m0

; TOSMEM: s_dcache_wb
; TOSMEM: s_endpgm
define amdgpu_kernel void @restore_m0_lds(i32 %arg) {
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
  %sval = load volatile i64, i64 addrspace(4)* undef
  %cmp = icmp eq i32 %arg, 0
  br i1 %cmp, label %ret, label %bb

bb:
  store volatile i64 %sval, i64 addrspace(3)* undef
  call void asm sideeffect "; use $0", "{m0}"(i32 %m0) #0
  br label %ret

ret:
  ret void
}

declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #1
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
declare float @llvm.amdgcn.wqm.f32(float) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }