/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "ac_shader_util.h"
#include "ac_gpu_info.h"

#include "sid.h"
#include "util/u_math.h"

#include <assert.h>
#include <stdlib.h>
#include <string.h>

unsigned ac_get_spi_shader_z_format(bool writes_z, bool writes_stencil, bool writes_samplemask,
                                    bool writes_mrt0_alpha)
{
   /* RGBA = (Z, stencil, samplemask, mrt0_alpha).
    * Both stencil and sample mask need only 16 bits.
    */
   if (writes_mrt0_alpha) {
      if (writes_stencil || writes_samplemask)
         return V_028710_SPI_SHADER_32_ABGR;
      else
         return V_028710_SPI_SHADER_32_AR;
   }

   if (writes_samplemask) {
      if (writes_z)
         return V_028710_SPI_SHADER_32_ABGR;
      else
         return V_028710_SPI_SHADER_UINT16_ABGR;
   }

   if (writes_stencil)
      return V_028710_SPI_SHADER_32_GR;
   else if (writes_z)
      return V_028710_SPI_SHADER_32_R;
   else
      return V_028710_SPI_SHADER_ZERO;
}

unsigned ac_get_cb_shader_mask(unsigned spi_shader_col_format)
{
   unsigned i, cb_shader_mask = 0;

   /* If the format is ~0, it means we want a full mask. */
   if (spi_shader_col_format == ~0)
      return ~0;

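   /* Each MRT occupies one 4-bit nibble in both spi_shader_col_format and
    * cb_shader_mask. E.g. MRT0 = SPI_SHADER_32_GR and MRT1 = SPI_SHADER_FP16_ABGR
    * yield (0x3 << 0) | (0xf << 4) = 0xf3.
    */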
   for (i = 0; i < 8; i++) {
      switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
      case V_028714_SPI_SHADER_ZERO:
         break;
      case V_028714_SPI_SHADER_32_R:
         cb_shader_mask |= 0x1 << (i * 4);
         break;
      case V_028714_SPI_SHADER_32_GR:
         cb_shader_mask |= 0x3 << (i * 4);
         break;
      case V_028714_SPI_SHADER_32_AR:
         cb_shader_mask |= 0x9u << (i * 4);
         break;
      case V_028714_SPI_SHADER_FP16_ABGR:
      case V_028714_SPI_SHADER_UNORM16_ABGR:
      case V_028714_SPI_SHADER_SNORM16_ABGR:
      case V_028714_SPI_SHADER_UINT16_ABGR:
      case V_028714_SPI_SHADER_SINT16_ABGR:
      case V_028714_SPI_SHADER_32_ABGR:
         cb_shader_mask |= 0xfu << (i * 4);
         break;
      default:
         assert(0);
      }
   }
   return cb_shader_mask;
}

/**
 * Calculate the appropriate setting of VGT_GS_MODE when \p shader is a
 * geometry shader.
 */
uint32_t ac_vgt_gs_mode(unsigned gs_max_vert_out, enum amd_gfx_level gfx_level)
{
   unsigned cut_mode;

   assert(gfx_level < GFX11);

   if (gs_max_vert_out <= 128) {
      cut_mode = V_028A40_GS_CUT_128;
   } else if (gs_max_vert_out <= 256) {
      cut_mode = V_028A40_GS_CUT_256;
   } else if (gs_max_vert_out <= 512) {
      cut_mode = V_028A40_GS_CUT_512;
   } else {
      assert(gs_max_vert_out <= 1024);
      cut_mode = V_028A40_GS_CUT_1024;
   }

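   /* E.g. a GFX9 GS with gs_max_vert_out = 200 selects GS_CUT_256, enables
    * ONCHIP, and leaves ES_WRITE_OPTIMIZE off (it's only set on GFX8 and older).
    */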
   return S_028A40_MODE(V_028A40_GS_SCENARIO_G) | S_028A40_CUT_MODE(cut_mode) |
          S_028A40_ES_WRITE_OPTIMIZE(gfx_level <= GFX8) | S_028A40_GS_WRITE_OPTIMIZE(1) |
          S_028A40_ONCHIP(gfx_level >= GFX9 ? 1 : 0);
}

/// Translate a (dfmt, nfmt) pair into a chip-appropriate combined format
/// value for LLVM8+ tbuffer intrinsics.
unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsigned nfmt)
{
   // Some games try to access vertex buffers without a valid format.
   // This is a game bug, but we should still handle it gracefully.
   if (dfmt == V_008F0C_GFX10_FORMAT_INVALID)
      return V_008F0C_GFX10_FORMAT_INVALID;

   if (gfx_level >= GFX11) {
      switch (dfmt) {
      default:
         unreachable("bad dfmt");
      case V_008F0C_BUF_DATA_FORMAT_INVALID:
         return V_008F0C_GFX11_FORMAT_INVALID;

      case V_008F0C_BUF_DATA_FORMAT_8:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_8_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_8_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_8_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_8_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_8_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_8_SINT;
         }

      case V_008F0C_BUF_DATA_FORMAT_8_8:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_8_8_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_8_8_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_8_8_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_8_8_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_8_8_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_8_8_SINT;
         }

      case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_8_8_8_8_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_8_8_8_8_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_8_8_8_8_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_8_8_8_8_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_8_8_8_8_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_8_8_8_8_SINT;
         }

      case V_008F0C_BUF_DATA_FORMAT_16:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_16_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_16_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_16_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_16_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_16_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_16_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_16_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_16_16:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_16_16_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_16_16_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_16_16_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_16_16_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_16_16_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_16_16_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_16_16_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_32:
         switch (nfmt) {
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_32_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_32_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_32_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_32_32:
         switch (nfmt) {
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_32_32_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_32_32_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_32_32_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_32_32_32:
         switch (nfmt) {
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_32_32_32_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_32_32_32_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_32_32_32_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
         switch (nfmt) {
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_32_32_32_32_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_32_32_32_32_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_32_32_32_32_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_2_10_10_10_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_2_10_10_10_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_2_10_10_10_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_2_10_10_10_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_2_10_10_10_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_2_10_10_10_SINT;
         }

      case V_008F0C_BUF_DATA_FORMAT_10_11_11:
         switch (nfmt) {
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_10_11_11_FLOAT;
         }
      }
   } else if (gfx_level >= GFX10) {
      unsigned format;
      switch (dfmt) {
      default:
         unreachable("bad dfmt");
      case V_008F0C_BUF_DATA_FORMAT_INVALID:
         format = V_008F0C_GFX10_FORMAT_INVALID;
         break;
      case V_008F0C_BUF_DATA_FORMAT_8:
         format = V_008F0C_GFX10_FORMAT_8_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_8_8:
         format = V_008F0C_GFX10_FORMAT_8_8_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
         format = V_008F0C_GFX10_FORMAT_8_8_8_8_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_16:
         format = V_008F0C_GFX10_FORMAT_16_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_16_16:
         format = V_008F0C_GFX10_FORMAT_16_16_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
         format = V_008F0C_GFX10_FORMAT_16_16_16_16_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_32:
         format = V_008F0C_GFX10_FORMAT_32_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_32_32:
         format = V_008F0C_GFX10_FORMAT_32_32_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_32_32_32:
         format = V_008F0C_GFX10_FORMAT_32_32_32_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
         format = V_008F0C_GFX10_FORMAT_32_32_32_32_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
         format = V_008F0C_GFX10_FORMAT_2_10_10_10_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_10_11_11:
         format = V_008F0C_GFX10_FORMAT_10_11_11_UINT;
         break;
      }

      // Use the regularity properties of the combined format enum.
      //
      // Note: float is incompatible with 8-bit data formats,
      // [us]{norm,scaled} are incompatible with 32-bit data formats.
      // [us]scaled are not writable.
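      //
      // E.g. V_008F0C_GFX10_FORMAT_8_UINT - 4 == V_008F0C_GFX10_FORMAT_8_UNORM,
      // and V_008F0C_GFX10_FORMAT_32_UINT + 2 == V_008F0C_GFX10_FORMAT_32_FLOAT.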
      switch (nfmt) {
      case V_008F0C_BUF_NUM_FORMAT_UNORM:
         format -= 4;
         break;
      case V_008F0C_BUF_NUM_FORMAT_SNORM:
         format -= 3;
         break;
      case V_008F0C_BUF_NUM_FORMAT_USCALED:
         format -= 2;
         break;
      case V_008F0C_BUF_NUM_FORMAT_SSCALED:
         format -= 1;
         break;
      default:
         unreachable("bad nfmt");
      case V_008F0C_BUF_NUM_FORMAT_UINT:
         break;
      case V_008F0C_BUF_NUM_FORMAT_SINT:
         format += 1;
         break;
      case V_008F0C_BUF_NUM_FORMAT_FLOAT:
         format += 2;
         break;
      }

      return format;
   } else {
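      /* GFX6-9: dfmt and nfmt are separate 4-bit MTBUF instruction fields,
       * packed here as nfmt in bits [7:4] and dfmt in bits [3:0]. */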
      return dfmt | (nfmt << 4);
   }
}

#define DUP2(v) v, v
#define DUP3(v) v, v, v
#define DUP4(v) v, v, v, v

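/* The first value these macros expand to is the has_hw_format bitmask: bit N
 * set means a hw format exists for N+1 channels. E.g. FMT yields 0xb because
 * there is no native 3-channel 8-bit or 16-bit format, and FMT_64 yields 0x3
 * because each 64-bit channel consumes two 32-bit hw channels.
 */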
#define FMT(dfmt, nfmt) 0xb, {HW_FMT(dfmt, nfmt), HW_FMT(dfmt##_##dfmt, nfmt), HW_FMT_INVALID, HW_FMT(dfmt##_##dfmt##_##dfmt##_##dfmt, nfmt)}
#define FMT_32(nfmt) 0xf, {HW_FMT(32, nfmt), HW_FMT(32_32, nfmt), HW_FMT(32_32_32, nfmt), HW_FMT(32_32_32_32, nfmt)}
#define FMT_64(nfmt) 0x3, {HW_FMT(32_32, nfmt), HW_FMT(32_32_32_32, nfmt), DUP2(HW_FMT_INVALID)}
#define FMTP(dfmt, nfmt) 0xf, {DUP4(HW_FMT(dfmt, nfmt))}

#define DST_SEL(x, y, z, w) \
   (S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_##x) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_##y) | \
    S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_##z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_##w))

#define LIST_NFMT_8_16(nfmt) \
   [(int)PIPE_FORMAT_R8_##nfmt] = {DST_SEL(X,0,0,1), 1, 1, 1, FMT(8, nfmt)}, \
   [(int)PIPE_FORMAT_R8G8_##nfmt] = {DST_SEL(X,Y,0,1), 2, 2, 1, FMT(8, nfmt)}, \
   [(int)PIPE_FORMAT_R8G8B8_##nfmt] = {DST_SEL(X,Y,Z,1), 3, 3, 1, FMT(8, nfmt)}, \
   [(int)PIPE_FORMAT_B8G8R8_##nfmt] = {DST_SEL(Z,Y,X,1), 3, 3, 1, FMT(8, nfmt)}, \
   [(int)PIPE_FORMAT_R8G8B8A8_##nfmt] = {DST_SEL(X,Y,Z,W), 4, 4, 1, FMT(8, nfmt)}, \
   [(int)PIPE_FORMAT_B8G8R8A8_##nfmt] = {DST_SEL(Z,Y,X,W), 4, 4, 1, FMT(8, nfmt)}, \
   [(int)PIPE_FORMAT_R16_##nfmt] = {DST_SEL(X,0,0,1), 2, 1, 2, FMT(16, nfmt)}, \
   [(int)PIPE_FORMAT_R16G16_##nfmt] = {DST_SEL(X,Y,0,1), 4, 2, 2, FMT(16, nfmt)}, \
   [(int)PIPE_FORMAT_R16G16B16_##nfmt] = {DST_SEL(X,Y,Z,1), 6, 3, 2, FMT(16, nfmt)}, \
   [(int)PIPE_FORMAT_R16G16B16A16_##nfmt] = {DST_SEL(X,Y,Z,W), 8, 4, 2, FMT(16, nfmt)},

#define LIST_NFMT_32_64(nfmt) \
   [(int)PIPE_FORMAT_R32_##nfmt] = {DST_SEL(X,0,0,1), 4, 1, 4, FMT_32(nfmt)}, \
   [(int)PIPE_FORMAT_R32G32_##nfmt] = {DST_SEL(X,Y,0,1), 8, 2, 4, FMT_32(nfmt)}, \
   [(int)PIPE_FORMAT_R32G32B32_##nfmt] = {DST_SEL(X,Y,Z,1), 12, 3, 4, FMT_32(nfmt)}, \
   [(int)PIPE_FORMAT_R32G32B32A32_##nfmt] = {DST_SEL(X,Y,Z,W), 16, 4, 4, FMT_32(nfmt)}, \
   [(int)PIPE_FORMAT_R64_##nfmt] = {DST_SEL(X,Y,0,0), 8, 1, 8, FMT_64(nfmt)}, \
   [(int)PIPE_FORMAT_R64G64_##nfmt] = {DST_SEL(X,Y,Z,W), 16, 2, 8, FMT_64(nfmt)}, \
   [(int)PIPE_FORMAT_R64G64B64_##nfmt] = {DST_SEL(X,Y,Z,W), 24, 3, 8, FMT_64(nfmt)}, \
   [(int)PIPE_FORMAT_R64G64B64A64_##nfmt] = {DST_SEL(X,Y,Z,W), 32, 4, 8, FMT_64(nfmt)}, \

#define VB_FORMATS \
   [(int)PIPE_FORMAT_NONE] = {DST_SEL(0,0,0,1), 0, 4, 0, 0xf, {DUP4(HW_FMT_INVALID)}}, \
   LIST_NFMT_8_16(UNORM) \
   LIST_NFMT_8_16(SNORM) \
   LIST_NFMT_8_16(USCALED) \
   LIST_NFMT_8_16(SSCALED) \
   LIST_NFMT_8_16(UINT) \
   LIST_NFMT_8_16(SINT) \
   LIST_NFMT_32_64(UINT) \
   LIST_NFMT_32_64(SINT) \
   LIST_NFMT_32_64(FLOAT) \
   [(int)PIPE_FORMAT_R16_FLOAT] = {DST_SEL(X,0,0,1), 2, 1, 2, FMT(16, FLOAT)}, \
   [(int)PIPE_FORMAT_R16G16_FLOAT] = {DST_SEL(X,Y,0,1), 4, 2, 2, FMT(16, FLOAT)}, \
   [(int)PIPE_FORMAT_R16G16B16_FLOAT] = {DST_SEL(X,Y,Z,1), 6, 3, 2, FMT(16, FLOAT)}, \
   [(int)PIPE_FORMAT_R16G16B16A16_FLOAT] = {DST_SEL(X,Y,Z,W), 8, 4, 2, FMT(16, FLOAT)}, \
   [(int)PIPE_FORMAT_B10G10R10A2_UNORM] = {DST_SEL(Z,Y,X,W), 4, 4, 0, FMTP(2_10_10_10, UNORM)}, \
   [(int)PIPE_FORMAT_B10G10R10A2_SNORM] = {DST_SEL(Z,Y,X,W), 4, 4, 0, FMTP(2_10_10_10, SNORM), \
                                           AA(AC_ALPHA_ADJUST_SNORM)}, \
   [(int)PIPE_FORMAT_B10G10R10A2_USCALED] = {DST_SEL(Z,Y,X,W), 4, 4, 0, FMTP(2_10_10_10, USCALED)}, \
   [(int)PIPE_FORMAT_B10G10R10A2_SSCALED] = {DST_SEL(Z,Y,X,W), 4, 4, 0, FMTP(2_10_10_10, SSCALED), \
                                             AA(AC_ALPHA_ADJUST_SSCALED)}, \
   [(int)PIPE_FORMAT_B10G10R10A2_UINT] = {DST_SEL(Z,Y,X,W), 4, 4, 0, FMTP(2_10_10_10, UINT)}, \
   [(int)PIPE_FORMAT_B10G10R10A2_SINT] = {DST_SEL(Z,Y,X,W), 4, 4, 0, FMTP(2_10_10_10, SINT), \
                                          AA(AC_ALPHA_ADJUST_SINT)}, \
   [(int)PIPE_FORMAT_R10G10B10A2_UNORM] = {DST_SEL(X,Y,Z,W), 4, 4, 0, FMTP(2_10_10_10, UNORM)}, \
   [(int)PIPE_FORMAT_R10G10B10A2_SNORM] = {DST_SEL(X,Y,Z,W), 4, 4, 0, FMTP(2_10_10_10, SNORM), \
                                           AA(AC_ALPHA_ADJUST_SNORM)}, \
   [(int)PIPE_FORMAT_R10G10B10A2_USCALED] = {DST_SEL(X,Y,Z,W), 4, 4, 0, FMTP(2_10_10_10, USCALED)}, \
   [(int)PIPE_FORMAT_R10G10B10A2_SSCALED] = {DST_SEL(X,Y,Z,W), 4, 4, 0, FMTP(2_10_10_10, SSCALED), \
                                             AA(AC_ALPHA_ADJUST_SSCALED)}, \
   [(int)PIPE_FORMAT_R10G10B10A2_UINT] = {DST_SEL(X,Y,Z,W), 4, 4, 0, FMTP(2_10_10_10, UINT)}, \
   [(int)PIPE_FORMAT_R10G10B10A2_SINT] = {DST_SEL(X,Y,Z,W), 4, 4, 0, FMTP(2_10_10_10, SINT), \
                                          AA(AC_ALPHA_ADJUST_SINT)}, \
   [(int)PIPE_FORMAT_R11G11B10_FLOAT] = {DST_SEL(X,Y,Z,1), 4, 3, 0, FMTP(10_11_11, FLOAT)}, \

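/* VB_FORMATS is expanded several times below with different HW_FMT /
 * HW_FMT_INVALID / AA definitions (an X-macro pattern), yielding one
 * table per hw generation.
 */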
#define HW_FMT(dfmt, nfmt) (V_008F0C_BUF_DATA_FORMAT_##dfmt | (V_008F0C_BUF_NUM_FORMAT_##nfmt << 4))
#define HW_FMT_INVALID (V_008F0C_BUF_DATA_FORMAT_INVALID | (V_008F0C_BUF_NUM_FORMAT_UNORM << 4))
#define AA(v) v
static const struct ac_vtx_format_info vb_formats_gfx6_alpha_adjust[] = {VB_FORMATS};
#undef AA

#define AA(v) AC_ALPHA_ADJUST_NONE
static const struct ac_vtx_format_info vb_formats_gfx6[] = {VB_FORMATS};
#undef HW_FMT_INVALID
#undef HW_FMT

#define HW_FMT(dfmt, nfmt) V_008F0C_GFX10_FORMAT_##dfmt##_##nfmt
#define HW_FMT_INVALID V_008F0C_GFX10_FORMAT_INVALID
static const struct ac_vtx_format_info vb_formats_gfx10[] = {VB_FORMATS};
#undef HW_FMT_INVALID
#undef HW_FMT

#define HW_FMT(dfmt, nfmt) V_008F0C_GFX11_FORMAT_##dfmt##_##nfmt
#define HW_FMT_INVALID V_008F0C_GFX11_FORMAT_INVALID
static const struct ac_vtx_format_info vb_formats_gfx11[] = {VB_FORMATS};

const struct ac_vtx_format_info *
ac_get_vtx_format_info_table(enum amd_gfx_level level, enum radeon_family family)
{
   if (level >= GFX11)
      return vb_formats_gfx11;
   else if (level >= GFX10)
      return vb_formats_gfx10;
   bool alpha_adjust = level <= GFX8 && family != CHIP_STONEY;
   return alpha_adjust ? vb_formats_gfx6_alpha_adjust : vb_formats_gfx6;
}

const struct ac_vtx_format_info *
ac_get_vtx_format_info(enum amd_gfx_level level, enum radeon_family family, enum pipe_format fmt)
{
   return &ac_get_vtx_format_info_table(level, family)[fmt];
}

/**
 * Check whether the specified fetch size is safe to use with MTBUF.
 *
 * Split typed vertex buffer loads when necessary to avoid any
 * alignment issues that trigger memory violations and eventually a GPU
 * hang. This can happen if the stride (static or dynamic) is unaligned and
 * also if the VBO offset is aligned to a scalar (e.g. stride is 8 and VBO
 * offset is 2 for R16G16B16A16_SNORM).
 */
static bool
is_fetch_size_safe(const enum amd_gfx_level gfx_level, const struct ac_vtx_format_info* vtx_info,
                   const unsigned offset, const unsigned alignment, const unsigned channels)
{
   if (!(vtx_info->has_hw_format & BITFIELD_BIT(channels - 1)))
      return false;

   unsigned vertex_byte_size = vtx_info->chan_byte_size * channels;
   return (gfx_level >= GFX7 && gfx_level <= GFX9) ||
          (offset % vertex_byte_size == 0 && MAX2(alignment, 1) % vertex_byte_size == 0);
}

/**
 * Gets the number of channels that can be safely fetched by MTBUF (typed buffer load)
 * instructions without triggering alignment-related issues.
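 *
 * E.g. on GFX10+, a 3-channel 32-bit fetch at offset 4 with alignment 4 is
 * unsafe (4 % 12 != 0), so this function falls back to single-channel
 * fetches (4 % 4 == 0).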
 */
unsigned
ac_get_safe_fetch_size(const enum amd_gfx_level gfx_level, const struct ac_vtx_format_info* vtx_info,
                       const unsigned offset, const unsigned max_channels, const unsigned alignment,
                       const unsigned num_channels)
{
   /* Packed formats can't be split. */
   if (!vtx_info->chan_byte_size)
      return vtx_info->num_channels;

   /* Early exit if the specified number of channels is fine. */
   if (is_fetch_size_safe(gfx_level, vtx_info, offset, alignment, num_channels))
      return num_channels;

   /* First, assume that more load instructions are worse and try using a larger data format. */
   unsigned new_channels = num_channels + 1;
   while (new_channels <= max_channels &&
          !is_fetch_size_safe(gfx_level, vtx_info, offset, alignment, new_channels)) {
      new_channels++;
   }

   /* Found a feasible load size. */
   if (new_channels <= max_channels)
      return new_channels;

   /* Try decreasing load size (at the cost of more load instructions). */
   new_channels = num_channels;
   while (new_channels > 1 &&
          !is_fetch_size_safe(gfx_level, vtx_info, offset, alignment, new_channels)) {
      new_channels--;
   }

   return new_channels;
}

enum ac_image_dim ac_get_sampler_dim(enum amd_gfx_level gfx_level, enum glsl_sampler_dim dim,
                                     bool is_array)
{
   switch (dim) {
   case GLSL_SAMPLER_DIM_1D:
      if (gfx_level == GFX9)
         return is_array ? ac_image_2darray : ac_image_2d;
      return is_array ? ac_image_1darray : ac_image_1d;
   case GLSL_SAMPLER_DIM_2D:
   case GLSL_SAMPLER_DIM_RECT:
   case GLSL_SAMPLER_DIM_EXTERNAL:
      return is_array ? ac_image_2darray : ac_image_2d;
   case GLSL_SAMPLER_DIM_3D:
      return ac_image_3d;
   case GLSL_SAMPLER_DIM_CUBE:
      return ac_image_cube;
   case GLSL_SAMPLER_DIM_MS:
      return is_array ? ac_image_2darraymsaa : ac_image_2dmsaa;
   case GLSL_SAMPLER_DIM_SUBPASS:
      return ac_image_2darray;
   case GLSL_SAMPLER_DIM_SUBPASS_MS:
      return ac_image_2darraymsaa;
   default:
      unreachable("bad sampler dim");
   }
}

enum ac_image_dim ac_get_image_dim(enum amd_gfx_level gfx_level, enum glsl_sampler_dim sdim,
                                   bool is_array)
{
   enum ac_image_dim dim = ac_get_sampler_dim(gfx_level, sdim, is_array);

   /* Match the resource type set in the descriptor. */
   if (dim == ac_image_cube || (gfx_level <= GFX8 && dim == ac_image_3d))
      dim = ac_image_2darray;
   else if (sdim == GLSL_SAMPLER_DIM_2D && !is_array && gfx_level == GFX9) {
      /* When a single layer of a 3D texture is bound, the shader
       * will refer to a 2D target, but the descriptor has a 3D type.
       * Since the HW ignores BASE_ARRAY in this case, we need to
       * send 3 coordinates. This doesn't hurt when the underlying
       * texture is non-3D.
       */
      dim = ac_image_3d;
   }

   return dim;
}

unsigned ac_get_fs_input_vgpr_cnt(const struct ac_shader_config *config,
                                  uint8_t *num_fragcoord_components)
{
   unsigned num_input_vgprs = 0;
   unsigned fragcoord_components = 0;

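   /* The SPI enables and packs PS input VGPRs in this fixed order. E.g. with
    * PERSP_CENTER and POS_X/Y_FLOAT enabled, the PS receives 4 input VGPRs
    * and 2 fragcoord components.
    */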
   if (G_0286CC_PERSP_SAMPLE_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_PERSP_CENTER_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_PERSP_CENTROID_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_PERSP_PULL_MODEL_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 3;
   if (G_0286CC_LINEAR_SAMPLE_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_LINEAR_CENTER_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_LINEAR_CENTROID_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_LINE_STIPPLE_TEX_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_POS_X_FLOAT_ENA(config->spi_ps_input_addr)) {
      num_input_vgprs += 1;
      fragcoord_components++;
   }
   if (G_0286CC_POS_Y_FLOAT_ENA(config->spi_ps_input_addr)) {
      num_input_vgprs += 1;
      fragcoord_components++;
   }
   if (G_0286CC_POS_Z_FLOAT_ENA(config->spi_ps_input_addr)) {
      num_input_vgprs += 1;
      fragcoord_components++;
   }
   if (G_0286CC_POS_W_FLOAT_ENA(config->spi_ps_input_addr)) {
      num_input_vgprs += 1;
      fragcoord_components++;
   }
   if (G_0286CC_FRONT_FACE_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_ANCILLARY_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_SAMPLE_COVERAGE_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_POS_FIXED_PT_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;

   if (num_fragcoord_components)
      *num_fragcoord_components = fragcoord_components;

   return num_input_vgprs;
}

uint16_t ac_get_ps_iter_mask(unsigned ps_iter_samples)
{
   /* The bit pattern matches that used by fixed function fragment
    * processing.
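    *
    * For N = ps_iter_samples, every Nth bit of the 16-bit mask is set,
    * e.g. N = 4 gives 0x1111.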
    */
   switch (ps_iter_samples) {
   case 1: return 0xffff;
   case 2: return 0x5555;
   case 4: return 0x1111;
   case 8: return 0x0101;
   case 16: return 0x0001;
   default:
      unreachable("invalid sample count");
   }
}

void ac_choose_spi_color_formats(unsigned format, unsigned swap, unsigned ntype,
                                 bool is_depth, bool use_rbplus,
                                 struct ac_spi_color_formats *formats)
{
   /* Alpha is needed for alpha-to-coverage.
    * Blending may be with or without alpha.
    */
   unsigned normal = 0;      /* most optimal, may not support blending or export alpha */
   unsigned alpha = 0;       /* exports alpha, but may not support blending */
   unsigned blend = 0;       /* supports blending, but may not export alpha */
   unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */

   /* Choose the SPI color formats. These are required values for RB+.
    * Other chips have multiple choices, though they are not necessarily better.
    */
   switch (format) {
   case V_028C70_COLOR_5_6_5:
   case V_028C70_COLOR_1_5_5_5:
   case V_028C70_COLOR_5_5_5_1:
   case V_028C70_COLOR_4_4_4_4:
   case V_028C70_COLOR_10_11_11:
   case V_028C70_COLOR_11_11_10:
   case V_028C70_COLOR_5_9_9_9:
   case V_028C70_COLOR_8:
   case V_028C70_COLOR_8_8:
   case V_028C70_COLOR_8_8_8_8:
   case V_028C70_COLOR_10_10_10_2:
   case V_028C70_COLOR_2_10_10_10:
      if (ntype == V_028C70_NUMBER_UINT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
      else if (ntype == V_028C70_NUMBER_SINT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
      else
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;

      if (!use_rbplus && format == V_028C70_COLOR_8 &&
          ntype != V_028C70_NUMBER_SRGB && swap == V_028C70_SWAP_STD) /* R */ {
         /* When RB+ is enabled, R8_UNORM should use FP16_ABGR for 2x
          * exporting performance. Otherwise, use 32_R to remove useless
          * instructions needed for 16-bit compressed exports.
          */
         blend = normal = V_028714_SPI_SHADER_32_R;
      }
      break;

   case V_028C70_COLOR_16:
   case V_028C70_COLOR_16_16:
   case V_028C70_COLOR_16_16_16_16:
      if (ntype == V_028C70_NUMBER_UNORM || ntype == V_028C70_NUMBER_SNORM) {
         /* UNORM16 and SNORM16 don't support blending */
         if (ntype == V_028C70_NUMBER_UNORM)
            normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
         else
            normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;

         /* Use 32 bits per channel for blending. */
         if (format == V_028C70_COLOR_16) {
            if (swap == V_028C70_SWAP_STD) { /* R */
               blend = V_028714_SPI_SHADER_32_R;
               blend_alpha = V_028714_SPI_SHADER_32_AR;
            } else if (swap == V_028C70_SWAP_ALT_REV) /* A */
               blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
            else
               assert(0);
         } else if (format == V_028C70_COLOR_16_16) {
            if (swap == V_028C70_SWAP_STD || swap == V_028C70_SWAP_STD_REV) { /* RG or GR */
               blend = V_028714_SPI_SHADER_32_GR;
               blend_alpha = V_028714_SPI_SHADER_32_ABGR;
            } else if (swap == V_028C70_SWAP_ALT) /* RA */
               blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
            else
               assert(0);
         } else /* 16_16_16_16 */
            blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
      } else if (ntype == V_028C70_NUMBER_UINT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
      else if (ntype == V_028C70_NUMBER_SINT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
      else if (ntype == V_028C70_NUMBER_FLOAT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
      else
         assert(0);
      break;

   case V_028C70_COLOR_32:
      if (swap == V_028C70_SWAP_STD) { /* R */
         blend = normal = V_028714_SPI_SHADER_32_R;
         alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
      } else if (swap == V_028C70_SWAP_ALT_REV) /* A */
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
      else
         assert(0);
      break;

   case V_028C70_COLOR_32_32:
      if (swap == V_028C70_SWAP_STD || swap == V_028C70_SWAP_STD_REV) { /* RG or GR */
         blend = normal = V_028714_SPI_SHADER_32_GR;
         alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
      } else if (swap == V_028C70_SWAP_ALT) /* RA */
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
      else
         assert(0);
      break;

   case V_028C70_COLOR_32_32_32_32:
   case V_028C70_COLOR_8_24:
   case V_028C70_COLOR_24_8:
   case V_028C70_COLOR_X24_8_32_FLOAT:
      alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
      break;

   default:
      assert(0);
      return;
   }

   /* The DB->CB copy needs 32_ABGR. */
   if (is_depth)
      alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;

   formats->normal = normal;
   formats->alpha = alpha;
   formats->blend = blend;
   formats->blend_alpha = blend_alpha;
}

void ac_compute_late_alloc(const struct radeon_info *info, bool ngg, bool ngg_culling,
                           bool uses_scratch, unsigned *late_alloc_wave64, unsigned *cu_mask)
{
   *late_alloc_wave64 = 0; /* The limit is per SA. */
   *cu_mask = 0xffff;

   /* This should never be called on gfx12. Gfx12 doesn't need to mask CUs for late alloc. */
   assert(info->gfx_level < GFX12);

   /* CU masking can decrease performance and cause a hang with <= 2 CUs per SA. */
   if (info->min_good_cu_per_sa <= 2)
      return;

   /* If scratch is used with late alloc, the GPU could deadlock if PS uses scratch too. A more
    * complicated computation is needed to enable late alloc with scratch (see PAL).
    */
   if (uses_scratch)
      return;

   /* Late alloc is not used for NGG on Navi14 due to a hw bug. */
   if (ngg && info->family == CHIP_NAVI14)
      return;

   if (info->gfx_level >= GFX10) {
      /* For Wave32, the hw will launch twice the number of late alloc waves, so 1 == 2x wave32.
       * These limits are estimated because they are all safe but they vary in performance.
       */
      if (ngg_culling)
         *late_alloc_wave64 = info->min_good_cu_per_sa * 10;
      else if (info->gfx_level >= GFX11)
         *late_alloc_wave64 = 63;
      else
         *late_alloc_wave64 = info->min_good_cu_per_sa * 4;

      /* Limit LATE_ALLOC_GS to prevent a hang (hw bug) on gfx10. */
      if (info->gfx_level == GFX10 && ngg)
         *late_alloc_wave64 = MIN2(*late_alloc_wave64, 64);

      /* Gfx10: CU2 & CU3 must be disabled to prevent a hw deadlock.
       * Others: CU1 must be disabled to prevent a hw deadlock.
       *
       * The deadlock is caused by late alloc, which usually increases performance.
       */
      *cu_mask &= info->gfx_level == GFX10 ? ~BITFIELD_RANGE(2, 2) :
                                             ~BITFIELD_RANGE(1, 1);
   } else {
      if (info->min_good_cu_per_sa <= 4) {
         /* Too few available compute units per SA. Disallowing VS to run on one CU could hurt us
          * more than late VS allocation would help.
          *
          * 2 is the highest safe number that allows us to keep all CUs enabled.
          */
         *late_alloc_wave64 = 2;
      } else {
         /* This is a good initial value, allowing 1 late_alloc wave per SIMD on num_cu - 2.
          */
         *late_alloc_wave64 = (info->min_good_cu_per_sa - 2) * 4;
      }

      /* VS can't execute on one CU if the limit is > 2. */
      if (*late_alloc_wave64 > 2)
         *cu_mask = 0xfffe; /* 1 CU disabled */
   }

   /* Max number that fits into the register field. */
   if (ngg) /* GS */
      *late_alloc_wave64 = MIN2(*late_alloc_wave64, G_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(~0u));
   else /* VS */
      *late_alloc_wave64 = MIN2(*late_alloc_wave64, G_00B11C_LIMIT(~0u));
}

unsigned ac_compute_cs_workgroup_size(const uint16_t sizes[3], bool variable, unsigned max)
{
   if (variable)
      return max;

   return sizes[0] * sizes[1] * sizes[2];
}

unsigned ac_compute_lshs_workgroup_size(enum amd_gfx_level gfx_level, gl_shader_stage stage,
                                        unsigned tess_num_patches,
                                        unsigned tess_patch_in_vtx,
                                        unsigned tess_patch_out_vtx)
{
   /* When tessellation is used, API VS runs on HW LS, API TCS runs on HW HS.
    * These two HW stages are merged on GFX9+.
    */

   bool merged_shaders = gfx_level >= GFX9;
   unsigned ls_workgroup_size = tess_num_patches * tess_patch_in_vtx;
   unsigned hs_workgroup_size = tess_num_patches * tess_patch_out_vtx;

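   /* E.g. 8 patches with 3 input and 4 output vertices each: the LS half
    * needs 24 invocations and the HS half needs 32, so a merged GFX9+
    * workgroup uses MAX2(24, 32) = 32.
    */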
   if (merged_shaders)
      return MAX2(ls_workgroup_size, hs_workgroup_size);
   else if (stage == MESA_SHADER_VERTEX)
      return ls_workgroup_size;
   else if (stage == MESA_SHADER_TESS_CTRL)
      return hs_workgroup_size;
   else
      unreachable("invalid LSHS shader stage");
}

unsigned ac_compute_esgs_workgroup_size(enum amd_gfx_level gfx_level, unsigned wave_size,
                                        unsigned es_verts, unsigned gs_inst_prims)
{
   /* ESGS may operate in workgroups if on-chip GS (LDS rings) are enabled.
    *
    * GFX6: Not possible in the HW.
    * GFX7-8 (unmerged): possible in the HW, but not implemented in Mesa.
    * GFX9+ (merged): implemented in Mesa.
    */

   if (gfx_level <= GFX8)
      return wave_size;

   unsigned workgroup_size = MAX2(es_verts, gs_inst_prims);
   return CLAMP(workgroup_size, 1, 256);
}

unsigned ac_compute_ngg_workgroup_size(unsigned es_verts, unsigned gs_inst_prims,
                                       unsigned max_vtx_out, unsigned prim_amp_factor)
{
   /* NGG always operates in workgroups.
    *
    * For API VS/TES/GS:
    * - 1 invocation per input vertex
    * - 1 invocation per input primitive
    *
    * The same invocation can process both an input vertex and primitive,
    * however 1 invocation can only output up to 1 vertex and 1 primitive.
    */

   unsigned max_vtx_in = es_verts < 256 ? es_verts : 3 * gs_inst_prims;
   unsigned max_prim_in = gs_inst_prims;
   unsigned max_prim_out = gs_inst_prims * prim_amp_factor;
   unsigned workgroup_size = MAX4(max_vtx_in, max_vtx_out, max_prim_in, max_prim_out);

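   /* E.g. es_verts = 96, gs_inst_prims = 32, max_vtx_out = 96,
    * prim_amp_factor = 1: MAX4(96, 96, 32, 32) = 96 invocations.
    */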
   return CLAMP(workgroup_size, 1, 256);
}

uint32_t ac_compute_num_tess_patches(const struct radeon_info *info, uint32_t num_tcs_input_cp,
                                     uint32_t num_tcs_output_cp, uint32_t vram_per_patch,
                                     uint32_t lds_per_patch, uint32_t wave_size,
                                     bool tess_uses_primid)
{
   /* The VGT HS block increments the patch ID unconditionally
    * within a single threadgroup. This results in incorrect
    * patch IDs when instanced draws are used.
    *
    * The intended solution is to restrict threadgroups to
    * a single instance by setting SWITCH_ON_EOI, which
    * should cause IA to split instances up. However, this
    * doesn't work correctly on GFX6 when there is no other
    * SE to switch to.
    */
   const bool has_primid_instancing_bug = info->gfx_level == GFX6 && info->max_se == 1;
   if (has_primid_instancing_bug && tess_uses_primid)
      return 1;

   /* Ensure that we only need 4 waves per CU, so that we don't need to check
    * resource usage (such as whether we have enough VGPRs to fit the whole
    * threadgroup into the CU). It also ensures that the number of tcs in and out
    * vertices per threadgroup are at most 256, which is the hw limit.
    */
   const unsigned max_verts_per_patch = MAX2(num_tcs_input_cp, num_tcs_output_cp);
   unsigned num_patches = 256 / max_verts_per_patch;

   /* Not necessary for correctness, but higher numbers are slower.
    * The hardware can do more, but we prefer fully occupied waves.
    * e.g. 64 triangle patches means 3 fully occupied Wave64 waves.
    */
   num_patches = MIN2(num_patches, 64);

   /* When distributed tessellation is unsupported, switch between SEs
    * at a higher frequency to manually balance the workload between SEs.
    */
   if (!info->has_distributed_tess && info->max_se > 1)
      num_patches = MIN2(num_patches, 16); /* recommended */

   /* Make sure the output data fits in the offchip buffer */
   if (vram_per_patch) {
      const uint32_t tess_offchip_block_dw_size = info->family == CHIP_HAWAII ? 4096 : 8192;
      num_patches =
         MIN2(num_patches, (tess_offchip_block_dw_size * 4) / vram_per_patch);
   }

   /* Make sure that the data fits in LDS. This assumes the shaders only
    * use LDS for the inputs and outputs.
    */
   if (lds_per_patch) {
      const unsigned max_lds_size = (info->gfx_level >= GFX9 ? 64 * 1024 : 32 * 1024); /* hw limit */
      /* Target at least 2 workgroups per CU. */
      const unsigned target_lds_size = max_lds_size / 2 -
                                       (info->gfx_level >= GFX11 ? AC_HS_MSG_VOTE_LDS_BYTES : 0);
      num_patches = MIN2(num_patches, target_lds_size / lds_per_patch);
      assert(num_patches * lds_per_patch <= max_lds_size);
   }
   num_patches = MAX2(num_patches, 1);

   /* Make sure that vector lanes are fully occupied by cutting off the last wave
    * if it's only partially filled.
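    *
    * E.g. wave_size = 64, max_verts_per_patch = 4, num_patches = 40:
    * temp_verts_per_tg = 160, so the last wave would be half empty and
    * num_patches is trimmed to 128 / 4 = 32 (two full waves).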
    */
   const unsigned temp_verts_per_tg = num_patches * max_verts_per_patch;

   if (temp_verts_per_tg > wave_size &&
       (wave_size - temp_verts_per_tg % wave_size >= MAX2(max_verts_per_patch, 8)))
      num_patches = (temp_verts_per_tg & ~(wave_size - 1)) / max_verts_per_patch;

   if (info->gfx_level == GFX6) {
      /* GFX6 bug workaround, related to power management. Limit LS-HS
       * threadgroups to only one wave.
       */
      const unsigned one_wave = wave_size / max_verts_per_patch;
      num_patches = MIN2(num_patches, one_wave);
   }

   return num_patches;
}

uint32_t ac_apply_cu_en(uint32_t value, uint32_t clear_mask, unsigned value_shift,
                        const struct radeon_info *info)
{
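   /* E.g. for a 16-bit CU_EN field at bits [31:16], clear_mask = 0xffff,
    * so cu_en_mask = 0xffff0000 and cu_en_shift = 16; the field is ANDed
    * with the mask of usable CUs while the other bits pass through unchanged.
    */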
   /* Register field position and mask. */
   uint32_t cu_en_mask = ~clear_mask;
   unsigned cu_en_shift = ffs(cu_en_mask) - 1;
   /* The value being set. */
   uint32_t cu_en = (value & cu_en_mask) >> cu_en_shift;

   uint32_t set_cu_en = info->spi_cu_en;

   if (info->gfx_level >= GFX12 && clear_mask == 0) {
      /* The CU mask has 32 bits and is per SE, not per SA. This math doesn't work with
       * asymmetric WGP harvesting because SA0 doesn't always end on the same bit.
       */
      set_cu_en &= BITFIELD_MASK(info->max_good_cu_per_sa);
      set_cu_en |= set_cu_en << info->max_good_cu_per_sa;
   }

   /* AND the field by spi_cu_en. */
   uint32_t spi_cu_en = set_cu_en >> value_shift;
   return (value & ~cu_en_mask) |
          (((cu_en & spi_cu_en) << cu_en_shift) & cu_en_mask);
}

/* Return the register value and tune bytes_per_wave to increase scratch performance. */
void ac_get_scratch_tmpring_size(const struct radeon_info *info,
                                 unsigned bytes_per_wave, unsigned *max_seen_bytes_per_wave,
                                 uint32_t *tmpring_size)
{
   /* SPI_TMPRING_SIZE and COMPUTE_TMPRING_SIZE are essentially scratch buffer descriptors.
    * WAVES means NUM_RECORDS. WAVESIZE is the size of each element, meaning STRIDE.
    * Thus, WAVESIZE must be constant while the scratch buffer is being used by the GPU.
    *
    * If you want to increase WAVESIZE without waiting for idle, you need to allocate a new
    * scratch buffer and use it instead. This will result in multiple scratch buffers being
    * used at the same time, each with a different WAVESIZE.
    *
    * If you want to decrease WAVESIZE, you don't have to. There is no advantage in decreasing
    * WAVESIZE after it's been increased.
    *
    * Shaders with SCRATCH_EN=0 don't allocate scratch space.
    */
   const unsigned size_shift = info->gfx_level >= GFX11 ? 8 : 10;
   const unsigned min_size_per_wave = BITFIELD_BIT(size_shift);

   /* The LLVM shader backend should be reporting aligned scratch_sizes. */
   assert((bytes_per_wave & BITFIELD_MASK(size_shift)) == 0 &&
          "scratch size per wave should be aligned");

   /* Add 1 scratch item to make the number of items odd. This should improve scratch
    * performance by more randomly distributing scratch waves among memory channels.
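    *
    * E.g. on GFX11 (size_shift = 8), 1024 bytes per wave becomes
    * 1024 | 256 = 1280 bytes = 5 items of 256 bytes, an odd count.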
    */
   if (bytes_per_wave)
      bytes_per_wave |= min_size_per_wave;

   *max_seen_bytes_per_wave = MAX2(*max_seen_bytes_per_wave, bytes_per_wave);

   unsigned max_scratch_waves = info->max_scratch_waves;
   if (info->gfx_level >= GFX11)
      max_scratch_waves /= info->max_se; /* WAVES is per SE */

   /* TODO: We could decrease WAVES to make the whole buffer fit into the infinity cache. */
   *tmpring_size = S_0286E8_WAVES(max_scratch_waves) |
                   S_0286E8_WAVESIZE(*max_seen_bytes_per_wave >> size_shift);
}

/* Convert chip-agnostic memory access flags into hw-specific cache flags.
 *
 * "access" must be a result of ac_nir_get_mem_access_flags() with the appropriate ACCESS_TYPE_*
 * flags set.
 */
union ac_hw_cache_flags ac_get_hw_cache_flags(enum amd_gfx_level gfx_level,
                                              enum gl_access_qualifier access)
{
   union ac_hw_cache_flags result;
   result.value = 0;

   assert(util_bitcount(access & (ACCESS_TYPE_LOAD | ACCESS_TYPE_STORE |
                                  ACCESS_TYPE_ATOMIC)) == 1);
   assert(!(access & ACCESS_TYPE_SMEM) || access & ACCESS_TYPE_LOAD);
   assert(!(access & ACCESS_IS_SWIZZLED_AMD) || !(access & ACCESS_TYPE_SMEM));
   assert(!(access & ACCESS_MAY_STORE_SUBDWORD) || access & ACCESS_TYPE_STORE);

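   /* E.g. a device-scope (ACCESS_COHERENT) VMEM load maps to GLC on GFX6-9,
    * GLC|DLC on GFX10-10.3, GLC on GFX11, and scope = device on GFX12.
    */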
   bool scope_is_device = access & (ACCESS_COHERENT | ACCESS_VOLATILE);

   if (gfx_level >= GFX12) {
      if (access & ACCESS_CP_GE_COHERENT_AMD) {
         bool cp_sdma_ge_use_system_memory_scope = gfx_level == GFX12;
         result.gfx12.scope = cp_sdma_ge_use_system_memory_scope ?
                                 gfx12_scope_memory : gfx12_scope_device;
      } else if (scope_is_device) {
         result.gfx12.scope = gfx12_scope_device;
      } else {
         result.gfx12.scope = gfx12_scope_cu;
      }

      if (access & ACCESS_NON_TEMPORAL) {
         if (access & ACCESS_TYPE_LOAD) {
            /* Don't use non_temporal for SMEM because it can't set regular_temporal for MALL. */
            if (!(access & ACCESS_TYPE_SMEM))
               result.gfx12.temporal_hint = gfx12_load_near_non_temporal_far_regular_temporal;
         } else if (access & ACCESS_TYPE_STORE) {
            result.gfx12.temporal_hint = gfx12_store_near_non_temporal_far_regular_temporal;
         } else {
            result.gfx12.temporal_hint = gfx12_atomic_non_temporal;
         }
      }
   } else if (gfx_level >= GFX11) {
      /* GFX11 simplified it and exposes what is actually useful.
       *
       * GLC means device scope for loads only. (stores and atomics are always device scope)
       * SLC means non-temporal for GL1 and GL2 caches. (GL1 = hit-evict, GL2 = stream, unavailable in SMEM)
       * DLC means non-temporal for MALL. (noalloc, i.e. coherent bypass)
       *
       * GL0 doesn't have a non-temporal flag, so you always get LRU caching in CU scope.
       */
      if (access & ACCESS_TYPE_LOAD && scope_is_device)
         result.value |= ac_glc;

      if (access & ACCESS_NON_TEMPORAL && !(access & ACCESS_TYPE_SMEM))
         result.value |= ac_slc;
   } else if (gfx_level >= GFX10) {
      /* GFX10-10.3:
       *
       * VMEM and SMEM loads (SMEM only supports the first four):
       * !GLC && !DLC && !SLC means CU scope          <== use for normal loads with CU scope
       *  GLC && !DLC && !SLC means SA scope
       * !GLC &&  DLC && !SLC means CU scope, GL1 bypass
       *  GLC &&  DLC && !SLC means device scope      <== use for normal loads with device scope
       * !GLC && !DLC &&  SLC means CU scope, non-temporal (GL0 = GL1 = hit-evict, GL2 = stream)  <== use for non-temporal loads with CU scope
       *  GLC && !DLC &&  SLC means SA scope, non-temporal (GL1 = hit-evict, GL2 = stream)
       * !GLC &&  DLC &&  SLC means CU scope, GL0 non-temporal, GL1-GL2 coherent bypass (GL0 = hit-evict, GL1 = bypass, GL2 = noalloc)
       *  GLC &&  DLC &&  SLC means device scope, GL2 coherent bypass (noalloc)  <== use for non-temporal loads with device scope
       *
       * VMEM stores/atomics (stores are CU scope only if they overwrite the whole cache line,
       * atomics are always device scope, GL1 is always bypassed):
       * !GLC && !DLC && !SLC means CU scope          <== use for normal stores with CU scope
       *  GLC && !DLC && !SLC means device scope      <== use for normal stores with device scope
       * !GLC &&  DLC && !SLC means CU scope, GL2 non-coherent bypass
       *  GLC &&  DLC && !SLC means device scope, GL2 non-coherent bypass
       * !GLC && !DLC &&  SLC means CU scope, GL2 non-temporal (stream)  <== use for non-temporal stores with CU scope
       *  GLC && !DLC &&  SLC means device scope, GL2 non-temporal (stream)  <== use for non-temporal stores with device scope
       * !GLC &&  DLC &&  SLC means CU scope, GL2 coherent bypass (noalloc)
       *  GLC &&  DLC &&  SLC means device scope, GL2 coherent bypass (noalloc)
       *
       * "stream" allows write combining in GL2. "coherent bypass" doesn't.
       * "non-coherent bypass" doesn't guarantee ordering with any coherent stores.
       */
      if (scope_is_device && !(access & ACCESS_TYPE_ATOMIC))
         result.value |= ac_glc | (access & ACCESS_TYPE_LOAD ? ac_dlc : 0);

      if (access & ACCESS_NON_TEMPORAL && !(access & ACCESS_TYPE_SMEM))
         result.value |= ac_slc;
   } else {
      /* GFX6-GFX9:
       *
       * VMEM loads:
       * !GLC && !SLC means CU scope
       *  GLC && !SLC means (GFX6: device scope, GFX7-9: device scope [*])
       * !GLC &&  SLC means (GFX6: CU scope, GFX7: device scope, GFX8-9: CU scope), GL2 non-temporal (stream)
       *  GLC &&  SLC means device scope, GL2 non-temporal (stream)
       *
       * VMEM stores (atomics don't have [*]):
       * !GLC && !SLC means (GFX6: CU scope, GFX7-9: device scope [*])
       *  GLC && !SLC means (GFX6-7: device scope, GFX8-9: device scope [*])
       * !GLC &&  SLC means (GFX6: CU scope, GFX7-9: device scope [*]), GL2 non-temporal (stream)
       *  GLC &&  SLC means device scope, GL2 non-temporal (stream)
       *
       * [*] data can be cached in GL1 for future CU scope
       *
       * SMEM loads:
       *  GLC means device scope (available on GFX8+)
       */
      if (scope_is_device && !(access & ACCESS_TYPE_ATOMIC)) {
         /* SMEM doesn't support the device scope on GFX6-7. */
         assert(gfx_level >= GFX8 || !(access & ACCESS_TYPE_SMEM));
         result.value |= ac_glc;
      }

      if (access & ACCESS_NON_TEMPORAL && !(access & ACCESS_TYPE_SMEM))
         result.value |= ac_slc;

      /* GFX6 has a TC L1 bug causing corruption of 8bit/16bit stores. All store opcodes not
       * aligned to a dword are affected.
       */
      if (gfx_level == GFX6 && access & ACCESS_MAY_STORE_SUBDWORD)
         result.value |= ac_glc;
   }

   if (access & ACCESS_IS_SWIZZLED_AMD) {
      if (gfx_level >= GFX12)
         result.gfx12.swizzled = true;
      else
         result.value |= ac_swizzled;
   }

   return result;
}

unsigned ac_get_all_edge_flag_bits(enum amd_gfx_level gfx_level)
{
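   /* Each of the three vertex slots in the NGG primitive export carries an
    * edge flag as the top bit of its field: bit 9 of each 10-bit slot before
    * GFX12 (bits 9/19/29), bit 8 of each 9-bit slot on GFX12 (bits 8/17/26).
    */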
   return gfx_level >= GFX12 ?
             ((1u << 8) | (1u << 17) | (1u << 26)) :
             ((1u << 9) | (1u << 19) | (1u << 29));
}

/**
 * Returns a unique index for a per-patch semantic name and index. The index
 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
 * can be calculated.
 */
unsigned
ac_shader_io_get_unique_index_patch(unsigned semantic)
{
   switch (semantic) {
   case VARYING_SLOT_TESS_LEVEL_OUTER:
      return 0;
   case VARYING_SLOT_TESS_LEVEL_INNER:
      return 1;
   default:
      if (semantic >= VARYING_SLOT_PATCH0 && semantic < VARYING_SLOT_PATCH0 + 30)
         return 2 + (semantic - VARYING_SLOT_PATCH0);

      assert(!"invalid semantic");
      return 0;
   }
}
